diff --git "a/488.jsonl" "b/488.jsonl" new file mode 100644--- /dev/null +++ "b/488.jsonl" @@ -0,0 +1,622 @@ +{"seq_id":"3340908725","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 6 15:59:12 2022\n\n@author: Ireneg\n\"\"\"\nfrom IPython import get_ipython\nget_ipython().magic('reset -sf')\n\nimport plotly.express as px\n\n#####################################Params #############################################################\n#########################################################################################################\nglobal StartCycle,StartCycle4Avr,PHpoitToIgnor,MaxWaveWindow,DistanceBtWPointMM,Panel,Cycle2Display,Panel2Disply,colorPNL,MaxMinPcnt\n\n\n\n## for plot per panel and plot per cycle and WaveData SUB Average_PerPanel_PerCycle\nplotPerPanel=1;# On/OFF plot\nplotPerCycle=0;## On/OFF plot\nWaveDataSUBAverage_PerPanel_PerCycle=1 # On/OFF plot (Avi method)\nCycleNumber =3 # cycle view in => plot Per Panel\nStartCycle4Avr = 2; # Start averaging for all plots defult = 2\nPanel = 6; #view panel for plot Per cycle\nColorForDisplay = 'Cyan' # Not in use\nCycle2Display = 4 # defult visible cycle in plot WaveDataSUBAverage_PerPanel_PerCycle\nPanel2Disply= [11,6]\nMaxMinPcnt=90 # %\ncolorPNL=px.colors.sequential.Reds[2:]+px.colors.sequential.Viridis;\n\n\n\n## for plot CIScurve\nCIScurve=1;# On/OFF plot\n\n## for plot registration estimation in Wave Prints (yuval)\nregistrationBetweenWavePrints=0; # On/OFF plot ERROR\nStartCycle=3\nrgistBtwPntStartCycle=StartCycle # (it is not a parameter)\nrgistBtwPntEndCycle=StartCycle+1 # for long print can change to larger number\nMainColor = \"Black\" #Referance Color\n\n## Wave plot ( before and after correction)\nBeforAndAfterCorr=0# On/OFF plot\n\n\n## DX plot - delta between wave and starvitzky filer (residue) \nWaveFilterResidue_dxPlot=1 # On/OFF plot\nPHpoitToIgnor=2; # Ponits of Print head to ignar (16 point in total) in each side\nMaxWaveWindow=51;# S.gol filter window\nS_g_Degree=1;# S.gol filter degree\nDistanceBtWPointMM=2.734\nNieghborColorsFor7colrs=6# parameter for distortion correction (number of nighboring colors)\n\n\n###for Tables\nPlotTables=1# On/OFF table\nColorLevels= 5; # Heat Map for offset- number of levels of colors from white to hot red\nDivideByNum= 20; # Correction for offset Haet map- if occurs error try to increase this number\nColorLevelsTilt=7; #Heat Map for tilt- number of levels of colors from white to hot red\nDivideByNumTilt=1;# Correction for tilt Haet map- if occurs error try to increase this number\n\n#########################################################################################################\n\n#########################################################################################################\nimport os\n\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.io\nfrom datetime import datetime\nimport glob\nfrom zipfile import ZipFile \nfrom pathlib import Path\nfrom collections import OrderedDict\nfrom scipy.signal import savgol_filter\nfrom plotly.colors import n_colors\nimport zipfile\nimport csv\nfrom io import BytesIO\n\n# Load the Pandas libraries with alias 'pd' \nimport pandas as pd \nimport plotly.graph_objects as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot\nfrom plotly.subplots import make_subplots\nimport math \n\n\nclass CalcWaveFromRawData:\n def __init__(self, pthF,side,Panel): \n self.pthF = pthF;\n self.side = side;\n self.Panel= Panel;\n \n def LoadRawDataOLD(self):\n 
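# Legacy loader: reads WavePrintDirection.csv from an already-extracted results folder on disk;\n # superseded by LoadRawData below, which reads the same file straight from the results zip.\n 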
RawData=pd.read_csv(self.pthF+self.side+'/'+'RawResults/WavePrintDirection.csv');\n\n return RawData;\n \n \n def GetFileFromZip(self,zip_file_path,subdir_name_in_zip,file_name_in_zip):\n \n with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n file_path_in_zip = subdir_name_in_zip + \"/\" + file_name_in_zip\n with zip_ref.open(file_path_in_zip) as file:\n # read the contents of the file into memory\n file_content = file.read()\n \n # convert the file content to a pandas dataframe\n df = pd.read_csv(BytesIO(file_content))\n return df; \n \n \n def LoadRawData(self):\n \n zip_file_path=self.pthF\n subdir_name_in_zip=self.side+'/'+'RawResults'; \n file_name_in_zip='WavePrintDirection.csv'\n \n # RawData=pd.read_csv(self.pthF+self.side+'/'+'RawResults/WavePrintDirection.csv');\n RawData=self.GetFileFromZip(zip_file_path, subdir_name_in_zip, file_name_in_zip)\n return RawData;\n \n def getColors(self):\n RawData= self.LoadRawData();\n ColorList=RawData.iloc[:,7].unique().tolist();\n return ColorList\n \n def getNumberOfFlats(self):\n RawData= self.LoadRawData();\n FlatList=RawData.iloc[:,1].unique().tolist();\n return FlatList\n \n def FilterRawData(self,ColorForDisplay):\n RawData= self.LoadRawData();\n \n # DataSec=RawData[RawData['Overall Status']=='Success'].reset_index(drop=True);\n \n DataSec=RawData;\n\n\n DataSecPrintDirc=DataSec[DataSec['Direction Type ']=='Print Direction']\n \n DataSecPrintDircPanel=DataSecPrintDirc[DataSecPrintDirc['Panel Id']==self.Panel]\n \n DataSecPrintDircPanelColor=DataSecPrintDircPanel[DataSecPrintDircPanel[' Seperation']==ColorForDisplay].reset_index(drop=True)\n \n col=list(DataSecPrintDircPanelColor.columns)\n \n cutCols=col[12:396]\n \n DataSecPrintDircPanelColorCUT=DataSecPrintDircPanelColor[cutCols];\n LocatorIndex= DataSec['Locator Index'][int(len(DataSec['Locator Index'])/2)];\n \n return LocatorIndex,DataSecPrintDircPanelColorCUT,cutCols;\n \n \n def ArrangeRawDataForAnalize(self,ColorForDisplay):\n \n LocatorIndex,DataSecPrintDircPanelColorCUT,cutCols=self.FilterRawData(ColorForDisplay);\n WaveRaw=pd.DataFrame();\n\n for i in range(len(DataSecPrintDircPanelColorCUT[cutCols[0]])):\n l=list(DataSecPrintDircPanelColorCUT.loc[i,:])\n tmpDF=pd.DataFrame();\n while (1):\n tmp=next((j for j, x in enumerate(l) if not isinstance(x, float)), 'DONE');\n if tmp == 'DONE':\n break;\n else:\n if l[tmp].replace('.', '', 1).replace('-', '').isdigit():\n l[tmp]=float(l[tmp]);\n else: \n if l[tmp] == 'NotFound':\n if isinstance(l[tmp+1], float):\n l[tmp]=l[tmp+1]\n else:\n if l[tmp+1].replace('.', '', 1).replace('-', '').isdigit():\n l[tmp]=float(l[tmp+1])\n else:\n break;\n if not tmp == 'DONE':\n WaveRaw=pd.concat([WaveRaw,pd.DataFrame(l[0:tmp-1])],axis=1).rename(columns={0:i+1}) \n return WaveRaw; \n\n def GetLocatorIndex(self,ColorForDisplay):\n LocatorIndex,DataSecPrintDircPanelColorCUT,cutCols=self.FilterRawData(ColorForDisplay);\n return LocatorIndex;\n \n def CreateDicOfWaveRawData(self):\n ColorList=self.getColors();\n WaveRawDataDic={};\n for ColorForDisplay in ColorList: \n tmp=self.ArrangeRawDataForAnalize(ColorForDisplay);\n tmp=pd.concat([tmp,tmp.loc[:,StartCycle4Avr:].mean(axis=1)],axis=1).rename(columns={0:'Mean'})\n WaveRawDataDic[ColorForDisplay]=tmp;\n return WaveRawDataDic;\n \n def FilterWaveDataDic(self):\n ColorList=self.getColors();\n WaveRawDataDic=self.CreateDicOfWaveRawData();\n WaveDataWithMaxFilterDic={};\n\n for ColorForDisplay in ColorList: \n tmp=pd.DataFrame();\n for col in WaveRawDataDic[ColorForDisplay].columns:\n 
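# Smooth each cycle column with a Savitzky-Golay filter (window=MaxWaveWindow, degree=S_g_Degree).\n 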
tmp=pd.concat([tmp,pd.Series(savgol_filter(WaveRawDataDic[ColorForDisplay][col], MaxWaveWindow, S_g_Degree))],axis=1)\n tmp=tmp.rename(columns={0:col})\n WaveDataWithMaxFilterDic[ColorForDisplay]=tmp\n return WaveDataWithMaxFilterDic;\n\n def FilterWaveDataDicTEST(self,WaveRawDataDic):\n ColorList=self.getColors();\n # WaveRawDataDic=self.CreateDicOfWaveRawData();\n WaveDataWithMaxFilterDic={};\n\n for ColorForDisplay in ColorList: \n tmp=pd.DataFrame();\n for col in WaveRawDataDic[ColorForDisplay].columns:\n WaveRawDataDic[ColorForDisplay][col]=WaveRawDataDic[ColorForDisplay][col].fillna(method='ffill')\n tmp=pd.concat([tmp,pd.Series(savgol_filter(WaveRawDataDic[ColorForDisplay][col], MaxWaveWindow, S_g_Degree))],axis=1)\n tmp=tmp.rename(columns={0:col})\n WaveDataWithMaxFilterDic[ColorForDisplay]=tmp\n return WaveDataWithMaxFilterDic;\n\n def CalcPHlocation(self,ColorForDisplay):\n \n LocatorIndex= self.GetLocatorIndex(ColorForDisplay)\n WaveRawDataDic=self.CreateDicOfWaveRawData();\n PHloc=[]\n PHloc.append(LocatorIndex)\n numForward=LocatorIndex\n numBack=LocatorIndex\n\n for i in range(len(WaveRawDataDic[ColorForDisplay]['Mean'])):\n numForward=numForward+16;\n numBack=numBack-16;\n if numBack>0:\n PHloc.append(numBack);\n if numForward1: \n lines.append(line)\n else:\n if len(line)==0: \n break;\n return lines;\n \n def LoadRawData(self):\n \n \n zip_file_path=self.pthF\n subdir_name_in_zip='Data'\n file_name_in_zip='JobData.csv'\n \n lines = self.GetFileFromZip(zip_file_path, subdir_name_in_zip, file_name_in_zip)\n \n return lines; \n \n def GetFileFromZip(self,zip_file_path,subdir_name_in_zip,file_name_in_zip):\n \n with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n file_path_in_zip = subdir_name_in_zip + \"/\" + file_name_in_zip\n with zip_ref.open(file_path_in_zip) as file:\n # read the contents of the file into memory\n lines = [row for row in csv.reader(file.read().decode(\"utf-8\").splitlines())]\n \n # convert the file content to a pandas dataframe\n # df = pd.read_csv(BytesIO(file_content))\n return lines; \n \n def GetCIScurveOldVersion(self):\n jobData=self.LoadRawData()\n sub='CisCurvatureDataBasedOnWaveFormat=';\n res = list(filter(lambda x: sub in x, jobData));\n cisFRONT=[]\n cisBACK=[]\n\n if len(res)>0:\n for i,rs in enumerate(res):\n if len(rs)> 1000:\n tmp=rs.split(',')\n tmp.pop(0);\n for c in tmp:\n if c.replace('.', '', 1).replace('-', '').isdigit():\n if i==0:\n cisFRONT.append(float(c))\n else:\n cisBACK.append(float(c))\n\n \n return cisBACK,cisFRONT; \n \n \n def GetCIScurveOldVersion_SecondTry(self):\n jobData=self.LoadRawData()\n sub='CisCurvatureDataBasedOnWaveFormat=';\n indices = []\n \n for line_num, line in enumerate(jobData):\n if len(line)>1:\n if sub in line[0]:\n indices.append(line_num) \n cisFRONT=[] \n try:\n cisFRONT = list(map(float, jobData[indices[0]][1:]))\n except:\n 1\n cisBACK=[]\n if len(indices)>1:\n cisBACK = list(map(float, jobData[indices[1]][1:]))\n\n flag = True # Set flag to True by default\n\n if len(cisFRONT) == len(cisBACK): # Check if the lists have the same length\n for i in range(len(cisFRONT)):\n if cisFRONT[i] != cisBACK[i]: # Check if the corresponding elements are different\n flag = False # Set flag to False if a difference is found\n break\n else:\n flag = False\n \n if flag:\n cisBACK=[]\n \n return cisBACK,cisFRONT; \n \n def GetCIScurveNewVersion(self):\n jobData=self.LoadRawData()\n sub='ShouldUseCISCurvaturePerPixel=Value:True';\n res = list(filter(lambda x: sub in x, jobData));\n cisFRONT=[]\n cisBACK=[]\n 
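# NOTE: jobData rows come from csv.reader as lists of fields, so the 'sub in x' filter only\n # matches when the marker is an exact field, and split(',') on a row list would fail;\n # GetCIScurveNewVersion_secondTry below handles that case.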
\n if len(res)>0:\n sub='CISTilt=Value';\n res = list(filter(lambda x: sub in x, jobData));\n for i,rs in enumerate(res):\n ind=jobData.index(rs)\n if len(jobData[ind+1])> 1000:\n tmp=jobData[ind+1].split(',')\n # tmp=list(map(float, jobData[ind+1].split(',')))\n for j,c in enumerate(tmp): \n if c.replace('.', '', 1).replace('-', '').isdigit():\n if i==0:\n cisFRONT.append(float(c))\n else:\n cisBACK.append(float(c)) \n \n\n \n return cisBACK,cisFRONT; \n \n def GetCIScurveNewVersion_secondTry(self):\n jobData=self.LoadRawData()\n sub='CISTilt=Value';\n \n indices = []\n for line_num, line in enumerate(jobData):\n try:\n if sub in line[0]:\n indices.append(line_num) \n except:\n continue\n cisFRONT=[] \n cisBACK=[] \n for s in jobData[indices[0]+1]:\n try:\n cisFRONT.append(float(s)) \n except:\n continue\n \n if len(indices)>1:\n for s in jobData[indices[1]+1]:\n try:\n cisBACK.append(float(s)) \n except:\n continue\n \n return cisBACK,cisFRONT; \n\nclass RepareDistortions:\n def __init__(self, WaveRawDataDic,WaveDataWithMaxFilterDic,ColorList): \n self.WaveRawDataDic = WaveRawDataDic;\n self.WaveDataWithMaxFilterDic = WaveDataWithMaxFilterDic;\n self.ColorList= ColorList;\n \n def CalcWaveAfterFilterSubstraction(self): \n col='Mean'\n WaveFilter_RawData={}\n \n for clr in self.ColorList:\n WaveFilter_RawData[clr]=list(self.WaveRawDataDic[clr][col]-self.WaveDataWithMaxFilterDic[clr][col])\n return WaveFilter_RawData;\n \n def CalcCorrectionArrayOLD(self):\n \n WaveFilter_RawData=self.CalcWaveAfterFilterSubstraction();\n \n minDistpC=pd.DataFrame();\n CorrectionArr=[]\n for i in range(len(WaveFilter_RawData['Black'])):\n minDistpC=pd.DataFrame();\n for clrD in self.ColorList:\n difList={}\n for clr in ColorList:\n if clr == clrD:\n continue;\n difList[(abs(WaveFilter_RawData[clrD][i]-WaveFilter_RawData[clr][i]))]=clr;\n tmpList=list(abs(np.array(list(difList.keys()))));\n \n minVal=min(tmpList);\n if len(self.ColorList) > 4: \n \n \n tmpList.remove(min(tmpList)) \n DistanceVal=math.sqrt(math.pow(minVal,2)+math.pow(min(tmpList),2));\n minDistpC=pd.concat([minDistpC,pd.DataFrame([[DistanceVal,difList[minVal],difList[min(tmpList)]]])],axis=0).rename(index={0:clrD})\n else:\n\n DistanceVal=math.sqrt(math.pow(minVal,2)+math.pow(min(tmpList),2));\n minDistpC=pd.concat([minDistpC,pd.DataFrame([[DistanceVal,difList[minVal],difList[min(tmpList)]]])],axis=0).rename(index={0:clrD})\n \n clrName=pd.Series(); \n clrName=minDistpC[[0]].idxmin()\n ColssetCols=[]\n ColssetCols.append(WaveFilter_RawData[clrName[0]][i])\n for k in range(1,len(minDistpC.columns)):\n ColssetCols.append(WaveFilter_RawData[minDistpC[k][clrName[0]]][i])\n \n CorrectionArr.append(np.mean(ColssetCols)) \n \n return CorrectionArr\n \n def CalcCorrectionArray(self):\n WaveFilter_RawData=self.CalcWaveAfterFilterSubstraction();\n \n minDistpC=pd.DataFrame();\n CorrectionArr=[]\n for i in range(len(WaveFilter_RawData['Black'])):\n minDistpC=pd.DataFrame();\n ##Build dic of distance for each color\n count=0\n for clrD in self.ColorList:\n difList={}\n for clr in self.ColorList:\n if clr == clrD:\n continue;\n difList[abs(WaveFilter_RawData[clrD][i]-WaveFilter_RawData[clr][i])]=clr;\n count=count+1;\n tmpList=list((np.array(list(difList.keys()))));\n tmpList.sort()\n DistanceVal=0;\n listToAdd=[]\n # minVal=min(tmpList);\n if len(self.ColorList) > 4: \n NieghborColors = NieghborColorsFor7colrs-1;\n else:\n NieghborColors = 1; \n for nbr in range(NieghborColors): \n DistanceVal=DistanceVal+math.pow(tmpList[nbr],2);\n 
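# accumulate squared gaps to the nearest-residue neighbor colors; the sqrt below turns this into an L2 distance\n 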
listToAdd.append(difList[tmpList[nbr]])\n DistanceVal=math.sqrt(DistanceVal);\n listToAdd= [DistanceVal]+listToAdd\n minDistpC=pd.concat([minDistpC,pd.DataFrame([listToAdd])],axis=0).rename(index={0:clrD})\n # else:\n \n clrName=pd.Series(); \n clrName=minDistpC[[0]].idxmin()\n ColssetCols=[]\n ColssetCols.append(WaveFilter_RawData[clrName[0]][i])\n for k in range(1,len(minDistpC.columns)):\n ColssetCols.append(WaveFilter_RawData[minDistpC[k][clrName[0]]][i])\n \n CorrectionArr.append(np.mean(ColssetCols))\n \n return CorrectionArr\n \n def correctWaveRawData(self):\n \n CorrectionArr=self.CalcCorrectionArray();\n WaveRawDataDicAfterCorr={};\n WaveDataWithMaxFilterDicAfterCorr={};\n for clr in self.ColorList:\n WaveRawDataDicAfterCorr[clr]=self.WaveRawDataDic[clr]['Mean']-CorrectionArr;\n WaveDataWithMaxFilterDicAfterCorr[clr]=pd.Series(savgol_filter(WaveRawDataDicAfterCorr[clr], MaxWaveWindow, S_g_Degree))\n \n return WaveRawDataDicAfterCorr,WaveDataWithMaxFilterDicAfterCorr,CorrectionArr \n \n \n\n \ndef CalcMeanAndTilt(WaveRawDataDic,WaveDataWithMaxFilterDic,PHloc):\n PHoffSet={}\n PHtilt={}\n \n PHoffsetPerH={}\n PHtiltPerH={}\n \n for ColorForDisplay in ColorList: \n try:\n y=WaveRawDataDic[ColorForDisplay]['Mean']-WaveDataWithMaxFilterDic[ColorForDisplay]['Mean'];\n except:\n y=WaveRawDataDic[ColorForDisplay]-WaveDataWithMaxFilterDic[ColorForDisplay];\n t=list(y);\n tlt=t.copy();\n PHoffsetPerHList=[]\n PHtiltPerHList=[]\n # x=[]\n # tlt1=[]\n for i in range(len(PHloc)+1):\n # for i in range(9):\n\n if i==0:\n PHrangeForCalc=slice(PHloc[0]-PHpoitToIgnor);\n indexSlice=slice(PHloc[0])\n PHrange=abs(PHloc[0]);\n else:\n if i== len(PHloc):\n PHrangeForCalc=slice(PHloc[i-1]+PHpoitToIgnor,len(y));\n indexSlice=slice(PHloc[i-1],len(y))\n PHrange=abs(len(y)-PHloc[i-1]);\n # break;\n\n else:\n \n PHrangeForCalc=slice(PHloc[i-1]+PHpoitToIgnor,PHloc[i]-PHpoitToIgnor+1);\n indexSlice=slice(PHloc[i-1],PHloc[i])\n PHrange=abs(PHloc[i]-PHloc[i-1]); \n \n Points=y[PHrangeForCalc].index*DistanceBtWPointMM\n PHoffsetPerHList.append(int(np.mean(y[PHrangeForCalc])))\n z=np.polyfit(list(Points), list(y[PHrangeForCalc]), 1)\n tlt[PHrangeForCalc]=list(z[0]*(Points)+z[1])\n t[indexSlice]=[np.mean(y[PHrangeForCalc])]*PHrange;\n PHtiltPerHList.append((z[0]))\n # x=x+list(y[PHrangeForCalc].index)\n # tlt1=tlt1+tlt[PHrangeForCalc]\n \n \n PHoffSet[ColorForDisplay]=t\n PHtilt[ColorForDisplay]=tlt\n ## For Table plot ##\n PHoffsetPerH[ColorForDisplay]=PHoffsetPerHList\n PHtiltPerH[ColorForDisplay]=PHtiltPerHList\n \n return PHoffSet,PHtilt,PHoffsetPerH,PHtiltPerH \n\n\n\n\n \nclass PlotGraphPlotly(CalcWaveFromRawData):\n def __init__(self ,pthF,side,Panel,ColorList):\n super().__init__(pthF,side,Panel)\n self.ColorList = ColorList;\n \n \n\n\n def ShowWaveRawData_SubOffset_PerCycle(self,PlotTitle,offSetType,fileName,pnl):\n fig= go.Figure()\n for ColorForDisplay in self.ColorList: \n db=self.ArrangeRawDataForAnalize(ColorForDisplay);\n \n if ColorForDisplay=='Yellow':\n ColorForDisplay='gold'; \n \n col=list(db.columns) \n rnge=range(len(col))\n \n \n for i in rnge:\n if offSetType == 'Left Side':\n offSet=db[i+1][0];\n if offSetType == 'Right Side':\n offSet=db[i+1][(len(db[i+1]))-1] \n if offSetType == 'Middle':\n offSet=np.min(db[i+1][int(len(db[i+1])/2)-50:int(len(db[i+1])/2)+50])\n if offSetType == 'Average All':\n offSet=np.mean(db[i+1])\n if offSetType == 'Average Left Right':\n offSet=np.mean([db[i+1][0],db[i+1][(len(db[i+1]))-1]]) \n fig.add_trace(\n go.Scatter(y=list(db[i+1]-offSet),line_color= 
ColorForDisplay,\n name='Cycle '+str(i+1)+' '+'Panel '+str(pnl)+' ' +ColorForDisplay))\n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n plot(fig,filename=fileName+' '+str(pnl)+' '+self.side+\".html\") \n \n return fig\n \n def CalcSTDbyOffset(self,offSet,db):\n \n col=list(db.columns)\n rnge=range(len(col))\n dbOffset=pd.DataFrame()\n STDOffset=[]\n for i in rnge:\n \n dbOffset=pd.concat([dbOffset,db[i+1]-offSet[i]],axis=1);\n for j,i in enumerate(dbOffset.index):\n if j == 0:\n continue;\n STDOffset.append(np.std(dbOffset.loc[i,:]))\n \n return STDOffset;\n \n def ShowSTDforRawWaveWithOffset(self,PlotTitle,fileName,pnl):\n \n fig= go.Figure()\n for ColorForDisplay in self.ColorList:\n db=self.ArrangeRawDataForAnalize(ColorForDisplay);\n \n if ColorForDisplay=='Yellow':\n ColorForDisplay='gold'; \n \n offSet1=[]\n offSet2=[]\n offSet3=[]\n offSetAvgAll=[]\n offSetLRavrg=[]\n \n rnge=range(len(db.columns))\n for i in rnge:\n offSet1.append(db[i+1][0])\n offSet2.append(np.min(db[i+1][int(len(db[i+1])/2)-50:int(len(db[i+1])/2)+50]))\n offSet3.append(db[i+1][(len(db[i+1]))-1])\n \n offSetAvgAll.append(np.mean(db[i+1]))\n offSetLRavrg.append(np.mean([db[i+1][0],db[i+1][(len(db[i+1]))-1]]))\n \n middleSTD=self.CalcSTDbyOffset(offSet2,db)\n RightSTD=self.CalcSTDbyOffset(offSet3,db)\n LeftSTD=self.CalcSTDbyOffset(offSet1,db)\n \n AvgAllSTD=self.CalcSTDbyOffset(offSetAvgAll,db)\n LRavrgSTD=self.CalcSTDbyOffset(offSetLRavrg,db)\n\n \n fig.add_trace(\n go.Scatter(y=LeftSTD,line_color= ColorForDisplay,\n name='Panel '+str(pnl)+' ' +ColorForDisplay+' LeftSide'))\n \n fig.data[len(fig.data)-1].visible = 'legendonly';\n \n fig.add_trace(\n go.Scatter(y=middleSTD,line_color= ColorForDisplay,\n name='Panel '+str(pnl)+' ' +ColorForDisplay+' Middle'))\n fig.data[len(fig.data)-1].visible = 'legendonly';\n \n fig.add_trace(\n go.Scatter(y=RightSTD,line_color= ColorForDisplay,\n name='Panel '+str(pnl)+' ' +ColorForDisplay+' RightSide'))\n fig.data[len(fig.data)-1].visible = 'legendonly';\n \n fig.add_trace(\n go.Scatter(y=AvgAllSTD,line_color= ColorForDisplay,\n name='Panel '+str(pnl)+' ' +ColorForDisplay+' Average All'))\n \n fig.add_trace(\n go.Scatter(y=LRavrgSTD,line_color= ColorForDisplay,\n name='Panel '+str(pnl)+' ' +ColorForDisplay+' Left Right Average'))\n \n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n\n fig.update_layout(title=self.side+' '+PlotTitle)\n\n # plot(fig00)\n plot(fig,filename=fileName+' '+str(pnl)+' '+self.side+\".html\") \n \n return fig\n\n \n \n def ShowWaveRawData_SubOffset_PerPanel(self,PlotTitle,offSetType,fileName,CcleNmber):\n \n fig = go.Figure()\n\n for Pnl in range(1,12): \n for ColorForDisplay in self.ColorList:\n db=CalcWaveFromRawData(self.pthF,self.side,Pnl).ArrangeRawDataForAnalize(ColorForDisplay);\n if ColorForDisplay=='Yellow':\n ColorForDisplay='gold';\n \n\n if offSetType == 'Left Side':\n offSet=db[CcleNmber][0];\n \n if offSetType == 'Right Side':\n offSet=db[CcleNmber][(len(db[CcleNmber]))-1] \n if offSetType == 'Middle':\n offSet=np.min(db[CcleNmber][int(len(db[CcleNmber])/2)-50:int(len(db[CcleNmber])/2)+50]) \n \n if offSetType == 'Average All':\n offSet=np.mean(db[CcleNmber])\n \n if offSetType == 'Average Left Right':\n offSet=np.mean([db[CcleNmber][0],db[CcleNmber][(len(db[CcleNmber]))-1]]) \n\n fig.add_trace(\n go.Scatter(y=list(db[CcleNmber]-offSet),line_color= ColorForDisplay,\n name='Cycle '+str(CcleNmber)+' '+'Panel '+str(Pnl)+' ' +ColorForDisplay))\n \n 
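# namelength=-1 tells Plotly to show the full trace name in hover labels instead of truncating it\n 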
fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n fig.update_layout(title=self.side+' '+PlotTitle)\n\n # plot(fig00)\n plot(fig,filename=fileName+' '+str(CcleNmber)+' '+self.side+\".html\") \n \n return fig\n\n def PlotCIScurve(self,cisCurve,PlotTitle,fileName): \n \n \n fig = go.Figure()\n \n \n fig.add_trace(\n go.Scatter(y=cisCurve,\n name='CIS '+self.side+' curve'))\n \n \n \n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n plot(fig,filename=fileName+\"CIScurve\"+self.side+\".html\") \n \n \n if len(cisCurve)<1:\n print('************************************************************************************')\n print(fileName+' Has No CIS '+self.side+' curve information')\n print('************************************************************************************')\n \n return fig\n \n def PlotRegistrationBetweenWavePrints(self,DFdicPerClr,MainColor,rgistBtwPntStartCycle,rgistBtwPntEndCycle,fileName):\n \n fig = go.Figure()\n \n for clr in self.ColorList:\n if clr == MainColor:\n continue;\n \n for col in range(rgistBtwPntStartCycle,rgistBtwPntEndCycle+1): \n # for col in DFdicPerClr[clr].columns:\n \n fig.add_trace(\n go.Scatter(y=DFdicPerClr[clr][col],line_color= clr,\n name='Registration for cycle '+str(col)+' color '+clr))\n \n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n fig.update_layout(title=self.side+' wave registration normalized to '+MainColor+' for Cycle Start ='+str(rgistBtwPntStartCycle)+' Cycle End='+str(rgistBtwPntEndCycle)+' ---> '+f)\n \n \n # plot(fig00)\n plot(fig,filename=fileName+' '+str(rgistBtwPntStartCycle)+'_'+str(rgistBtwPntEndCycle)+'_'+self.side+\".html\") \n \n return fig\n\n def PlotWaveDataAfterApliedAVRGCorrection(self,WaveRawDataDic,WaveRawDataDicAfterCorr,CorrectionArr,PlotTitle,fileName):\n \n fig = make_subplots(specs=[[{\"secondary_y\": True}]]) \n for clr in self.ColorList:\n \n OffsetBefore= WaveRawDataDic[clr]['Mean'][0] \n \n \n fig.add_trace(\n go.Scatter(y=list(WaveRawDataDic[clr]['Mean']-OffsetBefore),line_color= clr,line=dict(dash='dot'),\n name='Wave Raw Data Before Corr '+ clr),secondary_y=False)\n fig.add_trace(\n go.Scatter(y=list(WaveRawDataDicAfterCorr[clr]-OffsetBefore),line_color= clr,\n name='Wave Raw Data After Corr '+ clr),secondary_y=False)\n fig.add_trace(\n go.Scatter(y=list(CorrectionArr),line=dict(color=\"#d8576b\", width=3),\n name='Average Correction'),secondary_y=True)\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n # plot(fig00)\n plot(fig,filename=fileName+' ' +self.side+\".html\")\n \n return fig\n def PlotDesidueBeforAfterAndAverageCorr(self,WaveRawDataDic,WaveRawDataDicAfterCorr,WaveDataWithMaxFilterDic,WaveDataWithMaxFilterDicAfterCorr,CorrectionArr,PlotTitle,fileName):\n \n fig = go.Figure()\n \n for clr in self.ColorList:\n \n ResidueBEFORE=(WaveRawDataDic[clr]['Mean']-WaveDataWithMaxFilterDic[clr]['Mean']);\n ResidueAFTER=(WaveRawDataDicAfterCorr[clr]-WaveDataWithMaxFilterDicAfterCorr[clr]);\n # fig.add_trace(\n # go.Scatter(y=list(WaveRawDataDic[clr]['Mean']-WaveDataWithMaxFilterDic[clr]['Mean']),line_color= clr,line=dict(dash='dot'),\n # name='Wave Raw Data Before Corr '+ clr))\n fig.add_trace(\n go.Scatter(y=ResidueBEFORE,line_color= clr,line=dict(dash='dot'),\n name='Wave Raw Data Before Corr '+ clr))\n fig.add_trace(\n go.Scatter(y=ResidueAFTER,line_color= clr,\n name='Wave Raw Data After Corr '+ clr))\n \n fig.add_trace(\n 
go.Scatter(y=WaveRawDataDic[clr]['Mean']-WaveRawDataDicAfterCorr[clr],line_color= clr,line=dict(dash='dash'),\n name='WaveRawData-WaveRawDataDicAfter '+ clr))\n fig.add_trace(\n go.Scatter(y=list(CorrectionArr),line=dict(color=\"#d8576b\", width=3),\n name='Average Correction'))\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n # plot(fig00)\n plot(fig,filename=fileName+' ' +self.side+\".html\")\n \n return fig \n \n def PlotWaveDataResidue(self,WaveRawDataDic,WaveDataWithMaxFilterDic,PHloc,PHoffSet,PHtilt,PlotTitle,fileName):\n \n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n \n for clr in self.ColorList: \n lineColor=clr;\n \n \n if lineColor=='Yellow':\n lineColor='gold';\n \n fig.add_trace(\n go.Scatter(y=WaveRawDataDic[clr],line_color= lineColor,\n name='WaveData Raw '+str('Mean')+' color '+clr), secondary_y=False)\n \n fig.add_trace(\n go.Scatter(y=WaveDataWithMaxFilterDic[clr],line_color= lineColor,\n name='WaveData with Filter color '+clr), secondary_y=False)\n \n fig.add_trace(\n go.Scatter(y=WaveRawDataDic[clr]-WaveDataWithMaxFilterDic[clr],line_color= lineColor,\n name='Fiter - Raw color '+clr), secondary_y=True)\n \n # ymax=max(WaveRawDataDic[ColorList[0]]-WaveDataWithMaxFilterDic[self.ColorList[0]])\n ymax=20\n \n for i,PHlocMem in enumerate(PHloc):\n fig.add_trace(go.Scatter(x=[PHlocMem], y=[ymax],\n marker=dict(color=\"green\", size=6),\n mode=\"markers\",\n text='PH #'+str(i),\n # font_size=18,\n hoverinfo='text'),secondary_y=True)\n fig.data[len(fig.data)-1].showlegend = False\n \n fig.add_vline(x=PHlocMem, line_width=2, line_dash=\"dash\", line_color=\"green\")\n \n \n \n fig.add_trace(\n go.Scatter(y=PHoffSet[clr],line_color= lineColor,\n name='Average(Fiter - Raw) color '+clr), secondary_y=True)\n \n \n \n fig.add_trace(\n go.Scatter(y=PHtilt[clr],line_color= lineColor,line=dict(dash='dot'),\n name='Tilt(Fiter - Raw) color '+clr), secondary_y=True)\n \n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n plot(fig,filename=self.side+' '+fileName+\".html\") \n \n return fig\n \n \n def PlotWaveDataSUBAveragePerPanelPerCycle_withMAX_MIN_diff(self,WaveRawDataDic,offSetType,PlotTitle,fileName):\n \n fig = go.Figure()\n max_vals=pd.DataFrame()\n min_vals=pd.DataFrame()\n max_valsPerPanel=pd.DataFrame()\n min_valsPerPanel=pd.DataFrame()\n for pnl in range(1,12):\n WaveRawDataDic=CalcWaveFromRawData(pthF,side,pnl).CreateDicOfWaveRawData();\n \n WaveRawDataDic_mean_offset=WaveRawDataDic;\n max_valsCLR=pd.DataFrame()\n min_valsCLR=pd.DataFrame()\n for clr in self.ColorList: \n lineColor=clr;\n \n \n if lineColor=='Yellow':\n lineColor='gold';\n for col in WaveRawDataDic[clr].columns:\n if col == 'Mean':\n WaveRawDataDic_mean_offset[clr].drop(col, axis=1, inplace=True)\n \n break;\n if col< StartCycle4Avr:\n WaveRawDataDic_mean_offset[clr].drop(col, axis=1, inplace=True)\n \n continue;\n \n if offSetType == 'Average All':\n offset=np.mean(WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean']);\n \n if offSetType == 'Average Left Right':\n WaveSUBmean=WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean'];\n offset=np.mean([WaveSUBmean[0],WaveSUBmean[len(WaveSUBmean)-1]]);\n \n WaveRawDataDic_mean_offset[clr][col]=WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean']-offset\n \n fig.add_trace(\n go.Scatter(y=WaveRawDataDic_mean_offset[clr][col],line_color= lineColor,\n name='WaveData Raw cycle '+str(col)+' - Mean'+' color '+clr+' Panel '+str(pnl))) \n \n if not col == Cycle2Display:\n 
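# 'legendonly' hides the trace by default but keeps it toggleable from the legend\n 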
fig.data[len(fig.data)-1].visible = 'legendonly';\n if not pnl in Panel2Disply:\n fig.data[len(fig.data)-1].visible = 'legendonly';\n \n max_valsCLR=pd.concat([max_valsCLR,WaveRawDataDic_mean_offset[clr].max(axis=1)],axis=1).rename(columns={0: clr})\n min_valsCLR=pd.concat([min_valsCLR,WaveRawDataDic_mean_offset[clr].min(axis=1)],axis=1).rename(columns={0: clr}) \n \n max_valsPerPanel =pd.concat([max_valsPerPanel, max_valsCLR.max(axis=1)],axis=1).rename(columns={0: pnl}).dropna()\n min_valsPerPanel =pd.concat([min_valsPerPanel, min_valsCLR.min(axis=1)],axis=1).rename(columns={0: pnl}).dropna()\n \n max_vals=max_valsPerPanel.max(axis=1)\n min_vals=min_valsPerPanel.min(axis=1)\n \n \n fig.add_trace(\n go.Scatter(y=list( max_vals),line=dict(color=colorPNL[3], width=3),\n name='Max value')) \n \n fig.add_trace(\n go.Scatter(y=list( min_vals),line=dict(color=colorPNL[3], width=3),\n name='Min value')) \n \n # if not pnl in Panel2Disply:\n # fig.data[len(fig.data)-1].visible = 'legendonly';\n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n # fig.update_layout(title=PlotGraphPlotly(pthF,side,Panel,ColorList).side+' '+PlotTitle ,subtitle='Max-Min Mean ='+\"{:.3f}\".format(np.mean(max_vals-min_vals))+' Max-Min STD ='+\"{:.3f}\".format(np.std(max_vals-min_vals)))\n \n \n \n fig.update_layout(\n title=self.side+' '+PlotTitle+\"
\" +'Max-Min Mean ='+\"{:.3f}\".format(np.mean(max_vals-min_vals))+' Max-Min '+str(MaxMinPcnt)+'% ='+\"{:.3f}\".format(np.percentile(max_vals-min_vals, MaxMinPcnt))+\"\",\n title_font=dict(size=16)\n )\n \n \n plot(fig,filename=self.side+' '+fileName+\".html\") \n \n return fig \n \n def PlotWaveDataSUBAveragePerPanelPerCycle(self,WaveRawDataDic,offSetType,PlotTitle,fileName):\n \n fig = go.Figure()\n \n for pnl in range(1,12):\n WaveRawDataDic=CalcWaveFromRawData(pthF,side,pnl).CreateDicOfWaveRawData();\n \n \n for clr in self.ColorList: \n lineColor=clr;\n \n \n if lineColor=='Yellow':\n lineColor='gold';\n for col in WaveRawDataDic[clr].columns:\n if col == 'Mean':\n break;\n if col< StartCycle4Avr:\n continue;\n \n if offSetType == 'Average All':\n offset=np.mean(WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean']);\n if offSetType == 'Average Left Right':\n WaveSUBmean=WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean'];\n offset=np.mean([WaveSUBmean[0],WaveSUBmean[len(WaveSUBmean)-1]]);\n\n fig.add_trace(\n go.Scatter(y=WaveRawDataDic[clr][col]-WaveRawDataDic[clr]['Mean']-offset,line_color= lineColor,\n name='WaveData Raw cycle '+str(col)+' - Mean'+' color '+clr+' Panel '+str(pnl))) \n \n if not col == Cycle2Display:\n fig.data[len(fig.data)-1].visible = 'legendonly';\n if not pnl in Panel2Disply:\n fig.data[len(fig.data)-1].visible = 'legendonly';\n\n \n \n \n fig.update_layout(\n hoverlabel=dict(\n namelength=-1\n )\n )\n fig.update_layout(title=self.side+' '+PlotTitle)\n \n \n plot(fig,filename=self.side+' '+fileName+\".html\") \n \n return fig\n \n def PlotOffsetTabel(self,PHoffsetPerH,PlotTitle,fileName):\n \n PHname=[]\n header=[]\n ListofList=[]\n \n \n for i in range(24):\n PHname.append('PH NUMBER# '+str(i))\n \n for col in self.ColorList:\n header.append(col+' Offset')\n # header.append(col+' Tilt')\n new_list = [-number for number in PHoffsetPerH[col]]\n ListofList.append(new_list)\n # ListofList.append(PHtiltPerH[col])\n ####FRONT \n figTable = go.Figure(data=[go.Table(header=dict(values=['PH#']+header),\n cells=dict(values=[PHname]+ListofList,font=dict(color='black', size=15)))\n ])\n \n figTable.update_layout(title=self.side+' '+PlotTitle)\n \n plot(figTable,filename=self.side+' '+fileName+\".html\") \n \n return figTable\n \n def PlotTiltTable(self,PHtiltPerH,ColorLevelsTilt,DivideByNumTilt,PlotTitle,fileName):\n \n PHname=[]\n for i in range(24):\n PHname.append('PH NUMBER# '+str(i)) \n headerTilt=[]\n ListofListTilt=[]\n \n for col in self.ColorList:\n headerTilt.append(col+' Tilt')\n # header.append(col+' Tilt')\n ListofListTilt.append(PHtiltPerH[col])\n \n backGroundCLR='rgb(200, 200, 200)'\n colors = n_colors(backGroundCLR, 'rgb(200, 0, 0)', ColorLevelsTilt, colortype='rgb')\n fillcolorList=[]\n formatList=[]\n formatList.append(\"\")\n for i in range(len(ListofListTilt)):\n fillcolorList.append(np.array(colors)[(abs(np.asarray(ListofListTilt[i]))/DivideByNumTilt).astype(int)])\n formatList.append(\"0.2f\")\n \n \n ####FRONT Tilt\n figTableTilt = go.Figure(data=[go.Table(header=dict(values=['PH#']+headerTilt),\n cells=dict(values=[PHname]+ListofListTilt,fill_color=[backGroundCLR]+fillcolorList,font=dict(color='black', size=15),format=formatList))\n ])\n \n figTableTilt.update_layout(title=self.side+' '+PlotTitle) \n plot(figTableTilt,filename=self.side+' '+fileName+\".html\") \n \n return figTableTilt\n \n \n def PlotFRONT_BACKDeltaTable(self,PHoffsetPerHFRONT,PHoffsetPerHBACK,DivideByNum,ColorLevels,PlotTitle,fileName):\n \n PHname=[]\n for i in range(24):\n 
PHname.append('PH NUMBER# '+str(i)) \n ListofListDelta=[] \n header=[]\n fillcolorList=[] \n backGroundCLR='rgb(200, 200, 200)'\n colors = n_colors(backGroundCLR, 'rgb(200, 0, 0)', ColorLevels, colortype='rgb')\n \n for col in ColorList:\n header.append(col+'Delta(Front-Back) Offset')\n for col in ColorList:\n ListofListDelta.append(list(np.asarray(PHoffsetPerHFRONT[col])-np.asarray(PHoffsetPerHBACK[col])))\n formatList=[]\n formatList.append(\"\") \n for i in range(len(ListofListDelta)):\n # x2 = 30 * np.ones(len(ListofListDelta[i]))\n fillcolorList.append(np.array(colors)[(abs(np.asarray(ListofListDelta[i]))/DivideByNum).astype(int)])\n formatList.append(\"0.2f\")\n\n \n \n figTableDelta = go.Figure(data=[go.Table(header=dict(values=['PH#']+header),\n cells=dict(values=[PHname]+ListofListDelta,fill_color=[backGroundCLR]+fillcolorList,font=dict(color='black', size=15),format=formatList))\n ])\n figTableDelta.update_layout(title=PlotTitle) \n \n plot(figTableDelta,filename=fileName+\"_.html\") \n \n return figTableDelta;\n \n \n def PlotFRONT_BACKAverageTable(self,PHoffsetPerHFRONT,PHoffsetPerHBACK,PlotTitle,fileName):\n \n PHname=[]\n for i in range(24):\n PHname.append('PH NUMBER# '+str(i)) \n ListofListAverage=[] \n header=[]\n fillcolorList=[] \n \n \n for col in ColorList:\n header.append(col+'Average(Front&Back) Offset')\n for col in ColorList:\n ListofListAverage.append(list(-(np.asarray(PHoffsetPerHFRONT[col])+np.asarray(PHoffsetPerHBACK[col]))/2))\n \n \n \n \n \n figTableAverage = go.Figure(data=[go.Table(header=dict(values=['PH#']+header),\n cells=dict(values=[PHname]+ListofListAverage,font=dict(color='black', size=15)))\n ])\n figTableAverage.update_layout(title=PlotTitle) \n \n plot(figTableAverage,filename=fileName+\"_.html\") \n \n return figTableAverage;\n# plt.figure()\n# plt.plot(y)\n# plt.plot(tlt)\n# plt.plot(x,tlt1,'o')\n\n\n \n# WaveRawDataDic=WaveRawDataDicFRONT;\n# WaveDataWithMaxFilterDic=WaveDataWithMaxFilterDicFRONT;\n# PHloc=PHlocFRONT;\n\n#################################################################################\n#################################################################################\n#################################################################################\n\nfrom tkinter import filedialog\nfrom tkinter import *\nroot = Tk()\nroot.withdraw()\n# pthF = filedialog.askdirectory()\n\n\npthF = filedialog.askopenfilename()\n\nf=pthF.split('/')[len(pthF.split('/'))-1]\n\nDirectorypathF=pthF.replace(f,\"\")[:-1]\n\n\n\n# f=pthF.split('/')[len(pthF.split('/'))-1]\n# DirectorypathF=pthF.replace(f,'');\nos.chdir(DirectorypathF)\n\nside='Front';\n\nColorList= CalcWaveFromRawData(pthF,side,Panel).getColors();\n\nLocatorIndex= CalcWaveFromRawData(pthF,side,Panel).GetLocatorIndex(ColorForDisplay);\n# FlatList= CalcWaveFromRawData(pthF,side,Panel,ColorForDisplay).getNumberOfFlats();\nif CIScurve:\n cisBACKold,cisFRONTold=CIScurveFromRawData(pthF).GetCIScurveOldVersion()\n \n \n if len(cisFRONTold) == 0:\n cisBACKold,cisFRONTold=CIScurveFromRawData(pthF).GetCIScurveOldVersion_SecondTry()\n\n cisBACKnew,cisFRONTnew=CIScurveFromRawData(pthF).GetCIScurveNewVersion()\n\n if len(cisFRONTnew) == 0:\n cisBACKnew,cisFRONTnew=CIScurveFromRawData(pthF).GetCIScurveNewVersion_secondTry()\n \n \n \nif registrationBetweenWavePrints:\n DFdicPerClrFRONT = CalcRegistrationFromWaveData(pthF,side,Panel,ColorList,MainColor,StartCycle).DeltaForCycleAndColor() \n try:\n DFdicPerClrBACK = 
CalcRegistrationFromWaveData(pthF,'Back',Panel,ColorList,MainColor,StartCycle).DeltaForCycleAndColor() \n except:\n 1\n\n\nWaveRawDataDicFRONT=CalcWaveFromRawData(pthF,side,Panel).CreateDicOfWaveRawData();\n# WaveDataWithMaxFilterDicFRONT=CalcWaveFromRawData(pthF,side,Panel).FilterWaveDataDic()\nWaveDataWithMaxFilterDicFRONT=CalcWaveFromRawData(pthF,side,Panel).FilterWaveDataDicTEST(WaveRawDataDicFRONT)\n\nPHlocFRONT= CalcWaveFromRawData(pthF,side,Panel).CalcPHlocation(ColorForDisplay)\ntry:\n WaveRawDataDicBACK=CalcWaveFromRawData(pthF,'Back',Panel).CreateDicOfWaveRawData();\n WaveDataWithMaxFilterDicBACK=CalcWaveFromRawData(pthF,'Back',Panel).FilterWaveDataDic()\n PHlocBACK= CalcWaveFromRawData(pthF,'Back',Panel).CalcPHlocation(ColorForDisplay)\n\nexcept:\n 1\n\n\n\n################ Calc offset and tilt\n\nPHoffSetFRONT,PHtiltFRONT,PHoffsetPerHFRONT,PHtiltPerHFRONT=CalcMeanAndTilt(WaveRawDataDicFRONT,WaveDataWithMaxFilterDicFRONT,PHlocFRONT)\n\ntry:\n PHoffSetBACK,PHtiltBACK,PHoffsetPerHBACK,PHtiltPerHBACK=CalcMeanAndTilt(WaveRawDataDicBACK,WaveDataWithMaxFilterDicBACK,PHlocBACK)\nexcept:\n 1\n############\n\n#################### Calc curev, filetr, offset, tilt after correction\n\nWaveRawDataDicAfterCorrFRONT,WaveDataWithMaxFilterDicAfterCorrFRONT,CorrectionArrFRONT=RepareDistortions(WaveRawDataDicFRONT,WaveDataWithMaxFilterDicFRONT,ColorList).correctWaveRawData();\ntry:\n WaveRawDataDicAfterCorrBACK,WaveDataWithMaxFilterDicAfterCorrBACK,CorrectionArrBACK=RepareDistortions(WaveRawDataDicBACK,WaveDataWithMaxFilterDicBACK,ColorList).correctWaveRawData();\nexcept:\n 1\n\nPHoffSetFRONTAfterCorr,PHtiltFRONTAfterCorr,PHoffsetPerHFRONTAfterCorr,PHtiltPerHFRONTAfterCorr=CalcMeanAndTilt(WaveRawDataDicAfterCorrFRONT,WaveDataWithMaxFilterDicAfterCorrFRONT,PHlocFRONT)\n\ntry:\n PHoffSetBACKAfterCorr,PHtiltBACKAfterCorr,PHoffsetPerHBACKAfterCorr,PHtiltPerHBACKAfterCorr=CalcMeanAndTilt(WaveRawDataDicAfterCorrBACK,WaveDataWithMaxFilterDicAfterCorrBACK,PHlocBACK)\nexcept:\n 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#######################################################################################\n#######################################################################################\n#############################PLOT############################################\n#############################Wave RawData Per Cycle###################################\n##### Front\nif plotPerCycle:\n offSetType='Average All'\n PlotTitle='- Left Side Offset WAVE RAW DATA --->'+f +' offSetType='+offSetType; # Can modify Plot title\n fileName=f+\" Left Side WaveResult_RawDataPerCycle Panel Number \"; # Can modify File nmae\n side='Front'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figLeftsideFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerCycle(PlotTitle,offSetType,fileName,Panel)\n \n \n offSetType='Average Left Right' \n PlotTitle='- Right Side Offset WAVE RAW DATA --->'+f+' offSetType='+offSetType;# Can modify Plot title\n fileName=f+\" Right Side WaveResult_RawDataPerCycle Panel Number \";# Can modify File nmae\n side='Front'\n db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figRightsideFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerCycle(PlotTitle,offSetType,fileName,Panel)\n \n \n \n PlotTitle='- STD Side Offset WAVE RAW DATA --->'+f# Can modify Plot title\n fileName=f+\" STD SideOffset_ WaveResult_RawDataPerColor Panel Number \"# Can modify File nmae\n side='Front'\n 
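# per-color STD across cycles for each offset strategy (left/middle/right/averages)\n 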
figSTDFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowSTDforRawWaveWithOffset(PlotTitle,fileName,Panel)\n \n #### Back\n try:\n offSetType='Average All'\n PlotTitle='- Left Side Offset WAVE RAW DATA --->'+f+' offSetType='+offSetType;# Can modify Plot title\n fileName=f+\" Left Side WaveResult_RawDataPerCycle Panel Number \";# Can modify File nmae\n side='Back'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figLeftsideBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerCycle(PlotTitle,offSetType,fileName,Panel)\n \n \n offSetType='Average Left Right' \n PlotTitle='- Right Side Offset WAVE RAW DATA --->'+f+' offSetType='+offSetType;# Can modify Plot title\n fileName=f+\" Right Side WaveResult_RawDataPerCycle Panel Number \";# Can modify File nmae\n side='Back'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figRightsideBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerCycle(PlotTitle,offSetType,fileName,Panel)\n \n # offSetType='Middle'\n # PlotTitle='- Middle Offset WAVE RAW DATA --->'+f;# Can modify Plot title\n # fileName=f+\" Middle WaveResult_RawDataPerCycle Panel Number \";# Can modify File nmae\n # side='Back'\n # # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n # figMiddlesideBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerCycle(PlotTitle,offSetType,fileName,Panel)\n \n PlotTitle='- STD Side Offset WAVE RAW DATA --->'+f# Can modify Plot title\n fileName=f+\" STD SideOffset_ WaveResult_RawDataPerColor Panel Number \"# Can modify File nmae\n side='Back'\n figSTDBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowSTDforRawWaveWithOffset(PlotTitle,fileName,Panel)\n except:\n 1\n\n########################################################################################\n#############################Wave RawData Per Panel###################################\n##### Front\nif plotPerPanel:\n offSetType='Average All'\n PlotTitle='- offSetType='+offSetType+' WAVE RAW DATA (For one Cycle)--->'+f;\n fileName=f+'- offSetType='+offSetType+\" WaveResult_RawDataPerPanel \";\n side='Front'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figLeftsideFRONTperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n \n \n offSetType='Average Left Right' \n PlotTitle='- offSetType='+offSetType+' WAVE RAW DATA (For one Cycle)--->'+f;\n fileName=f+'- offSetType='+offSetType+\" WaveResult_RawDataPerPanel \";\n side='Front'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figRightsideFRONTperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n # offSetType='Middle'\n # PlotTitle='- Middle Offset WAVE RAW DATA (For one Cycle)--->'+f;\n # fileName=f+\" Right Side WaveResult_RawDataPerPanel \";\n # side='Front'\n # # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n # figMiddlesideFRONTperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n \n #### Back\n try:\n offSetType='Average All'\n PlotTitle='- offSetType='+offSetType+' WAVE RAW DATA (For one Cycle)--->'+f;\n fileName=f+'- offSetType='+offSetType+\" WaveResult_RawDataPerPanel \";\n side='Back'\n # 
db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figLeftsideBACKperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n \n \n offSetType='Average Left Right' \n PlotTitle='- offSetType='+offSetType+' WAVE RAW DATA (For one Cycle)--->'+f;\n fileName=f+'- offSetType='+offSetType+\" WaveResult_RawDataPerPanel \";\n side='Back'\n # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n figRightsideBACKperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n # offSetType='Middle'\n # PlotTitle='- Middle Offset WAVE RAW DATA (For one Cycle)--->'+f;\n # fileName=f+\" Right Side WaveResult_RawDataPerPanel \";\n # side='Back'\n # # db=CalcWaveFromRawData(pthF,side,Panel).ArrangeRawDataForAnalize(ColorForDisplay);\n # figMiddlesideBACKperPanel=PlotGraphPlotly(pthF,side,Panel,ColorList).ShowWaveRawData_SubOffset_PerPanel(PlotTitle,offSetType,fileName,CycleNumber)\n \n except:\n 1\n\n########################################################################################\n#############################Plot Wave Data SUB Average Per Panel Per Cycle###################################\n##### Front\nif WaveDataSUBAverage_PerPanel_PerCycle:\n offSetType='Average All' ;\n PlotTitle='- Wave Behavior- Avi Method --->'+f+' offSetType='+offSetType;\n fileName=f+\" Wave Behavior- Avi Method Offsettype_\"+offSetType;\n side='Front'\n figWaveDataSubAveragePerPanet=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataSUBAveragePerPanelPerCycle_withMAX_MIN_diff(WaveRawDataDicFRONT,offSetType,PlotTitle,fileName);\n\n offSetType='Average Left Right' ;\n PlotTitle='- Wave Behavior- Avi Method --->'+f+' offSetType='+offSetType;\n fileName=f+\" Wave Behavior- Avi Method Offsettype_\"+offSetType;\n side='Front'\n figWaveDataSubAveragePerPanet=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataSUBAveragePerPanelPerCycle_withMAX_MIN_diff(WaveRawDataDicFRONT,offSetType,PlotTitle,fileName);\n \n #####Back\n try:\n offSetType='Average All' #\n PlotTitle='- Wave Behavior- Avi Method --->'+f+' offSetType='+offSetType;\n fileName=f+\" Wave Behavior- Avi Method Offsettype_\"+offSetType;\n \n side='Back'\n figWaveDataSubAveragePerPanet=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataSUBAveragePerPanelPerCycle_withMAX_MIN_diff(WaveRawDataDicBACK,offSetType,PlotTitle,fileName);\n \n offSetType='Average Left Right' # \n PlotTitle='- Wave Behavior- Avi Method --->'+f+' offSetType='+offSetType;\n fileName=f+\" Wave Behavior- Avi Method Offsettype_\"+offSetType;\n side='Back'\n figWaveDataSubAveragePerPanet=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataSUBAveragePerPanelPerCycle_withMAX_MIN_diff(WaveRawDataDicBACK,offSetType,PlotTitle,fileName);\n\n except:\n 1\n\n##################################################################################\n################################CIS Curve \n######FRONT\nif CIScurve:\n\n try:\n PlotTitle='FRONT CIS curve (old version)--->'+f;\n fileName=f+' old ';\n side='Front';\n cisCurve=cisFRONTold\n figCISFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotCIScurve(cisCurve,PlotTitle,fileName);\n except:\n 1\n \n #######BACK\n try:\n PlotTitle='BACK CIS curve (old version)--->'+f;\n fileName=f+' old ';\n side='Back';\n cisCurve=cisBACKold\n figCISBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotCIScurve(cisCurve,PlotTitle,fileName); \n except:\n 1\n\n try:\n 
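# new-version CIS data may be missing on older jobs; the broad except skips the plot quietly\n 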
PlotTitle='FRONT CIS curve (new version)--->'+f;\n fileName=f+' new ';\n side='Front';\n cisCurve=cisFRONTnew\n figCISFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotCIScurve(cisCurve,PlotTitle,fileName);\n except:\n 1\n \n #######BACK\n try:\n PlotTitle='BACK CIS curve (new version)--->'+f;\n fileName=f+' new ';\n side='Back';\n cisCurve=cisBACKnew\n figCISBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotCIScurve(cisCurve,PlotTitle,fileName); \n except:\n 1\n\n##################################################################################\n################################Registration Between Wave Prints\n######FRONT & BACK\nif registrationBetweenWavePrints: ##Yuval method\n try:\n fileName=f\n side='Front'\n figRegistrationBetweenWavePrintsFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotRegistrationBetweenWavePrints(DFdicPerClrFRONT,MainColor,rgistBtwPntStartCycle,rgistBtwPntEndCycle,fileName)\n \n side='Back'\n figRegistrationBetweenWavePrintsBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotRegistrationBetweenWavePrints(DFdicPerClrBACK,MainColor,rgistBtwPntStartCycle,rgistBtwPntEndCycle,fileName)\n except:\n 1\n##################################################################################\n################################ Residue (Wave- S.Go filter) Before and After Aplied AVRG Correction\n######FRONT \nif BeforAndAfterCorr:\n PlotTitle=' Residue (Wave- S.Go filter) before and after correction ---> '+f;\n fileName=f+'Residue _Wave sub S.Go filter_ before and after correction_';\n side='Front';\n figCorrAvrBeforeAndAfterFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotDesidueBeforAfterAndAverageCorr(WaveRawDataDicFRONT,WaveRawDataDicAfterCorrFRONT,WaveDataWithMaxFilterDicFRONT,WaveDataWithMaxFilterDicAfterCorrFRONT,CorrectionArrFRONT,PlotTitle,fileName)\n \n ######BACK\n try:\n \n PlotTitle=' Residue (Wave- S.Go filter) before and after correction ---> '+f;\n fileName=f+'Residue _Wave sub S.Go filter_ before and after correction_';\n side='Back';\n figCorrAvrBeforeAndAfterBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotDesidueBeforAfterAndAverageCorr(WaveRawDataDicBACK,WaveRawDataDicAfterCorrBACK,WaveDataWithMaxFilterDicBACK,WaveDataWithMaxFilterDicAfterCorrBACK,CorrectionArrBACK,PlotTitle,fileName)\n except:\n 1\n\n##################################################################################\n################################Wave Data Before and After Aplied AVRG Correction\n######FRONT \nif BeforAndAfterCorr:\n PlotTitle=' wave raw data before and after correction ---> '+f;\n fileName=f+'wave raw data before and after correction_';\n side='Front';\n figClrBeforeAndAfterFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataAfterApliedAVRGCorrection(WaveRawDataDicFRONT,WaveRawDataDicAfterCorrFRONT,CorrectionArrFRONT,PlotTitle,fileName)\n \n ######BACK\n try:\n \n PlotTitle=' wave raw data before and after correction ---> '+f;\n fileName=f+'wave raw data before and after correction_';\n side='Back';\n figClrBeforeAndAfterBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataAfterApliedAVRGCorrection(WaveRawDataDicBACK,WaveRawDataDicAfterCorrBACK,CorrectionArrBACK,PlotTitle,fileName)\n except:\n 1\n\n#################################################################################\n##################################Plot DX Wave DataResidue After correction\n########FRONT\nif WaveFilterResidue_dxPlot:\n PlotTitle=' After Correction Wave Data S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+' After Correction Wave Data S.Golay _'+ 
str(MaxWaveWindow)\n side='Front';\n figWaveResidueAfterCorrFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataResidue(WaveRawDataDicAfterCorrFRONT,WaveDataWithMaxFilterDicAfterCorrFRONT,PHlocFRONT,PHoffSetFRONTAfterCorr,PHtiltFRONTAfterCorr,PlotTitle,fileName)\n ########BACK\n try:\n PlotTitle=' After Correction Wave Data S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+' After Correction Wave Data S.Golay _'+ str(MaxWaveWindow)\n side='Back';\n figWaveResidueAfterCorrBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotWaveDataResidue(WaveRawDataDicAfterCorrBACK,WaveDataWithMaxFilterDicAfterCorrBACK,PHlocBACK,PHoffSetBACKAfterCorr,PHtiltBACKAfterCorr,PlotTitle,fileName)\n except:\n 1 \n\n\n#################################################################################\n####################### Table: Offset Table - After correction\n#############Front\nif PlotTables:\n PlotTitle=' offset (Correction-For simplex) table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" Offset Table\"\n side='Front';\n TableOffsetAfterCorrFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotOffsetTabel(PHoffsetPerHFRONTAfterCorr,PlotTitle,fileName)\n \n \n #####Back\n \n try:\n PlotTitle=' offset (Correction-For simplex) table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" Offset Table\"\n side='Back';\n TableOffsetAfterCorrBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotOffsetTabel(PHoffsetPerHBACKAfterCorr,PlotTitle,fileName)\n except:\n 1 \n \n #################################################################################\n ####################### Table: Tilt Table - After correction\n #############Front\n \n PlotTitle=' Tilt table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" Tilt Table\"\n side='Front';\n TableTiltAfterCorrFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotTiltTable(PHtiltPerHFRONTAfterCorr,ColorLevelsTilt,DivideByNumTilt,PlotTitle,fileName)\n \n #####Back\n try:\n PlotTitle=' Tilt table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" Tilt Table\"\n side='Back';\n TableTiltAfterCorrBACK=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotTiltTable(PHtiltPerHBACKAfterCorr,ColorLevelsTilt,DivideByNumTilt,PlotTitle,fileName)\n except:\n 1\n \n #################################################################################\n ####################### Table: FRONT -BACK Delta - After correction\n \n try:\n PlotTitle='Delta offset table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" Delta Offset Table\"\n side='Front';\n TableFRONT_BACK_DeltaAfterCorrFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotFRONT_BACKDeltaTable(PHoffsetPerHFRONTAfterCorr,PHoffsetPerHBACKAfterCorr,DivideByNum,ColorLevels,PlotTitle,fileName);\n except:\n 1\n \n #################################################################################\n ####################### Table: FRONT -BACK Average - After correction\n try:\n PlotTitle='Correction table S.Golay = '+ str(MaxWaveWindow)+'---> '+f\n fileName=f+\" FRONT -BACK Average Table\"\n side='Front';\n TableFRONT_BACK_AverageAfterCorrFRONT=PlotGraphPlotly(pthF,side,Panel,ColorList).PlotFRONT_BACKAverageTable(PHoffsetPerHFRONTAfterCorr,PHoffsetPerHBACKAfterCorr,PlotTitle,fileName);\n except:\n 1\n\n\n\n\n\n#########################################################################################\n\n# jobData=CIScurveFromRawData(pthF).LoadRawData()\n# sub='CisCurvatureDataBasedOnWaveFormat=';\n# indices = []\n\n# for line_num, line in enumerate(jobData):\n# if len(line)>1:\n# if sub in 
line[0]:\n# indices.append(line_num) \n \n# cisFRONT = list(map(float, jobData[indices[0]][1:]))\n# if len(indices)>1:\n# cisBACK = list(map(float, jobData[indices[1]][1:]))\n","repo_name":"ireneGelfeld/AQM","sub_path":"WaveCalibrationAnalyzer_refactored_2.py","file_name":"WaveCalibrationAnalyzer_refactored_2.py","file_ext":"py","file_size_in_byte":68084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74877851376","text":"n=int(input(' how many row do you want?\\n'))\ncount=n\nfor row in range(n,0,-1):\n #print(row)\n for space in range(0,n-row):\n print(' ',end='')\n count=count-1\n for star in range(0,row+count):\n print('*',end='')\n print('')\n","repo_name":"rhr612/pattern","sub_path":"inverse piramid.py","file_name":"inverse piramid.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5944505988","text":"'''\r\nA Python script that uses data from Scryfall to automatically import MtG cards into Kanka.io. (Trademarks belong to their respective owners; see accompanying documentation for more detail.)\r\n\r\nCredit for \"do you even POST, bro?\" goes to https://www.w3schools.com/python/ref_requests_post.asp \r\n@author: ElectricalAptitude\r\n@author: SirTechSpec\r\n\r\n@created 2020.05.27\r\n@change 2020.05.27 sts ported from https://repl.it/@ElectricalAptit/kankfall#main.py so as to test Kanka interactions without making the API key quite so public.\r\n@change 2020.05.27 sts fixed git nonsense; set to UTF-8; adjustments to pull from constants.py\r\n\r\n'''\r\n\r\nfrom builtins import int\r\nimport requests\r\nimport constants\r\n\r\n\r\n# import other stuff??\r\n# test cases: gibberish, forest, Arcbound Ravager, Baral, snow-covered swamp, Armor of Faith, Kaervek's Torch\r\n#set program constants\r\ndebug = False\r\nweAreLive = True\r\n\r\n#import program constants from backup constants file\r\nSCRYFALL_URL = constants.SCRYFALL_URL\r\nRAVNICA_SETS = constants.RAVNICA_SETS\r\nKALADESH_SETS = constants.KALADESH_SETS\r\nTHEROS_SETS = constants.THEROS_SETS\r\n\r\nKANKA_CHAR_URL = constants.KANKA_CHAR_URL\r\nKANKA_ITEM_URL = constants.KANKA_ITEM_URL\r\nKANKA_LOC_URL = constants.KANKA_LOC_URL\r\n#TODO: read from cfg if available, inc. 
which sets/locations to use\r\n#TODO: use parameters if available\r\n\r\n#don't do magic numbers kids they're bad for you\r\n#TODO: get location IDs straight from Kanka\r\nkaladeshLocationID = 174953\r\nravnicaLocationID = 174971\r\ntherosLocationID = 176830\r\n#TODO: get tag IDs straight from Kanka\r\nkaladeshTagID = 54084\r\nravnicaTagID = 54085\r\ntherosTagID = 54177\r\nkankfallTagID = 55903\r\n#TODO: get race IDs straight from Kanka\r\nraceIDs = {\"Aetherborn\":67177, \"Centaur\":67037, \"Construct\":68699, \"Dwarf\":67077, \"Elf\":67493, \"Goblin\":66961, \"Human\":66977, \"Vedalken\":66954}\r\n\r\n\r\nmyToken = \"abc123\"\r\nwith open(\"cfg/token.auth\") as tokenFile:\r\n myToken = tokenFile.read()\r\nkankaHeaders={\"Authorization\":\"Bearer \"+myToken, \"Content-type\":\"application/json\"}\r\n\r\nwhile True:\r\n #run program\r\n #initialize variables at the start of each run\r\n cardName = cardSet = cardArtist = cardImgurl = cardFlavor = cardTypeLine = cardTypeParts = cardType = cardSubtype = kankaType = \"\"\r\n selectedCard = {}\r\n planeLocationID = 0\r\n entryLocation = 0\r\n tags = []\r\n kanka_request_URL = \"\"\r\n kankaPayload = {}\r\n postResult = \"\"\r\n\r\n desiredCardName = input(\"Type in the name of the card: \") # in python 3, input gives us a str automatically\r\n desiredCardName = desiredCardName.replace(\" \", \"+\")\r\n scry_URL = SCRYFALL_URL+desiredCardName+\"&unique=prints\"\r\n\r\n scry_result = requests.get(scry_URL) #returns a dict with one entry, 'data', whose data is an array of dicts, each of which is one card.\r\n if scry_result.ok==False:\r\n print(str(scry_result.status_code)+\": \"+scry_result.reason)\r\n continue\r\n resultJson=scry_result.json()[\"data\"] #so resultJson is an array of dicts (cards)\r\n print(\"Cards were found from the following sets:\")\r\n currentCardIndex = 0\r\n for card in resultJson: #card here is now an actual card\r\n thisCardSet = card[\"set_name\"]\r\n print(\"[\"+str(currentCardIndex)+\"]: \"+thisCardSet)\r\n if thisCardSet in RAVNICA_SETS or thisCardSet in KALADESH_SETS or thisCardSet in THEROS_SETS:\r\n selectedCard = card\r\n break\r\n currentCardIndex += 1\r\n if selectedCard == {}: continue #if no matches, try again\r\n print(\"Selected card number \"+str(currentCardIndex))\r\n cardName = selectedCard[\"name\"]\r\n cardSet = selectedCard[\"set_name\"]\r\n cardImgurl = selectedCard[\"image_uris\"][\"art_crop\"]\r\n if \"flavor_text\" in selectedCard:\r\n cardFlavor = selectedCard[\"flavor_text\"]\r\n if \"artist\" in selectedCard:\r\n cardArtist = selectedCard[\"artist\"]\r\n cardTypeLine = selectedCard[\"type_line\"]\r\n cardTypeParts = cardTypeLine.split(\"—\")\r\n cardType = cardTypeParts[0].strip()\r\n if len(cardTypeParts) > 1:\r\n cardSubtype = cardTypeParts[1].strip()\r\n\r\n #let's do a quick check\r\n print(\"Name: \" + cardName)\r\n print(\"Set: \" + cardSet)\r\n print(\"Imgurl: \" + cardImgurl)\r\n print(\"Flavor: \" + cardFlavor)\r\n print(\"Type: \" + cardType) #somewhat optimistic\r\n print(\"Subtype: \" + cardSubtype) #should be empty string if n/a\r\n print(\"Does this look right?\")\r\n shallIContinue = input(\"N for no, otherwise hit Enter: \")\r\n if shallIContinue.lower() == \"n\" or shallIContinue.lower() == \"no\":\r\n continue #this means don't continue, start over\r\n\r\n #Card input's all finished, woo! 
Now to some sorting and preparing for output.\r\n\r\n #what manner of beastie are you\r\n if cardType == \"Artifact\" or cardType == \"Legendary Artifact\":\r\n if cardSubtype == \"Vehicle\":\r\n kankaType = \"Location\"\r\n else:\r\n kankaType = \"Item\"\r\n if \"Creature\" in cardType: #could be Artifact Creature, Legendary Creature, Enchantment Creature, or who knows what\r\n kankaType = \"Character\"\r\n if \"Land\" in cardType :\r\n kankaType = \"Location\"\r\n #quick sanity check\r\n if kankaType == \"\":\r\n print(\"Not sure what to do with this. The type came in as \\\"\" + cardTypeLine + \"\\\", which failed to parse.\")\r\n continue\r\n\r\n # For all cards, a valid question is: which setting?\r\n if cardSet in KALADESH_SETS:\r\n planeLocationID = kaladeshLocationID\r\n tags.append(kaladeshTagID)\r\n #TODO: there are quite a few more Ravnica ones\r\n elif cardSet in (RAVNICA_SETS):\r\n planeLocationID = ravnicaLocationID\r\n tags.append(ravnicaTagID)\r\n elif cardSet in THEROS_SETS:\r\n planeLocationID = therosLocationID\r\n tags.append(therosTagID)\r\n # turns out you can't do if statements inside a constructor, which means this field needs to be validated up here\r\n if planeLocationID == 0:\r\n print(\"Onoez! This card from set \"+cardSet+\" doesn't seem to match any of the locations on file!\")\r\n continue\r\n\r\n # Having set/acquired all the information we're going to feed to our functions, let's define them.\r\n #now if we were really doing the thing properly, we'd always have our constructors take arguments and use those arguments in the constructors. right now, we're using global variables, which is a no-no. but I'm not in the mood to do all that typing right now.\r\n\r\n #INFO: the only REQUIRED field for any of these is \"name\". So, let's not include any fields we're not specifying in the constructor.\r\n\r\n # turns out locationID isn't the same for each entity type, though, so we need to define figuring that out first\r\n def determineEntryLocation():\r\n locationDetermined = False\r\n while locationDetermined == False:\r\n print(\"Include a location?\")\r\n locationAnswer = input(\"Input location ID for custom, Y to use the default location for the plane the card was found on, or N for no location: \")\r\n locationDetermined = True #give them the benefit of the doubt\r\n if locationAnswer.lower() == \"n\":\r\n print(\"Understood. No location will be specified.\")\r\n chosenLocation = 0\r\n elif locationAnswer.lower() == \"y\":\r\n chosenLocation = planeLocationID\r\n elif locationAnswer.isdecimal():\r\n chosenLocation = int(locationAnswer)\r\n else:\r\n print(\"That wasn't one of the options. 
Maybe you made a typo or something?\")\r\n locationDetermined = False #this will cause the While loop to restart\r\n\r\n return chosenLocation\r\n\r\n def createKankaCharacter():\r\n # these need to be initialized every time, hence them being defined here\r\n # incoming data includes: tags[], cardName, cardFlavor, cardImgurl, cardSubtype\r\n personalityName=[\"Attitude\", \"Values\", \"Accent\", \"Renown\"] #we might want to change that per-card later\r\n personalityEntry=[\"Unknown\", \"Unknown\", \"Unknown\", \"Unknown\"] #ditto\r\n race=\"\"\r\n raceID = 0\r\n charTitle = \"\" # @UnusedVariable (not really, I just don't trust it to re-initialize properly otherwise)\r\n charType = \"\"\r\n\r\n # The first word of the creature subtype has a decent chance of being the race, with one exception\r\n if cardType == \"Artifact Creature\":\r\n raceCandidate = \"Construct\"\r\n else:\r\n raceCandidate = cardSubtype.split(\" \")[0]\r\n if raceCandidate == \"Elven\":\r\n raceCandidate = \"Elf\"\r\n if raceCandidate in raceIDs: #else leave both race and raceID empty\r\n race = raceCandidate\r\n raceID = raceIDs[race]\r\n\r\n # replacing \"\" with \"\" should be no problem if we didn't find a race\r\n charTitle = cardSubtype.replace(race, \"\", 1).strip()\r\n if debug:\r\n print(cardType)\r\n print(cardSubtype, \"-->\", raceCandidate, \"-->\", race, \": \", raceID)\r\n print(charTitle)\r\n input()\r\n\r\n while charType == \"\":\r\n charTypeResponse = input(\"Character type? M for Model, I for Individual (awaiting customization), anything else will be input directly: \")\r\n if charTypeResponse.lower() == \"m\":\r\n charType = \"Model\"\r\n elif charTypeResponse.lower() == \"i\":\r\n charType = \"Individual\"\r\n #elif charTypeResponse.lower() == \"p\": charType = \"NPC\"\r\n #else: print(\"seriously it really has to be m or i\")\r\n else:\r\n charType = charTypeResponse\r\n # end loop - successfully setting charType moves on\r\n\r\n kankaCharacter = {\r\n \"name\" : cardName,\r\n \"title\": charTitle,\r\n # \"age\" : \"\",\r\n # \"sex\" : \"\",\r\n \"entry\" : cardFlavor + \"
\\n\\nBehavior so far: None\\n\\n\\n\\nArtist Credit: \"+cardArtist+\"\",\r\n \"type\" : charType,\r\n # \"family_id\" : \"\",\r\n \"tags\" : tags,\r\n \"is_dead\" : False,\r\n \"is_private\" : False,\r\n \"image_url\" : cardImgurl,\r\n \"personalityName\" : personalityName,\r\n \"personalityEntry\" : personalityEntry\r\n }\r\n if raceID > 0:\r\n kankaCharacter.update({\"race_id\" : raceID})\r\n entryLocation = determineEntryLocation()\r\n if entryLocation > 0:\r\n kankaCharacter.update({\"location_id\":entryLocation})\r\n return kankaCharacter\r\n\r\n # now on to items\r\n\r\n def createKankaItem():\r\n kankaItem = {\r\n \"name\" : cardName,\r\n \"entry\" : cardFlavor + \"
\\n\\nArtist Credit: \"+cardArtist+\"\",\r\n # \"character_id\" : , # int - the item's owner\r\n \"tags\" : tags,\r\n \"is_private\" : False,\r\n \"image_url\" : cardImgurl\r\n }\r\n if cardSubtype != \"\":\r\n kankaItem.update({\"type\":cardSubtype})\r\n entryLocation = determineEntryLocation()\r\n if entryLocation > 0:\r\n kankaItem.update({\"location_id\":entryLocation})\r\n return kankaItem\r\n\r\n def createKankaLocation():\r\n kankaLocation = {\r\n \"name\" : cardName,\r\n \"entry\" : cardFlavor + \"
\\n\\nArtist Credit: \"+cardArtist+\"\",\r\n \"tags\" : tags,\r\n \"is_private\" : False,\r\n \"image_url\" : cardImgurl,\r\n }\r\n if cardSubtype != \"\":\r\n kankaLocation.update({\"type\":cardSubtype})\r\n entryLocation = determineEntryLocation()\r\n if entryLocation > 0:\r\n kankaLocation.update({\"parent_location_id\":entryLocation})\r\n return kankaLocation\r\n\r\n #let's call some functions\r\n if kankaType == \"Character\":\r\n kankaPayload = createKankaCharacter()\r\n kanka_request_URL = KANKA_CHAR_URL\r\n elif kankaType == \"Location\":\r\n kankaPayload = createKankaLocation()\r\n kanka_request_URL = KANKA_LOC_URL\r\n elif kankaType == \"Item\":\r\n kankaPayload = createKankaItem()\r\n kanka_request_URL = KANKA_ITEM_URL\r\n \r\n\r\n # PUSH THE BIG RED BUTTON\r\n if weAreLive: #I'm gone, baby, solid gone.\r\n print(\"Submitting...\") #acknowledge slight delay\r\n postResult = requests.post(kanka_request_URL, headers=kankaHeaders, json=kankaPayload)\r\n # So, how did it gooooooooo?\r\n print(postResult.status_code, postResult.reason)\r\n if postResult.ok:\r\n print(postResult.text)\r\n else: # NOT YET BALOO\r\n print(kanka_request_URL)\r\n print(kankaHeaders)\r\n print(kankaPayload)\r\n# kankaJSON = kankaResult.json()\r\n# kankaContents = kankaJSON[\"data\"]\r\n# for x in kankaContents:\r\n# print(x, kankaContents[x])\r\n #wheee! let's do it again!\r\n continue\r\n # TODO: some kind of condition for not doing it again I guess\r\n # break\r\n #end of program main loop\r\n","repo_name":"ElectricalAptitude/kankfall","sub_path":"kankfall.py","file_name":"kankfall.py","file_ext":"py","file_size_in_byte":12902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} {"seq_id":"35579076599","text":"import Levenshtein as lev\nimport pandas as pd\n\nfrom levenshtein import get_levenshtein_distance\n\ndef get_nearest_university_name(universities,\n new_university_name,\n method=get_levenshtein_distance):\n universities['lev_distance'] = universities['name'].apply(\n lambda x: method(new_university_name, x)\n )\n min_distance_university_idx = universities['lev_distance'].idxmin()\n\n return (\n universities.loc[min_distance_university_idx]['name'],\n universities.loc[min_distance_university_idx]['lev_distance']\n )\n\ndef test_levenshtein_distance():\n universities_collection = pd.DataFrame([\n \"Saint Joseph's College of Indiana\",\n \"Saint John's College\",\n \"College of Saint Joseph in Vermont\",\n \"University of Saint Joseph\",\n \"Saint Josephs College\",\n \"Mount St. Joseph University\",\n \"Saint John's University Thailand\",\n ], columns=['name'])\n\n new_university = \"Saint Joe's College (ME)\"\n\n print('New university:', new_university)\n print()\n\n # use own implementation in levenshtein.py\n print('--- Own implementation ---')\n nearest_university_name, lev_distance = get_nearest_university_name(\n universities_collection,\n new_university,\n get_levenshtein_distance\n )\n print('Nearest university name', nearest_university_name)\n print('Levenshtein Distance:', int(lev_distance))\n print()\n\n # use python-Levenshtein package\n print('--- python-Levenshtein package ---')\n nearest_university_name, lev_distance = get_nearest_university_name(\n universities_collection,\n new_university,\n lev.distance\n )\n print('Nearest university name', nearest_university_name)\n print('Levenshtein Distance:', int(lev_distance))\n\n\nif __name__ == '__main__':\n test_levenshtein_distance()\n","repo_name":"shielamms/nlp","sub_path":"01-levenshtein_distance/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} {"seq_id":"24964511451","text":"from torch import nn \nimport torch \nfrom .mlp import MLP\n\n\ndef off_diagonal(x):\n # return a flattened view of the off-diagonal elements of a square matrix\n n, m = x.shape\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\n\nclass BarlowTwins(nn.Module):\n def __init__(self, backbone, projector_dims, lambd=5e-3):\n super().__init__()\n self.backbone = backbone\n self.projector = MLP(projector_dims)\n self.lambd = lambd\n\n # normalization layer for the representations z1 and z2\n self.bn = nn.BatchNorm1d(projector_dims[-1], affine=False)\n\n def forward(self, y1, y2):\n z1 = self.projector(self.backbone(y1))\n z2 = self.projector(self.backbone(y2))\n\n # empirical cross-correlation matrix, normalized by the batch size\n c = self.bn(z1).T @ self.bn(z2)\n c.div_(z1.shape[0])\n\n # sum the cross-correlation matrix between all gpus (distributed runs only)\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n torch.distributed.all_reduce(c)\n\n on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()\n off_diag = off_diagonal(c).pow_(2).sum()\n loss = on_diag + self.lambd * off_diag\n return loss","repo_name":"pfrwilson/medAI","sub_path":"medAI/modeling/barlow_twins.py","file_name":"barlow_twins.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} {"seq_id":"15495302568","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0005_user_has_completed_ofpd'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='has_completed_ofpd',\n field=models.BooleanField(verbose_name='\\n Check this box if you have completed the Oregon\\n Forest Pest Detector training, offered by Oregon State\\n Extension.', default=False),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_active',\n field=models.BooleanField(verbose_name='Is Manager (can login and manage reports)', default=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_staff',\n field=models.BooleanField(verbose_name='Is Admin (can do anything)', default=False),\n ),\n 
]\n","repo_name":"PSU-OIT-ARC/oregoninvasiveshotline","sub_path":"oregoninvasiveshotline/users/migrations/0006_auto_20151013_1331.py","file_name":"0006_auto_20151013_1331.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13478362032","text":"''' #############\n#### CLI Parsing ####\n''' #############\nimport sys\n\nif len(sys.argv) < 2:\n print(\"Please provide the path to file\")\n exit(1)\n\npath = sys.argv[1]\n\nif \"txt.bz2\" not in path:\n print(\"The file should have txt.bz2 format\")\n exit(2)\n\n''' ############\n#### Processing ####\n''' ############\nimport bz2\n\nwith open(path, 'rb') as file, open(path[0:-len(\".bz2\")], 'w', encoding=\"utf8\", errors=\"ignore\") as output:\n decompressor = bz2.BZ2Decompressor()\n chunk_size = 100 * 1024\n for chunk in iter(lambda: file.read(chunk_size), b''):\n data = decompressor.decompress(chunk)\n output.write(data.decode(encoding=\"utf8\", errors='ignore'))\n","repo_name":"Sand3r-/Computational-Tools-For-Big-Data","sub_path":"challenge1/unpack_index_file.py","file_name":"unpack_index_file.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70055743538","text":"\"\"\"\na1.py\nWilliam C. Morris\n\n\"\"\"\nimport math\nimport random\n\n\"\"\" FUNC: double_exps(n) \"\"\"\ndef double_exps(n):\n justification = len(str(2**(2**n)))\n for i in range(1, n+1):\n math_result = str(2**(2**i))\n print(i, math_result.rjust(justification))\n\n\"\"\" FUNC: guess_number(level) \"\"\"\ndef guess_number(level):\n player_guesses = 0\n upper_bound = math.pow(level, 2)\n r = random.randint(level, upper_bound)\n\n player_input = input(str(\"I'm thinking of a number between \" + str(level) \\\n + \" and \" + str(upper_bound) + \"\\n\"))\n\n while True:\n if (int(player_input) > r):\n player_input = input(\"Your guess: \" + str(player_input) + \\\n \"\\nThat's too high.\\n\")\n player_guesses += 1\n elif (int(player_input) < r):\n player_input = input(\"Your guess: \" + str(player_input) + \\\n \"\\nThat's too low.\\n\")\n player_guesses += 1\n else:\n player_guesses += 1\n print(\"That's it!\\nIt took you\", player_guesses, \\\n \"tries to find my number.\\nYou where lucky!\")\n break\n\n\"\"\" FUNC: right_justify(s, n) \"\"\"\ndef right_justify(s, n):\n s_length = len(s)\n if (len(s) >= n):\n return s\n else:\n return s.rjust(n)\n\n# Never use anything bigger than 16.\ndouble_exps(5)\nguess_number(4)\nprint( right_justify('12', 4) )\n","repo_name":"d4rkh4re/d4rkh4re","sub_path":"college/a290/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"18738565698","text":"from absl.testing import parameterized\nimport tensorflow as tf\nimport uncertainty_baselines as ub\nfrom uncertainty_baselines.datasets import base\n\n\nclass CriteoDatasetTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('Train', base.Split.TRAIN),\n ('Validation', base.Split.VAL),\n ('Test', base.Split.TEST))\n def testDatasetSize(self, split):\n batch_size = 9\n eval_batch_size = 5\n dataset_builder = ub.datasets.CriteoDataset(\n batch_size=batch_size,\n eval_batch_size=eval_batch_size,\n shuffle_buffer_size=20)\n dataset = dataset_builder.build(split).take(1)\n element = next(iter(dataset))\n features = element['features']\n labels 
= element['labels']\n\n expected_batch_size = (\n batch_size if split == base.Split.TRAIN else eval_batch_size)\n features_length = len(features)\n feature_shape = features[list(features)[0]].shape\n labels_shape = labels.shape\n self.assertEqual(feature_shape, (expected_batch_size,))\n self.assertEqual(features_length, 39)\n self.assertEqual(labels_shape, (expected_batch_size,))\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"gpleiss/uncertainty-baselines","sub_path":"uncertainty_baselines/datasets/criteo_test.py","file_name":"criteo_test.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"1637173081","text":"\"\"\"\n@author: wangye(Wayne)\n@license: Apache Licence\n@file: Maximum Difference by Remapping a Digit.py\n@time: 20230330\n@contact: wang121ye@hotmail.com\n@site: wangyendt@github.com\n@software: PyCharm\n\n# code is far away from bugs.\n\"\"\"\n\n\nclass Solution:\n def minMaxDifference(self, num: int) -> int:\n s = str(num)\n mx = int(s.replace(s[0], '9'))\n for i in range(10):\n if str(i) in s:\n mx = max(mx, int(s.replace(str(i), '9')))\n mn = s.replace(s[0], '0')\n return int(mx) - int(mn)\n\n\nso = Solution()\nprint(so.minMaxDifference(num=867))\n","repo_name":"wangyendt/LeetCode","sub_path":"Biweekly Contests/51-100/biweek 98/2566. Maximum Difference by Remapping a Digit/Maximum Difference by Remapping a Digit.py","file_name":"Maximum Difference by Remapping a Digit.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"72808852979","text":"\"\"\"\nImplementation of Tic-Tac-Toe\nAllows for reverse Tic-Tac-Toe in which getting three squares\nin a row results in a loss\n\"\"\"\nfrom copy import deepcopy\nfrom random import choice\nimport sys\n\nimport pygame as pg\n\n# Constants to represent states of the board and of individual squares\nEMPTY = ' '\nPLAYERX = 'X'\nPLAYERO = 'O'\nDRAW = 'Draw'\n\n# Constants related to display\nWIDTH = 960\nHEIGHT = 783\nMARGIN_X = 203 # Margins are based on board image size\nMARGIN_Y = 140\nSCREEN_OFFSET = 100 # To account for gap to left and top of board image\nSQUARE_SIZE = 218 # Size of squares on board image\nSMALL_FONT_SIZE = 25\nMEDIUM_FONT_SIZE = 100\nLARGE_FONT_SIZE = 200\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\nGRAY = (247, 247, 247)\nBLACK = (0, 0, 0)\n\n# Directions\nRIGHT = 0\nDOWN = 1\nDOWN_RIGHT = 2\nUP_RIGHT = 3\n\n# Offsets for moving through board in a given direction\nOFFSETS = {RIGHT: (0, 1),\n DOWN: (1, 0),\n DOWN_RIGHT: (1, 1),\n UP_RIGHT: (-1, 1)}\n\n# Constants for Monte Carlo simulator\nNTRIALS = 2500 # Number of trials to run\nSCORE_COMP = 1.0 # Score for squares played by the current player\nSCORE_OTHER = 1.0 # Score for squares played by the other player\n\n\nclass TTTBoard:\n \"\"\"\n Class that represents a Tic-Tac-Toe-Board\n \"\"\"\n\n def __init__(self, dim, reverse=False, board=None):\n \"\"\"\n Initialize the board object with the given dimensions and\n whether the game should be reversed\n \"\"\"\n self._dim = dim\n self._reverse = reverse\n if board is not None:\n self._board = board\n else:\n self._board = [[EMPTY for row in range(dim)] for col in range(dim)]\n\n def __str__(self):\n \"\"\"\n Returns string representation of the board\n E.g.\n 'X | O | X\n ---------\n | X | O\n ---------\n O | | X'\n \"\"\"\n return_string = ''\n for row in range(self._dim):\n for col in range(self._dim):\n if col 
== 0:\n return_string = f\"{return_string}{self.get_square(row, col)} |\"\n elif col < self._dim - 1:\n return_string = f\"{return_string} {self.get_square(row, col)} |\"\n elif col == self._dim - 1:\n return_string = f\"{return_string} {self.get_square(row, col)}\"\n if row < self._dim - 1:\n return_string = f\"{return_string}\\n---------\\n\"\n return return_string\n\n def get_dim(self):\n \"\"\"\n Returns the dimensions of the board\n \"\"\"\n return self._dim\n\n def get_square(self, row, col):\n \"\"\"\n Returns the contents of a square on the board\n \"\"\"\n return self._board[row][col]\n\n def get_empty_squares(self):\n \"\"\"\n Returns a list of (row, col) tuples for all empty squares\n \"\"\"\n return_list = [(row, col) for row in range(self._dim) for col in range(\n self._dim) if self.get_square(row, col) == EMPTY]\n return return_list\n\n def get_board(self):\n \"\"\"\n Returns a copy of the board\n \"\"\"\n return deepcopy(self)\n\n def move(self, row, col, player):\n \"\"\"\n Place player marker on the board at position (row, col).\n player should be one of the constants PLAYERX or PLAYERO\n Does nothing if board square is not empty.\n Returns true if a move is made\n \"\"\"\n if self._board[row][col] == EMPTY:\n self._board[row][col] = player\n return (row, col)\n\n def check_win(self, row, col, player):\n \"\"\"\n Takes position and player of last move so we don't check the entire board each time\n Returns a constant associated with the state of them game\n PLAYERX if PLAYERX wins\n PLAYERO if PLAYERO wins\n DRAW if it's a tie\n None if game is still in progress\n \"\"\"\n # Check each direction from the last move to see if there's a win\n for dummy_direction, offset in OFFSETS.items():\n run_size = 0\n # Check both directions (e.g. up and down, right and left)\n for i in range(-self._dim + 1, self._dim):\n next_pos = (row + i * offset[0], col + i * offset[1])\n # Make sure the indices refer to a location in the grid\n if 0 <= next_pos[0] < self._dim and 0 <= next_pos[1] < self._dim:\n if self.get_square(next_pos[0], next_pos[1]) == player:\n run_size += 1\n # Return the winning player depending on whether game is set to reverse\n if run_size == self._dim and not self._reverse:\n return player\n elif run_size == self._dim and self._reverse:\n if player == PLAYERO:\n return PLAYERX\n else:\n return PLAYERO\n # Return None if game is still in progress and DRAW if game is tied\n if any(EMPTY in row for row in self._board):\n return None\n else:\n return DRAW\n\n\ndef mc_trial(board, player):\n \"\"\"\n Plays a game of Tic-Tac-Toe using the current board state as the starting point\n Returns the winner\n \"\"\"\n comp = player\n other = None\n if comp == PLAYERX:\n other = PLAYERO\n else:\n other = PLAYERX\n\n trial_winner = None\n in_progress = True\n while in_progress:\n # Alternate between each player, selecting a random move, check for win/draw\n for idx in [comp, other]:\n idx_move = choice(board.get_empty_squares())\n board.move(idx_move[0], idx_move[1], idx)\n if board.check_win(idx_move[0], idx_move[1], idx) is not None:\n trial_winner = board.check_win(idx_move[0], idx_move[1], idx)\n in_progress = False\n break\n return trial_winner\n\n\ndef mc_update_scores(scores, board, player, winner):\n \"\"\"\n Takes a grid of scores with the same dimensions as the Tic-Tac-Toe board, the\n computer player, and the winner of a trial. 
Updates scores for each position.\r\n\r\n Positions of the winning player have their scores increased whereas positions\r\n of the losing player have their scores decreased.\r\n\r\n No scoring is done if the game is a draw.\r\n \"\"\"\r\n if winner == DRAW:\r\n return\r\n\r\n # This block here is only needed if we want different scoring values for\r\n # the winner and loser\r\n winner_increment = None\r\n loser_decrement = None\r\n if winner == player:\r\n winner_increment = SCORE_COMP\r\n loser_decrement = SCORE_OTHER\r\n else:\r\n winner_increment = SCORE_OTHER\r\n loser_decrement = SCORE_COMP\r\n\r\n for row in range(board.get_dim()):\r\n for col in range(board.get_dim()):\r\n if board.get_square(row, col) == winner:\r\n scores[row][col] += winner_increment\r\n elif board.get_square(row, col) != EMPTY: # remaining non-empty squares belong to the loser\r\n scores[row][col] -= loser_decrement\r\n\r\n\r\ndef get_best_move(board, scores):\r\n \"\"\"\r\n Determines the best move given the current board and scoring from the Monte Carlo trials\r\n \"\"\"\r\n # Determine the highest scoring empty square\r\n max_score = float('-inf') # scores may be negative, so start below any possible value\r\n for pos in board.get_empty_squares():\r\n if scores[pos[0]][pos[1]] > max_score:\r\n max_score = scores[pos[0]][pos[1]]\r\n\r\n # Make a list of empty squares that have the max_score\r\n best_empty_squares = [pos for pos in board.get_empty_squares(\r\n ) if scores[pos[0]][pos[1]] == max_score]\r\n\r\n return choice(best_empty_squares)\r\n\r\n\r\ndef mc_move(board, player, trials):\r\n \"\"\"\r\n Determines the best move based on repeated simulations\r\n \"\"\"\r\n scores = [[0 for row in range(board.get_dim())]\r\n for col in range(board.get_dim())]\r\n while trials > 0:\r\n clone = board.get_board()\r\n winner = mc_trial(clone, player)\r\n mc_update_scores(scores, clone, player, winner)\r\n trials -= 1\r\n return get_best_move(board, scores)\r\n\r\n\r\ndef draw(screen, board, board_image, board_rects, button_rects, text, winner):\r\n \"\"\"\r\n Draws the current board state\r\n \"\"\"\r\n # Draw the board\r\n screen.fill(GRAY)\r\n screen.blit(board_image, (100, 100))\r\n x_width, x_height = text[0].get_rect().width, text[0].get_rect().height\r\n o_width, o_height = text[1].get_rect().width, text[1].get_rect().height\r\n\r\n # Draw any X's and O's\r\n for row in range(board.get_dim()):\r\n for col in range(board.get_dim()):\r\n # The X's and O's have different dimensions, hence why\r\n # I'm determining their positions separately\r\n if board.get_square(row, col) == PLAYERX:\r\n pos_x = board_rects[row][col].x + \\\r\n (SQUARE_SIZE - x_width) // 2\r\n # Because the middle squares on the board image are smaller\r\n # than the others I couldn't position everything correctly\r\n # horizontally and vertically without using a \"random\" offset\r\n # namely, the factor of 0.85\r\n pos_y = board_rects[row][col].y + \\\r\n (SQUARE_SIZE - 0.85 * x_height) // 2\r\n screen.blit(text[0], (pos_x, int(pos_y))) # blit expects ints\r\n elif board.get_square(row, col) == PLAYERO:\r\n pos_x = board_rects[row][col].x + \\\r\n (SQUARE_SIZE - o_width) // 2\r\n pos_y = board_rects[row][col].y + \\\r\n (SQUARE_SIZE - 0.85 * o_height) // 2\r\n screen.blit(text[1], (pos_x, int(pos_y)))\r\n\r\n # Draw buttons\r\n for idx, rect in enumerate(button_rects):\r\n screen.blit(text[idx + 2], rect)\r\n\r\n # If the game is over draw result message\r\n font = pg.font.Font('freesansbold.ttf', MEDIUM_FONT_SIZE)\r\n result = None\r\n if winner == PLAYERX:\r\n result = 'X wins'\r\n elif winner == PLAYERO:\r\n result = 'O wins'\r\n elif winner == DRAW:\r\n result = 'Draw'\r\n result_text = font.render(result, True, BLACK)\r\n pos_x = SCREEN_OFFSET + (WIDTH - SCREEN_OFFSET -\r\n result_text.get_rect().width) // 2\r\n pos_y = 10 # Just so it's not right at the top 
of the screen\n screen.blit(result_text, (pos_x, pos_y))\n\n pg.display.flip()\n\n\ndef reset_game(reverse):\n \"\"\"\n Resets the game\n \"\"\"\n board = TTTBoard(3, reverse)\n return board\n\n\ndef main():\n \"\"\"\n Run the game\n \"\"\"\n pg.init()\n\n # Create screen and load game assets\n screen = pg.display.set_mode((WIDTH, HEIGHT))\n board_image = pg.image.load('game_board.png')\n\n # Create font objects for X's and O's\n font_large = pg.font.Font('freesansbold.ttf', LARGE_FONT_SIZE)\n text_x = font_large.render('X', True, BLUE)\n text_o = font_large.render('O', True, RED)\n\n board = TTTBoard(3)\n\n # Create Rect objects for the board squares\n board_rects = [[pg.Rect(MARGIN_X + row * SQUARE_SIZE, MARGIN_Y +\n col * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE)\n for row in range(board.get_dim())] for col in range(board.get_dim())]\n\n # Create Rect objects for buttons\n font_small = pg.font.Font('freesansbold.ttf', SMALL_FONT_SIZE)\n new_game_text = font_small.render('New Game', True, BLACK)\n new_game_rect = new_game_text.get_rect()\n new_game_rect.x, new_game_rect.y = SMALL_FONT_SIZE // 2, SMALL_FONT_SIZE // 2\n\n switch_text = font_small.render('Switch Symbol', True, BLACK)\n switch_rect = switch_text.get_rect()\n switch_rect.x, switch_rect.y = SMALL_FONT_SIZE // 2, SMALL_FONT_SIZE // 2 + MARGIN_Y\n\n reverse_text = font_small.render('Reverse', True, BLACK)\n reverse_rect = reverse_text.get_rect()\n reverse_rect.x, reverse_rect.y = SMALL_FONT_SIZE // 2, SMALL_FONT_SIZE // 2 + 2 * MARGIN_Y\n button_rects = [new_game_rect, switch_rect, reverse_rect]\n\n # Create a tuple of all the text objects to pass to the draw method\n texts = (text_x, text_o, new_game_text, switch_text, reverse_text)\n\n # Initial some varibles for book keeping\n winner = None\n player_turn = False\n player_move = None\n reverse = False\n comp = PLAYERX\n player = PLAYERO\n\n # Main game logic\n while True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n elif event.type == pg.MOUSEBUTTONUP:\n coords = event.pos\n # The new_game, switch, and reverse buttons don't do anything\n # if a game is in progress\n if new_game_rect.collidepoint(coords) and winner is not None:\n board = reset_game(reverse)\n winner = None\n player_move = None\n # Sets the player to go first if they're X\n if player == PLAYERX:\n player_turn = True\n elif switch_rect.collidepoint(coords) and winner is not None:\n temp = player\n player = comp\n comp = temp\n elif reverse_rect.collidepoint(coords) and winner is not None:\n reverse = not reverse\n for row in range(board.get_dim()):\n for col in range(board.get_dim()):\n if board_rects[row][col].collidepoint(coords):\n player_move = (row, col)\n break\n\n draw(screen, board, board_image, board_rects, button_rects, texts, winner)\n\n if not player_turn and not winner:\n comp_move = mc_move(board, comp, NTRIALS)\n board.move(comp_move[0], comp_move[1], comp)\n result = board.check_win(comp_move[0], comp_move[1], comp)\n if result is not None:\n winner = result\n else:\n player_turn = True\n elif player_move and not winner:\n # Only update if the call to move returns a result\n # this prevents clicking on a filled space from counting as a move\n if board.move(player_move[0], player_move[1], player):\n result = board.check_win(\n player_move[0], player_move[1], player)\n player_move = None\n player_turn = False\n if result is not None:\n winner = result\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"nmoore32/coursera-fundamentals-of-computing-work","sub_path":"2 Principles of Computing/Week 3/Mini-Project/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":14384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27481025353","text":"from geant4_pybind import *\nimport csv\nimport configparser\nimport os\nimport datetime\nimport calendar\nimport sys\nimport random\nimport math\nfrom pyEcoMug import EcoMug\nfrom util_math_functions import *\n\n\nclass ActionInitialization(G4VUserActionInitialization):\n \n def __init__(self, detectorConstruction):\n super().__init__()\n self.fDetector = detectorConstruction\n \n def BuildForMaster(self):\n runAction = RunAction()\n self.SetUserAction(runAction)\n \n def Build(self):\n fileman = FileManager.GetFileManager()\n ptree = fileman.GetPropTree()\n inputMethod = int(ptree.get(\"input\",\"method\"))\n self.SetUserAction(PrimaryGeneratorAction())\n runAction = RunAction()\n self.SetUserAction(runAction)\n\n\nclass DetectorConstruction(G4VUserDetectorConstruction):\n \n def __init__(self, useCubeFile=False, cubeFileName=\"cubeInfo.txt\"):\n super().__init__()\n self.detLog = None\n self.useCubeFile = useCubeFile\n self.cubeFileName = cubeFileName # The corresponding file defines the properties of the cube in the scene\n \n def Construct(self):\n man = G4NistManager.Instance()\n matAir = man.FindOrBuildMaterial(\"G4_AIR\")\n\n # Materials for concrete\n matH = man.FindOrBuildMaterial(\"G4_H\")\n matO = man.FindOrBuildMaterial(\"G4_O\")\n matNA = man.FindOrBuildMaterial(\"G4_Na\")\n matAl = man.FindOrBuildMaterial(\"G4_Al\")\n matSi = man.FindOrBuildMaterial(\"G4_Si\")\n matCa = man.FindOrBuildMaterial(\"G4_Ca\")\n matFe = man.FindOrBuildMaterial(\"G4_Fe\") \n\n # Make concrete\n densityConcrete = 2.3*g/cm3\n ncompsConcrete = 7\n mat_concrete = G4Material(\"concrete\", densityConcrete, ncompsConcrete)\n mat_concrete.AddMaterial(matH, 0.168038)\n mat_concrete.AddMaterial(matO, 0.563183)\n mat_concrete.AddMaterial(matNA, 0.021365)\n mat_concrete.AddMaterial(matAl, 0.021343)\n mat_concrete.AddMaterial(matSi, 0.203231)\n mat_concrete.AddMaterial(matCa, 0.018595)\n mat_concrete.AddMaterial(matFe, 0.004246)\n \n # steel for frontdoor\n matC = man.FindOrBuildMaterial(\"G4_C\")\n \n densitySteel = 7.82*g/cm3\n ncompsSteel = 2\n mat_steel = G4Material(\"steel\", densitySteel, ncompsSteel)\n mat_steel.AddMaterial(matC, 0.022831)\n mat_steel.AddMaterial(matFe, 0.977169)\n \n # steel concrete for walls \n frac_steel = 300.0 /(densitySteel/(g/cm3)*1e3)\n frac_concrete = 1.0 - frac_steel\n density_wall = frac_concrete*densityConcrete + frac_steel*densitySteel\n ncompsWall = 2\n matWall = G4Material(\"steelconcrete\",density_wall, ncompsWall)\n matWall.AddMaterial(mat_concrete, frac_concrete)\n matWall.AddMaterial(mat_steel, frac_steel)\n \n #------------------------------- WORLD VOLUME, filled with air------------------------------------#\n checkOverlaps = True\n placement = G4ThreeVector(0,0,0)\n \n axisX = G4ThreeVector(1.0,0,0)\n axisY = G4ThreeVector(0,1.0,0)\n axisZ = G4ThreeVector(0,0,1.0)\n \n solidWorld = G4Sphere(\"World\", 0.0*cm, 22.0*m,\n 0.0*deg, 360.0*deg,\n 0.0*deg, 180.0*deg)\n logicWorld = G4LogicalVolume(solidWorld, matAir, \"World\")\n physWorld = G4PVPlacement(None, \n G4ThreeVector(),\n logicWorld,\n \t\t\t \"World \",\n None,\n False,\n 0)\n \n 
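# Added note: the positional arguments to G4PVPlacement above are, in order, the\n # rotation (None = identity), the translation, the logical volume being placed,\n # its name, the mother volume (None only for the world), the boolean-operation\n # flag, and the copy number.\n 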
#-------------------------------------BUNKER---------------------------------------------------#\n #--------------------bunker measures-------------------------------#\n length = 30.78 *m\n width = 15.0 *m\n \n pash1 = 1.102 *m\n pash4 = 1.615 *m\n \n pRMin = 9.71 *m\n pRMax = 10.81*m \n \n sidewidth = (12.68/2.0)*m\n sideheight = 5.072*m\n \n \n \n #------------------parts of bunker---------------------------------#\n #------roof------#\n heightRoof = pash1/2 #constructors take half\n widthRoof = width/2\n lengthRoof = length/2\n zPosRoof = 3.97*m + heightRoof\n \n concSolid = G4Box(\"concSolid\",widthRoof,lengthRoof,heightRoof)\n concLog = G4LogicalVolume(concSolid, matWall, \"roofLog\")\n placementconc = G4ThreeVector(0.0, 0.0, zPosRoof)\n \n concPhysical = G4PVPlacement(None, #no rotation\n placementconc, #at\n concLog, #its logical volume\n \"Conc\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps)\n \n #------sides-----#\n v0=G4TwoVector(0*m, 0*m)\n v1=G4TwoVector(0*m, 0*m+sideheight)\n v2=G4TwoVector(sidewidth, 0*m)\n v2_2=G4TwoVector(0*m-sidewidth, 0*m)\n vertices = G4TwoVectorVector([v0,v1,v2,v2,v0,v1,v2,v2])\n vertices_2 = G4TwoVectorVector([v0,v1,v2_2,v2_2,v0,v1,v2_2,v2_2])\n sideSolid = G4GenericTrap(\"bunker side\", length/2.0, vertices)\n sideSolid_2 = G4GenericTrap(\"bunker side\", length/2.0, vertices_2)\n \n sideLog = G4LogicalVolume(sideSolid, matWall, \"wallLog\")\n sideLog_2 = G4LogicalVolume(sideSolid_2, matWall, \"wallLog\")\n\n rotA = G4RotationMatrix()\n rotA.rotate(-90*deg, axisX)\n \n sidePhysical1 = G4PVPlacement(rotA, \n G4ThreeVector(widthRoof,0.0,0.0), \n sideLog, \n \"side1\", \n logicWorld, \n False, \n 0, \n checkOverlaps);\n \n sidePhysical2 = G4PVPlacement(rotA, \n G4ThreeVector(-widthRoof,0.0,0.0), #at\n sideLog_2, #its logical volume\n \"side2\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps)\n \n #-----backwall------#\n backWallthickness = pash1/2.0\n backWallheight = (3.97/2.0)*m\n backSolid = G4Box(\"concSolid\",widthRoof, backWallthickness,backWallheight) \n backLog = G4LogicalVolume(backSolid, matWall, \"backLog\")\n placementBackWall=G4ThreeVector(0,-lengthRoof+backWallthickness,backWallheight)\n \n backPhysical = G4PVPlacement(None, #no rotation\n placementBackWall, #at\n backLog, #its logical volume\n \"back\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps);\n \n #-----front door-----#\n frontLog = G4LogicalVolume(backSolid, mat_steel, \"frontLog\"); \n dooroffset = 3.0*m\n placementFrontDoor=G4ThreeVector(0,+lengthRoof-dooroffset-backWallthickness,backWallheight)\n \n frontPhysical = G4PVPlacement(None, #no rotation\n placementFrontDoor, #at\n frontLog, #its logical volume\n \"front\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps)\n \n \n \n #----------------------------DETECTOR------------------------------#\n detMaterial = man.FindOrBuildMaterial(\"G4_PLASTIC_SC_VINYLTOLUENE\")\n detSide = 1*m \n detHeight = 2.5*mm \n detSolid = G4Box(\"detSolid\",detSide, detSide,detHeight)\n self.detLog = G4LogicalVolume(detSolid, detMaterial, \"detLog\")\n\n # Detector placements\n detLowerOut = G4PVPlacement(None,\n G4ThreeVector(0.0,0.0,-0.0*m), \n self.detLog,\n \"Lower Detector Out\",\n logicWorld,\n False,\n 3,\n checkOverlaps)\n detLowerIn = G4PVPlacement(None,\n G4ThreeVector(0.0,0.0,-0.0*m+2*detHeight),\n 
self.detLog,\n \"Lower Detector In\",\n logicWorld,\n False,\n 2,\n checkOverlaps)\n detUpperOut = G4PVPlacement(None,\n G4ThreeVector(0.0,0.0,0.8*m-2*detHeight),\n self.detLog,\n \"Upper Detector Out\",\n logicWorld,\n False,\n 1,\n checkOverlaps)\n detUpperIn = G4PVPlacement(None,\n G4ThreeVector(0.0,0.0,0.8*m),\n self.detLog,\n \"Upper Detector In\",\n logicWorld,\n False,\n 0,\n checkOverlaps)\n \n #-------------------------Cubes for testing--------------------------#\n if self.useCubeFile:\n with open(self.cubeFileName, \"r\") as cubeInfo:\n reader = csv.reader(cubeInfo)\n next(reader) # Skip the first line, it's just column titles\n count = 0\n for row in reader:\n centerX = float(row[0])*m\n centerY = float(row[1])*m\n centerZ = float(row[2])*m\n sideLengthHalf = float(row[3])*m\n mat = man.FindOrBuildMaterial(\"G4_\"+row[4])\n cubeSolid = G4Box(\"cube\"+str(count), sideLengthHalf, sideLengthHalf, sideLengthHalf)\n cubeLog = G4LogicalVolume(cubeSolid, mat, \"cubeLog\"+str(count))\n cubePhysical = G4PVPlacement(None,\n G4ThreeVector(centerX, centerY, centerZ),\n cubeLog,\n \"cubePhys\"+str(count),\n logicWorld,\n False,\n 0,\n checkOverlaps)\n count += 1\n else:\n matPu = man.FindOrBuildMaterial(\"G4_Pu\")\n matPb = man.FindOrBuildMaterial(\"G4_Fe\") # note: the \"Pb\" variables are built from iron (G4_Fe), not lead\n cubeX = 0.05*m\n cubeY = cubeX\n cubeZ = cubeX\n \n cubeSolid = G4Box(\"cubeSolid\",cubeX, cubeY,cubeZ)\n cubeLogPb = G4LogicalVolume(cubeSolid, matPb, \"cubeLogFe\")\n cubeLogPu = G4LogicalVolume(cubeSolid, matPu, \"cubeLogPu\")\n cylSolid = G4Tubs(\"cylSolid\", 0, 0.05*m, 0.05*m, 0, 360.0*deg)\n cylLogPb = G4LogicalVolume(cylSolid, matPb, \"cylLogFe\")\n cylLogPu = G4LogicalVolume(cylSolid, matPu, \"cylLogPu\")\n \n cubePhysicalPb = G4PVPlacement(None, #no rotation\n G4ThreeVector(0.3*m, 0.3*m, 0.4*m),\n cubeLogPb, #its logical volume\n \"cubePhysFe\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps) #overlaps checking\n \n cubePhysicalPu = G4PVPlacement(None, #no rotation\n G4ThreeVector(-0.3*m, -0.3*m, 0.4*m), # bad at detecting \"far\" from source\n cubeLogPu, #its logical volume\n \"cubePhysPu\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps) #overlaps checking\n\n cylPhysicalPb = G4PVPlacement(None, #no rotation\n G4ThreeVector(0.3*m, -0.3*m, 0.4*m),\n cylLogPb, #its logical volume\n \"cylPhysFe\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps) #overlaps checking\n\n cylPhysicalPu = G4PVPlacement(None, #no rotation\n G4ThreeVector(-0.3*m, 0.3*m, 0.4*m),\n cylLogPu, #its logical volume\n \"cylPhysPu\", #its name\n logicWorld, #its mother volume\n False, #no boolean operation\n 0, #copy number\n checkOverlaps) #overlaps checking\n \n return physWorld\n\n \n def ConstructSDandField(self):\n muonSensDet = MuonSensitiveDetector(\"MuonSensitiveDetector\", 0.8*m, 0*m, 0, False)\n if self.detLog != None:\n self.detLog.SetSensitiveDetector(muonSensDet)\n \n \nclass FileManager:\n \n fileManInstance = None\n fptree = None\n \n def __init__(self):\n FileManager.fileManInstance = None\n FileManager.fptree = None\n\n def GetFileManager():\n if FileManager.fileManInstance is None:\n FileManager.fileManInstance = FileManager()\n FileManager.fptree = configparser.ConfigParser()\n return FileManager.fileManInstance\n \n def ReadIniFile(self,filename):\n FileManager.fptree.read(filename)\n \n def CreateResultsDir(self):\n name = 
FileManager.fptree.get(\"output\",\"foldername\")\n path = FileManager.fptree.get(\"output\",\"pathname\")\n pathname = path+name\n # slightly different\n try:\n os.mkdir(pathname)\n print(\"results are stored at \"+pathname)\n except:\n print(\"failed to create results directory, maybe it already exists?\")\n \n def WriteIniFile(self,filename):\n method = int(FileManager.fptree.get(\"input\",\"method\"))\n name = FileManager.fptree.get(\"output\",\"foldername\")\n path = FileManager.fptree.get(\"output\",\"pathname\")\n pathname = path+name\n with open(pathname+\"/\"+filename, \"w\") as config_file:\n FileManager.fptree.write(config_file)\n \n \n def GetPropTree(self):\n return FileManager.fptree\n \n def AddValuePropTree(self, section, option, val):\n if not FileManager.fptree.has_section(section):\n FileManager.fptree.add_section(section)\n FileManager.fptree.set(section, option, val)\n\n\nclass PrimaryGeneratorAction(G4VUserPrimaryGeneratorAction):\n \n def __init__(self):\n super().__init__()\n self.fparticleTable = G4ParticleTable.GetParticleTable()\n self.fparticleGun = G4ParticleGun()\n self.fmuonGen = EcoMug()\n fileman = FileManager.GetFileManager()\n ptree = fileman.GetPropTree()\n\n geometry = float(ptree.get(\"ecomug\",\"geometry\"))\n centerX = float(ptree.get(\"ecomug\",\"centerX\"))\n centerY = float(ptree.get(\"ecomug\",\"centerY\"))\n centerZ = float(ptree.get(\"ecomug\",\"centerZ\"))\n\n skySizeX = 0.0\n skySizeY = 0.0\n radius = 0.0\n\n if (geometry == 1): # plane surface\n self.fmuonGen.SetUseSky()\n skySizeX = float(ptree.get(\"sky\",\"sizeX\"))\n skySizeY = float(ptree.get(\"sky\",\"sizeY\"))\n self.fmuonGen.SetSkySize([skySizeX, skySizeY]) # x and y size of plane\n self.fmuonGen.SetSkyCenterPosition([centerX,centerY,centerZ])\n elif geometry == 2: # half-sphere\n self.fmuonGen.SetUseHSphere()\n radius = float(ptree.get(\"halfSphere\",\"radius\"))\n self.fmuonGen.SetHSphereRadius(radius)\n self.fmuonGen.SetHSphereCenterPosition([centerX,centerY,centerZ])\n else:\n print(\"---Problem with EcoMugGeometry!---\")\n\n particleName = \"\"\n self.fparticleAntiMu = self.fparticleTable.FindParticle(\"mu-\")\n self.fparticleMu = self.fparticleTable.FindParticle(\"mu+\")\n\n def GeneratePrimaries(self, anEvent):\n particle = None\n self.fmuonGen.Generate()\n\n muonPos = self.fmuonGen.GetGenerationPosition()\n muon_ptot = self.fmuonGen.GetGenerationMomentum()\n muon_theta = self.fmuonGen.GetGenerationTheta()\n muon_phi = self.fmuonGen.GetGenerationPhi()\n muon_charge = self.fmuonGen.GetCharge()\n \n if(muon_charge > 0):\n particle = self.fparticleMu\n else:\n particle = self.fparticleAntiMu\n self.fparticleGun.SetParticleDefinition(particle)\n self.fparticleGun.SetParticlePosition(G4ThreeVector(muonPos[0]*m,muonPos[1]*m, muonPos[2]*m))\n self.fparticleGun.SetParticleMomentum(G4ThreeVector(\n muon_ptot*math.sin(muon_theta)*math.cos(muon_phi)*GeV, #from spherical to cartesian coodinates\n muon_ptot*math.sin(muon_theta)*math.sin(muon_phi)*GeV,\n muon_ptot*math.cos(muon_theta)*GeV\n ))\n self.fparticleGun.GeneratePrimaryVertex(anEvent)\n\n\nclass RunAction(G4UserRunAction):\n \n def BeginOfRunAction(self,aRun):\n G4RunManager.GetRunManager().SetRandomNumberStore(False)\n fileman = FileManager.GetFileManager()\n ptree = fileman.GetPropTree()\n\n name = ptree.get(\"output\",\"foldername\")\n path = ptree.get(\"output\",\"pathname\")\n pathname = path+name\n\n # fix later\n eventsInRun = aRun.GetNumberOfEventToBeProcessed()\n fileman.AddValuePropTree(\"input\",\"nparticles\", 
str(eventsInRun))\n\n man = G4AnalysisManager.Instance()\n\n # Set up file to output results\n datetimeNow = datetime.datetime.now()\n formatted = datetime.datetime.strftime(datetimeNow, '%a%b%d%H%M')\n filename = pathname+\"/results\"+formatted+\".csv\"\n man.OpenFile(filename)\n man.SetVerboseLevel(1)\n man.CreateNtuple(\"Hits\",\"hits\")\n \n man.CreateNtupleIColumn(\"detectorNo\") # Which detector it hit (actual detector or inside the block)\n man.CreateNtupleDColumn(\"fX\") # PoCA x-coordinate\n man.CreateNtupleDColumn(\"fY\") # y-coordinate\n man.CreateNtupleDColumn(\"fZ\") # z-coordinate\n man.CreateNtupleDColumn(\"theta\") # Scattering angle\n man.FinishNtuple(0)\n \n def EndOfRunAction(self,run):\n nofEvents = run.GetNumberOfEvent()\n if (nofEvents == 0):\n return\n detConstruction = G4RunManager.GetRunManager().GetUserDetectorConstruction()\n man = G4AnalysisManager.Instance() \n man.Write()\n man.CloseFile()\n print(\"\\n-----End of Run-----\")\n \n \nclass MuonSensitiveDetector(G4VSensitiveDetector):\n def __init__(self, SDname, topZ, bottomZ, detectorNo, noise=False):\n super().__init__(SDname)\n self.eventIDtoFirstHitInfo = dict() # Store position+motion vector to calculate PoCA\n self.height = topZ - bottomZ\n self.detectorNo = detectorNo\n self.topZ = topZ\n self.addNoise = noise\n\n def ProcessHits(self,step, ROhist):\n pName = step.GetTrack().GetDefinition().GetParticleName()\n if (pName[0:2] == \"mu\"):\n preCopyNo = step.GetPreStepPoint().GetTouchable().GetVolume().GetCopyNo()\n postCopyNo = step.GetPostStepPoint().GetTouchable().GetVolume().GetCopyNo()\n evt = G4RunManager.GetRunManager().GetCurrentEvent().GetEventID()\n # Going through top layer (entering detector)\n if preCopyNo == 0 and postCopyNo == 1:\n posPreVec = step.GetPreStepPoint().GetPosition() \n posPreTuple = (posPreVec.getX(), posPreVec.getY(), posPreVec.getZ())\n dirVec = step.GetTrack().GetMomentumDirection() \n dirTuple = (dirVec.getX(), dirVec.getY(), dirVec.getZ())\n self.eventIDtoFirstHitInfo[evt] = DetectorHitTrajectory(posPreTuple, dirTuple)\n # Exiting detector (bottom layer)\n elif preCopyNo == 2 and postCopyNo == 3:\n # Find the same muon's information when entering the detector\n if evt in self.eventIDtoFirstHitInfo.keys():\n posPostVec = step.GetPostStepPoint().GetPosition()\n posPostTuple = (posPostVec.getX(), posPostVec.getY(), posPostVec.getZ())\n dirVec = step.GetTrack().GetMomentumDirection()\n dirTuple = (dirVec.getX(), dirVec.getY(), dirVec.getZ())\n firstHitPosPre = self.eventIDtoFirstHitInfo[evt].posPre # Position of muon when entering detector\n firstHitDir = self.eventIDtoFirstHitInfo[evt].direction # Direction of muon when entering detector\n # Get point of closest approach\n closestApproach = POCA(firstHitPosPre, firstHitDir, posPostTuple, dirTuple)\n if self.addNoise:\n closestApproach = (closestApproach[0]+random.gauss(0.0,1.0)/10, closestApproach[1]+random.gauss(0.0,1.0)/10, closestApproach[2]+random.gauss(0.0,1.0)/10) \n approxFirstTrajectory = subtract(closestApproach, firstHitPosPre)\n approxSecondTrajectory = subtract(posPostTuple, closestApproach)\n scatteringAngle = angleBetween(approxFirstTrajectory, approxSecondTrajectory)\n # Remove outliers, many outliers are near the top of the detector, good hits are closer to the material than the top of the detector\n if 1.5 < scatteringAngle < 30 and self.topZ - closestApproach[2] > self.height / 10:\n aMan = G4AnalysisManager.Instance()\n aMan.FillNtupleIColumn(0, self.detectorNo)\n aMan.FillNtupleDColumn(1, 
closestApproach[0])\n aMan.FillNtupleDColumn(2, closestApproach[1])\n aMan.FillNtupleDColumn(3, closestApproach[2])\n aMan.FillNtupleDColumn(4, scatteringAngle)\n aMan.AddNtupleRow(0) \n \n return True \n\n \nclass DetectorHitTrajectory:\n \n def __init__(self, posPre, dirVec):\n self.posPre = posPre\n self.direction = dirVec\n\n \ndef main():\n start = datetime.datetime.now()\n # Can test with a set seed\n random.seed(1234)\n\n fileman = FileManager.GetFileManager()\n fileman.ReadIniFile(\"inputMuons.ini\")\n fileman.CreateResultsDir()\n\n runManager = G4RunManagerFactory.CreateRunManager(G4RunManagerType.Default)\n\n macFileName = sys.argv[1]\n # cubesFileName = sys.argv[2]\n \n det = DetectorConstruction(useCubeFile=True)\n runManager.SetUserInitialization(det)\n\n physicsList = QBBC()\n physicsList.SetVerboseLevel(1)\n runManager.SetUserInitialization(physicsList)\n\n runManager.SetUserInitialization(ActionInitialization(det))\n\n visManager = G4VisExecutive()\n visManager.Initialize()\n\n UImanager = G4UImanager.GetUIpointer()\n \n command = \"/control/execute \"\n UImanager.ApplyCommand(command+macFileName)\n \n stop = datetime.datetime.now()\n duration = (stop - start).seconds//60\n\n fileman.AddValuePropTree(\"output\",\"duration\", str(duration))\n fileman.AddValuePropTree(\"input\",\"seed\", \"1234\")\n fileman.WriteIniFile(\"parameters.ini\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"adityapatwardhan1/bunker-simulation","sub_path":"muonsBunker/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":25737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} {"seq_id":"18441783250","text":"from typing import List\nfrom heapq import heapify, heappop, heappush\nimport math\n\n\nclass Solution:\n def maxKelements(self, nums: List[int], k: int) -> int:\n pq = [-num for num in nums]\n heapify(pq)\n result = 0\n while k:\n num = -heappop(pq)\n heappush(pq, -math.ceil(num / 3))\n result += num\n k -= 1\n return result\n \n","repo_name":"huytq000605/GrindLC","sub_path":"Priority Queue/Maximal Score After Applying K Operations/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} {"seq_id":"19719003382","text":"# this will have 3 neurons\n\ninputs=[1.0,2.0,3.0,2.5]\n\nweights1=[0.2,0.8,-0.5,1]\nweights2=[0.5,-0.91,0.26,-0.5]\nweights3=[-0.26,-0.27,0.17,0.87]\n\nbias1=2\nbias2=3\nbias3=0.5\n\noutputs=[\n\n #neuron 1 \n (inputs[0]*weights1[0]+\n inputs[1]*weights1[1]+\n inputs[2]*weights1[2]+\n inputs[3]*weights1[3]+bias1),\n \n #neuron 2\n (inputs[0]*weights2[0]+\n inputs[1]*weights2[1]+\n inputs[2]*weights2[2]+\n inputs[3]*weights2[3]+bias2),\n\n #neuron 3\n (inputs[0]*weights3[0]+\n inputs[1]*weights3[1]+\n inputs[2]*weights3[2]+\n inputs[3]*weights3[3]+bias3)\n \n ]\nprint(\"#1 Outputs of Neurons: \",outputs)\n\n# Iterating using Loop\n\ninputs=[1.0,2.0,3.0,2.5]\nweights=[[0.2,0.8,-0.5,1],\n [0.5,-0.91,0.26,-0.5],\n [-0.26,-0.27,0.17,0.87]]\nbias=[2,3,0.5]\n\nlayer_outputs=[]\n\nfor neuron_weights, neuron_bias in zip(weights,bias):\n \n neuron_output=0\n \n # accumulate the weighted inputs for this neuron\n for n_input, weight in zip(inputs,neuron_weights):\n neuron_output+=n_input*weight\n \n neuron_output+=neuron_bias\n\n layer_outputs.append(neuron_output)\n\nprint(\"#2 Outputs of Neurons: \",layer_outputs)\n 
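\n# Editor's addition, a minimal sketch: with NumPy, the per-neuron sums above\n# collapse into one matrix-vector product (assumes numpy is installed; the\n# names weights, inputs and bias are the lists defined above).\nimport numpy as np\n\nnp_outputs = np.dot(np.array(weights), np.array(inputs)) + np.array(bias)\nprint(\"#3 Outputs of Neurons (NumPy): \", np_outputs)\n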
","repo_name":"returnzero1-0/my_deream_1","sub_path":"All_Programming/next_gen_tech/neural_net/layer_of_neuron.py","file_name":"layer_of_neuron.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"10373434970","text":"# faça uma estrutura com tudo que aprendeu\n\n#armazenar as notas de 10 alunos em uma lista. a nota \n#cada alumo sera informada pelo teclado\n\nn_alunos = 10\nnotas = []\nfor i in range(n_alunos):\n nota = float(input(\"digite nota: \"))\n notas.append(nota)\n print(notas)\n\nsoma = 0\nfor indice in range(len(notas)):\n #print(indice, end=\">>>>\")\n #print(notas[indice])\n soma = soma + notas[indice]\n #print(f\"soma parcial {soma}\")\n\nmedia = soma/len(notas)\nprint(f\"media final={media}\")","repo_name":"flowingTunic/alissom","sub_path":"aula do alison 2° periodo estrutura de dados 1/01_Vetores/aula02/exercicio_dever.py","file_name":"exercicio_dever.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26712976415","text":"heads = 0\ntails = 0\nnumber = int(input('Enter the number to flip coin :\\n'))\n\nfor i in range(0, number):\n import random\n coin = random.randint(0,1)\n if coin < 0.5:\n print(\"Tails\")\n tails = tails+1\n else:\n print(\"Heads\")\n heads = heads+1\nHeadsPercentage = heads/number*100\nTailsPercentage = tails/number*100\nprint(\"Total Percentage of Head \", HeadsPercentage)\nprint(\"Total Percentage of Tails \", TailsPercentage)","repo_name":"biradarajay1904/Python_Basic_Programs","sub_path":"filpcoin.py","file_name":"filpcoin.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16767408894","text":"'''\n *\n *\tProblem: \"DRM Messages\"\n *\n'''\n\nSTARTASCII = 65\nNUMLETTERS = 26\n\n'''-----getRotValue-----'''\ndef getRotValue(input):\n\trotValue = 0\n\tfor c in input:\n\t\trotValue += ord(c) - STARTASCII\n\trotValue = rotValue % NUMLETTERS\n\treturn rotValue\n\t\n'''-----rotate-----'''\ndef rotate(input):\n\trotValue = getRotValue(input)\n\trotated = \"\"\n\tfor c in input:\n\t\tvalue = ord(c) - STARTASCII\n\t\tnewValue = STARTASCII + ((value + rotValue) % NUMLETTERS)\n\t\trotated += chr(newValue)\n\treturn(rotated)\n\t\n'''-----merge-----'''\ndef merge(inputA, inputB):\n\tmerged = \"\"\n\tfor i in range(len(inputA)):\n\t\tc = ord(inputA[i:i+1]) - STARTASCII\n\t\trotateBy = ord(inputB[i:i+1]) - STARTASCII\n\t\tnewC = ( (c + rotateBy) % NUMLETTERS ) + STARTASCII\n\t\tmerged += chr(newC)\n\treturn merged\n\t\n'''-----main-----'''\ninput = input()\t\t\t\n\n# step 1: divide\nlenHalf = len(input) // 2\nfirstHalf = input[:lenHalf]\nsecondHalf = input[lenHalf:]\n\n# step 2: rotate\nrotatedFirst = rotate(firstHalf)\nrotatedSecond = rotate(secondHalf)\n\n# step 3: merge\nresult = merge(rotatedFirst, rotatedSecond)\n\nprint(result)","repo_name":"HQovaizi/Kattis-Solutions","sub_path":"Python/drmmessages.py","file_name":"drmmessages.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43436749523","text":"import pickle\nfrom matplotlib import pyplot as plt\nfrom os import listdir\nimport torch\nimport io\nimport numpy as np\nfrom models.utils import *\nfrom dm_control import suite\nfrom collections import defaultdict\nimport pandas as pd\n\n\nclass 
CPU_Unpickler(pickle.Unpickler):\n def find_class(self, module, name):\n if module == 'torch.storage' and name == '_load_from_bytes':\n return lambda b: torch.load(io.BytesIO(b), map_location='cpu')\n else: return super().find_class(module, name)\n\ndef plot_dict(dictionary, dom, task):\n for i, (k, (v, args)) in enumerate(dictionary[dom+task].items()):\n xaxis = torch.arange(v.size(1))*args.eval_freq*1000\n s_i = v.mean(dim=0).mean(dim=-1)# - i*50\n plt.plot(xaxis, s_i, linewidth=3, label=f'alpha1: {args.alpha}, lmbd: {args.lmbd}, context: {args.chunk_length}, lr: {args.lr}, nh: {args.hidden_dims}', color=f'C{i}')\n plt.legend()\n plt.grid()\n plt.title(dom+ ' - ' + task)\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n #if dom == 'cheetah':\n plt.tight_layout()\n #plt.savefig(f'dmc_mf_results/figs/{dom+task}.png')\n plt.show()\n\ndef plot_dict_sac(dictionary, dom, task):\n for i, (k, (v, args)) in enumerate(dictionary[dom+task].items()):\n xaxis = torch.arange(v.size(1))*args.eval_freq*1000\n s_i = v.mean(dim=0).mean(dim=-1)# - i*50\n plt.plot(xaxis, s_i, linewidth=3, label=f'alpha: {args.alpha}, context: {args.max_chunk_length}', color=f'C{i}')\n plt.legend()\n plt.grid()\n plt.title(dom+ ' - ' + task)\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n #if dom == 'cheetah':\n plt.tight_layout()\n #plt.savefig(f'dmc_mf_results/figs/{dom+task}.png')\n plt.show()\n\n\ndef get_eval_stats(eval_tensor):\n mean_esample = eval_tensor.mean(dim=-1)\n mean = mean_esample.mean(dim=0)\n\n iqr = np.percentile(mean_esample, [80, 20], axis=0)\n lower = iqr[0]\n upper = iqr[1]\n std = mean_esample.std(dim=0)\n return mean, lower, upper#lower, upper#iqr\n\ndef get_best(dictionary, doms, tasks):\n best_d = {}\n for dom, task in zip(doms, tasks):\n best_score = 0\n #best_model = 0\n for i, (k, (v, args)) in enumerate(dictionary[dom+task].items()):\n #xaxis = torch.arange(v.size(1))*args.eval_freq*1000\n s_i = v.mean(dim=0).mean(dim=-1)# - i*50\n if s_i[-1] > best_score:\n best_model = (v, args)\n best_score = s_i[-1]\n\n best_d[dom+task] = (best_model)#= {}\n #best_d[dom+task][0] = best_model#[1]\n return best_d\n\n\ndef get_all_scores():\n\n doms = ['cheetah', 'walker', 'hopper', 'hopper', 'quadruped', 'fish', 'reacher', 'acrobot']\n tasks = ['run', 'run', 'hop', 'stand', 'walk', 'swim', 'hard', 'swingup']\n\n files = listdir('results/rpc/')\n scoredict_lz = defaultdict(dict)\n scoredict_sac = defaultdict(dict)\n score_dict_rpc = defaultdict(dict)\n score_dict_transformer = defaultdict(dict)\n score_dict_miracle = defaultdict(dict)\n score_dict_pretrained = defaultdict(dict)\n #agent_dict = defaultdict(dict)\n results_folders = ['lzsac', 'lzsac_old', 'sac', 'sac_old', 'transformer', 'rpc', 'miracle', 'sac_pretrained']\n dicts = [scoredict_lz, scoredict_lz, scoredict_sac, scoredict_sac, score_dict_transformer, score_dict_rpc, score_dict_miracle, score_dict_pretrained]\n\n for i, (folder, results_dict) in enumerate(zip(results_folders, dicts)):\n files = listdir(f'results/{folder}/')\n for file in files:\n for (dom, task) in zip(doms, tasks):\n with open(f'results/{folder}/{file}', 'rb') as f:\n #agents, all_scores, args = CPU_Unpickler(f).load()#pickle.load(f)#CPU_Unpickler(f).load()#CPU_Unpickler(f).load()#pickle.load(f)#CPU_Unpickler(f).load()\n out = CPU_Unpickler(f).load()\n if len(out) == 3:\n agents, all_scores, args = out\n else:\n print(folder, file)\n (rewards, all_scores, agents, compression_sizes, args) = out#CPU_Unpickler(f).load()\n\n if dom == args.dom_name and 
task in args.task_name:\n                        if not hasattr(args, 'lmbd'):\n                            args.lmbd = None\n                        if not hasattr(args, 'hidden_dims'):\n                            args.hidden_dims = args.hidden\n                        if not hasattr(args, 'eval_freq'):\n                            args.eval_freq = 20\n                        results_dict[dom+task][str(args.alpha)+str(args.lmbd)+str(args.hidden_dims)] = (all_scores, args)\n\n    all_score_dicts = [scoredict_lz, score_dict_transformer, scoredict_sac, score_dict_rpc, score_dict_miracle, score_dict_pretrained]\n    return all_score_dicts\n\n\n\ndef plot_all_learning_curves(model_names, colors, best_dicts, fig_name=None, lwd=3, alpha=0.1, n_rows=2, n_cols=4, basp=1, v_marg=10, labelsize=30, tick_size=18, w_l=True, snfs=24):\n    doms = ['hopper', 'hopper', 'quadruped', 'walker', 'cheetah', 'fish', 'acrobot', 'reacher']\n    tasks = ['stand', 'hop', 'walk', 'run', 'run', 'swim', 'swingup', 'hard']\n    # model_names = ['LZ-SAC', 'SPAC', 'SAC', 'MIRACLE', 'RPC']\n    # colors = ['#0251bf', '#6cacf0', '#e85415', '#f08e65', 'purple']\n    fig, axs = plt.subplots(n_rows, n_cols, figsize=(20, v_marg), subplot_kw=dict(box_aspect=basp))\n    plot_idx = torch.cartesian_prod(*[torch.arange(n_rows), torch.arange(n_cols)])\n    plt.tick_params(axis='both', which='major', labelsize=15)\n    for i, (dom, task, p_idx) in enumerate(zip(doms, tasks, plot_idx)):\n        idx1, idx2 = p_idx.tolist()\n        for j, (name, color) in enumerate(zip(model_names, colors)):\n            BD = best_dicts[name]\n            returns, args = BD[dom+task]\n            xaxis = torch.arange(returns.size(1))*args.eval_freq*1000\n            scores, upper, lower = get_eval_stats(returns)\n            axs[idx1, idx2].plot(xaxis, scores, linewidth=lwd, label=f'{name}', color=f'{color}')\n            axs[idx1, idx2].fill_between(xaxis, lower, upper, color=f'{color}', alpha=alpha)\n\n            axs[idx1, idx2].tick_params(axis='both', which='major', labelsize=tick_size)\n            axs[idx1, idx2].set_title(f'{dom} {task}', fontsize=labelsize)\n            axs[idx1, idx2].xaxis.offsetText.set_fontsize(snfs)\n            if idx2 == 0:\n                axs[idx1, idx2].set_ylabel('Episode return', fontsize=labelsize)\n            if idx1 == 1:\n                axs[idx1, idx2].set_xlabel('Step', fontsize=labelsize)\n\n\n    if w_l:\n        axs[0, 0].legend(prop={'size': 17}, framealpha=1, loc = \"upper left\")\n    plt.tight_layout()\n    if fig_name is not None:\n        plt.savefig(f'figures/rpc/{fig_name}.png')\n","repo_name":"tankred-saanum/simple_priors","sub_path":"results_analysis/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12357861363","text":"from tkinter import *\nfrom tkinter import messagebox, ttk\n# libraries for sending the PDF document by e-mail\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n# libraries for date handling\nfrom datetime import datetime\n# libraries for the connection to the Mongo database\nfrom pymongo import MongoClient\n# libraries for creating the PDF file\nfrom reportlab.pdfgen import canvas\n\n\n# definition of the main window\nraiz = Tk()\n# definition of the application menu\nVarNombres = StringVar()\nrb_opcion = IntVar()\nresidentes=[]\nresidente=[]\nlista_pagos=[]\nproyecto = 0\n# Temporary until the Mongo query that fetches the house values is in place.
\ncasas = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14']\n\ndef conexion_mongo():\n \"\"\"Funcion que brinda la conexion con la BD Mongo\"\"\"\n #Apertura de la conexión con la BD de Mongo\n cliente = MongoClient('mongodb://localhost:27017/')\n #La Base de Datos se llama Balcones\n db = cliente.Balcones\n return db\n\ndef generate_pdf_cuota():\n \n db = conexion_mongo()\n collection_Detalle_factura_tmp = db.Detalle_factura_tmp\n ejey = 0\n # Create the canvas\n nombre_archivo = \"Cuota_Casa_\" + casa_facturacion + \"_\" + str(numero_recibo) + \".pdf\"\n pdf_canvas = canvas.Canvas(nombre_archivo, pagesize=(400,550))\n\n # Cuadrante superior donde esta el nombre del conjunto\n pdf_canvas.setFillColorRGB(211/255, 211/255, 211/255) \n pdf_canvas.roundRect(10, 490, 380, 50, 4, stroke=1, fill=1)\n \n #Segundo cuadrante\n pdf_canvas.setFillColorRGB(210/255, 210/255, 225/255)\n pdf_canvas.roundRect(10, 442, 380, 45, 4, stroke=1, fill=1)\n\n #Cuadrante central\n pdf_canvas.setFillColorRGB(210/255, 255/255, 210/255) \n pdf_canvas.roundRect(10, 10, 380, 428, 4, stroke=1, fill=1)\n\n #Cuandrante del numero del comprobante\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(250, 445, 135, 40, 4, stroke=1, fill=1)\n\n pdf_canvas.line(10,156,390,156)\n pdf_canvas.line(10,416,390,416)\n #pdf_canvas.setDash(4,3)\n #pdf_canvas.line(100,158,260,158)\n\n pdf_canvas.setFillColorRGB(0,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(60, 515, \"DUQUE OCHOA PROPIEDAD HORIZONTAL\")\n pdf_canvas.setFont(\"Helvetica\", 8)\n pdf_canvas.drawString(160, 505, \"Nit: 900.740.113-3\")\n pdf_canvas.drawString(150, 495, \"Cra 8a # 20a-138 / 148\")\n\n #pdf_canvas.setFont(\"Courier\", 10)\n pdf_canvas.setFont(\"Helvetica-Bold\", 10)\n pdf_canvas.drawString(255, 468, \"CUOTA ADMINISTRACIÓN\")\n pdf_canvas.setFillColorRGB(255,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(310, 450, str(numero_recibo))\n\n pdf_canvas.setFont(\"Times-Bold\", 10)\n pdf_canvas.setFillColorRGB(0, 0, 0)\n pdf_canvas.drawString(25, 470 , \"Fecha: \")\n pdf_canvas.drawString(145, 470, \"Ciudad: \")\n pdf_canvas.drawString(50, 450, \"Casa No:\")\n pdf_canvas.setFont(\"Times-Bold\", 12)\n pdf_canvas.drawString(25, 420, \"Ref Concepto Valor Fecha\")\n pdf_canvas.drawString(25, 160, \"Resumen\")\n\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(25, 140, \"Pagos Recibidos\")\n pdf_canvas.drawString(25, 125, \"Cuotas pagadas\")\n pdf_canvas.drawString(25, 110, \"Cuotas atrasadas\")\n pdf_canvas.drawString(25, 95, \"Saldo en mora\")\n pdf_canvas.drawString(25, 80, \"Cargos del mes\")\n pdf_canvas.drawString(25, 65, \"Cuotas facturadas\")\n pdf_canvas.drawString(25, 50, \"Total a pagar\")\n\n pdf_canvas.setFont(\"Courier-Oblique\", 7.5)\n pdf_canvas.drawString(25, 30, \"\"\"\n Para pagos Numero de cuenta de Ahorros Banco Davivienda: 462500020963\n \"\"\")\n pdf_canvas.drawString(25, 20, \"Este es su cuota de administración para pago en los 5 primeros dias del mes\")\n \n #llenado de la informacion\n pdf_canvas.setFont(\"Courier-Bold\", 14)\n pdf_canvas.drawString(100, 450, casa_facturacion)\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(60, 470, fecha_facturacion)\n pdf_canvas.drawString(195, 470, \"Cajica\")\n\n #parte del detalle de los movimientos.\n ejey = 400 \n for document in collection_Detalle_factura_tmp.find({'Casa': casa_facturacion}):\n valor_tmp = document['Referencia']\n 
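# Each pending item is drawn on its own row: ejey starts at 400 and steps down 19pt per row, so roughly a dozen rows fit above the summary rule at y=156.\n            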
pdf_canvas.drawString(25, ejey, str(valor_tmp))\n valor_tmp = document['Concepto']\n pdf_canvas.drawString(60, ejey, valor_tmp)\n valor_tmp = document['Valor']\n pdf_canvas.drawString(260, ejey, str(valor_tmp))\n pdf_canvas.drawString(325, ejey, \"02/09/2023\")\n ejey -= 19\n\n #llenado de la parte de resumen\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n resumen_total_a_pagar = resumen_saldo_cuotas_atrasadas + resumen_cargos_del_mes\n resumen_cuotas_pendientes = resumen_cuotas_atrasadas + 1 \n pdf_canvas.drawString(175, 140, str(resumen_valor_pagado))\n pdf_canvas.drawString(175, 125, str(resumen_cuotas_pagadas))\n pdf_canvas.drawString(175, 110, str(resumen_cuotas_atrasadas))\n pdf_canvas.drawString(175, 95, str(resumen_saldo_cuotas_atrasadas))\n pdf_canvas.drawString(175, 80, str(resumen_cargos_del_mes))\n pdf_canvas.drawString(175, 65, str(resumen_cuotas_pendientes))\n pdf_canvas.drawString(175, 50, str(resumen_total_a_pagar))\n\n # Save the PDF\n pdf_canvas.save()\n #Borramos la tabla temporal para que no vuelvan a aparecer\n collection_Detalle_factura_tmp.delete_many({})\n\ndef facturacion():\n \"\"\"Metodo que me genera la facturacion mes a mes de los residentes\"\"\"\n db = conexion_mongo()\n global casa_facturacion\n global factura_valor_pago\n global numero_recibo\n global factura_fecha\n global factura_tipo\n global pago_encontrado\n global resumen_cuotas_pagadas\n global resumen_valor_pagado\n global resumen_cuotas_atrasadas\n global resumen_saldo_cuotas_atrasadas\n global resumen_cargos_del_mes\n global fecha_facturacion\n\n collection_pagos = db.Pagos\n collection_recibos = db.Recibos\n collection_configuracion = db.Configuracion\n collection_estados = db.Estados\n collection_detalle_facturas_tmp = db.Detalle_factura_tmp\n collection_movimientos = db.Movimientos\n \n #Tabla configuracion para saber cual es el ultimo recibo generado. 
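# (besides the receipt counter, this collection also stores the active month, year and fee amount, read just below)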
\n consulta_tabla = collection_configuracion.find_one(\n {\n 'Ultimo_recibo_factura': {'$gt': 0}\n },\n {'Ultimo_recibo_factura': 1, '_id': 0}\n )\n numero_recibo = consulta_tabla['Ultimo_recibo_factura']\n\n consulta_mes = collection_configuracion.find_one(\n {\n 'Estado_mes': 'Activo'\n },\n {'Mes': 1, 'Numero_mes': 1, '_id': 0}\n )\n mes_actual = consulta_mes['Mes']\n numero_mes_actual = consulta_mes['Numero_mes']\n\n consulta_anno = collection_configuracion.find_one(\n {\n 'Estado_anno_actual': 'Activo'\n },\n {'Anno_actual': 1, '_id': 0}\n )\n anno_actual = consulta_anno['Anno_actual']\n fecha_facturacion = '01/'+ str(numero_mes_actual) +'/'+anno_actual\n\n consulta_val_cuota = collection_configuracion.find_one(\n {\n 'Estado_valor_cuota': 'Activo'\n },\n {'Valor_cuota_administracion': 1, '_id': 0}\n )\n valor_cuota = consulta_val_cuota['Valor_cuota_administracion']\n\n #Punto principal del proceso de facturacion\n for facturacion1 in casas:\n casa_facturacion = facturacion1\n numero_recibo +=1\n #Se inician los valores de las facturas por cada ciclo que se realiza.\n amortizacion = 0\n factura_valor_pago = 0\n resumen_cuotas_pagadas = 0\n resumen_valor_pagado = 0\n resumen_cuotas_atrasadas = 0\n resumen_saldo_cuotas_atrasadas = 0\n resumen_cargos_del_mes = 0\n pago_encontrado = False\n #valido cual es el estado de la casa\n print(f'Comienza facturacion casa - {facturacion1}')\n \n #Se valida el estado de la casa\n for estados in collection_estados.find({'Casa': facturacion1}):\n estado_casa = estados['Estado']\n print(f\"Se busca el estado actual de la casa que es {estado_casa}.\")\n \n ultima_factura_casa = estados['Ultima_factura']\n print(f'Ultima factura generada es la {ultima_factura_casa}')\n print(f\"El valor pendiente de pago es: {estados['Deuda_pendiente']} \")\n \n facturas_pendientes_estados = estados['Facturas_pendientes']\n print(f'Facturas pendientes: {facturas_pendientes_estados}')\n \n #Si ingresa al for es que encontro un pago realizado dentro del mes\n #Este for tambien comienza a hacer el proceso de cruce de los pagos realizados contra las cuotas pendientes.\n for document in collection_pagos.find({'Casa': facturacion1, 'Estado': 'Pendiente'}): \n pago_encontrado = True\n resumen_cuotas_pagadas += 1\n resumen_valor_pagado += document['Valor']\n print('Se encontro pago realizado en el mes.')\n \n amortizacion = document['Valor']\n factura_fecha = document['Fecha']\n\n #ingresare un registro en la tabla de detalle facturas para despues escribirlos en el documento\n concepto = 'Pago recibido ' + document['Tipo']\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': document['No Recibo'],\n 'Concepto': concepto,\n 'Valor': document['Valor'],\n 'Fecha': document['Fecha']\n })\n \n for document1 in collection_recibos.find({'Casa': facturacion1, 'Estado': 'Pendiente'}):\n\n #Se comienza a realizar la amortizacion del pago realizado con la deuda pendiente\n print(f'El valor de la amortizacion antes: {amortizacion}')\n amortizacion -= document1['Valor']\n print(f'El valor de la amortizacion despues: {amortizacion}')\n \n #Comienza la amortizacion del pago realizado.\n if amortizacion == 0:\n #Al estar al dia solo debe tener un recibo pendiente de pago por el cual este es el unico que sera \n #actualizado\n collection_recibos.update_one({'Casa': facturacion1,\n 'Estado': 'Pendiente',\n 'Recibo': document1['Recibo'] },\n {'$set': {\n 'Estado': 'Pagada'\n }}) \n collection_pagos.update_one({'Casa': facturacion1,\n 'No Recibo': document['No 
Recibo']\n },\n {'$set': {'Estado': 'Procesada'}\n }\n )\n collection_movimientos.insert_one({'Num_pago': document['No Recibo'],\n 'Valor': document['Valor'],\n 'Num_recibo': document1['Recibo'],\n 'Valor_recibo': document1['Valor'],\n 'Pendiente_recibo': 0,\n 'Saldo_a_favor': 0\n }) \n concepto = 'Cancelacion cuota ' + document1['Mes']\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': document1['Recibo'],\n 'Concepto': concepto,\n 'Valor': document1['Valor'],\n 'Fecha': document1['Fecha']\n })\n collection_estados.update_one({'Casa':facturacion1},\n {'$set': {'Estado': 'Al dia',\n 'Deuda_pendiente': 0,\n 'Ultimo_pago': document['No Recibo'],\n 'Valor_ultimo_pago': document1['Valor'],\n 'Valor_ultimo_recibo': document['Valor']} }\n )\n elif amortizacion < 0:\n amortizacion = amortizacion * (-1)\n collection_estados.update_one({'Casa':facturacion1},\n {'$set': {'Estado': 'En mora',\n 'Deuda_pendiente': amortizacion,\n 'Ultimo_pago': document['No Recibo'],\n 'Valor_ultimo_pago': document1['Valor'],\n 'Valor_ultimo_recibo': document['Valor']} }\n )\n collection_recibos.update_one({'Casa': facturacion1,\n 'Estado': 'Pendiente',\n 'Recibo': document1['Recibo'] },\n {'$set': {\n 'Valor': amortizacion\n }})\n collection_pagos.update_one({'Casa': facturacion1,\n 'No Recibo': document['No Recibo']\n },\n {'$set': {'Estado': 'Procesada'}\n }\n )\n collection_movimientos.insert_one({'Num_pago': document['No Recibo'],\n 'Valor': document['Valor'],\n 'Num_recibo': document1['Recibo'],\n 'Valor_recibo': document1['Valor'],\n 'Pendiente_recibo': amortizacion,\n 'Saldo_a_favor': 0\n })\n concepto = 'Amortización cuota ' + document1['Mes']\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': document1['Recibo'],\n 'Concepto': concepto,\n 'Valor': document1['Valor'],\n 'Fecha': document1['Fecha']\n })\n elif amortizacion > 0:\n collection_recibos.update_one({'Casa': facturacion1,\n 'Estado': 'Pendiente',\n 'Recibo': document1['Recibo'] },\n {'$set': {\n 'Estado': 'Pagada'\n }})\n collection_movimientos.insert_one({'Num_pago': document['No Recibo'],\n 'Valor': document['Valor'],\n 'Num_recibo': document1['Recibo'],\n 'Valor_recibo': document1['Valor'],\n 'Pendiente_recibo': 0,\n 'Saldo_a_favor': amortizacion\n })\n collection_estados.update_one({'Casa':facturacion1},\n {'$set': {'Estado': 'Saldo a favor',\n 'Deuda_pendiente': 0,\n 'Saldo_a_favor': amortizacion,\n 'Ultimo_pago': document['No Recibo'],\n 'Valor_ultimo_pago': document1['Valor'],\n 'Valor_ultimo_recibo': document['Valor']} }\n )\n concepto = 'Cancelación cuota ' + document1['Mes']\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': document1['Recibo'],\n 'Concepto': concepto,\n 'Valor': document1['Valor'],\n 'Fecha': document1['Fecha']\n })\n collection_pagos.update_one({'Casa': facturacion1,\n 'No Recibo': document['No Recibo']\n },\n {'$set': {'Valor': amortizacion}\n }\n )\n \n for document in collection_recibos.find({'Casa': facturacion1, 'Estado': 'Pendiente'}):\n resumen_cuotas_atrasadas += 1\n resumen_saldo_cuotas_atrasadas += document['Valor']\n concepto = 'Cuota pendiente mes ' + document['Mes']\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': document['Recibo'],\n 'Concepto': concepto,\n 'Valor': document['Valor'],\n 'Fecha': document['Fecha']\n })\n\n #Genero el registro de la factura en la tabla de recibos\n resumen_cargos_del_mes = valor_cuota - amortizacion\n collection_recibos.insert_one({'Casa': 
facturacion1,\n 'Recibo': numero_recibo,\n 'Fecha': '2023-10-01',\n 'Anno': anno_actual,\n 'Valor': valor_cuota - amortizacion,\n 'Mes': mes_actual,\n 'Estado': 'Pendiente'\n })\n concepto = 'Facturación cuota ' + mes_actual\n collection_detalle_facturas_tmp.insert_one({'Casa': facturacion1,\n 'Referencia': numero_recibo,\n 'Concepto': concepto,\n 'Valor': valor_cuota - amortizacion,\n 'Fecha': fecha_facturacion\n })\n generate_pdf_cuota()\n collection_detalle_facturas_tmp.delete_many({'Casa': facturacion1})\n\ndef ventana():\n \"\"\"Metodo que crea la ventana principal de la aplicación\"\"\"\n global frame4\n raiz.title(\"Balcones de Capellania\")\n raiz.geometry(\"850x400\")\n barra_menu = Menu(raiz)\n menu_bancos = Menu(barra_menu, tearoff=0)\n menu_facturacion = Menu(barra_menu, tearoff=0)\n menu_configuracion = Menu(barra_menu, tearoff=0)\n #Componentes del menu\n barra_menu.add_cascade(label=\"Bancos\", menu=menu_bancos)\n barra_menu.add_cascade(label=\"Facturación\", menu=menu_facturacion)\n barra_menu.add_cascade(label=\"Configuracion\", menu=menu_configuracion)\n\n menu_bancos.add_command(label=\"Estado Bancos\")\n menu_facturacion.add_command(label=\"Generar facturación\", command=facturacion)\n menu_configuracion.add_command(label=\"Configuración\")\n raiz.config(menu=barra_menu)\n frame4 = LabelFrame(raiz, text=\" CUADRO PRINCIPAL \")\n frame4.grid(row=0, column=1, rowspan=4)\n frame4.config(width=680, height=370)\nclass ConexionMongo:\n \"\"\"Clase que me permite realizar las conexiones a la BD de datos de mongoDB\"\"\"\n def __init__(self) -> None:\n pass\nclass Predio:\n \"\"\"Clase que me brinda las opciones de los predios del conjunto\"\"\"\n print('Ingresa a la clase de Predio')\nclass Persona:\n \"\"\"Clase que me brinda las opciones para crear personas dentro del conjunto\"\"\"\n def __init__(self, nombre, tipid, numid):\n self.nombre = nombre\n self.tipid = tipid\n self.numid = numid\n\n print(\"Este es temporal\")\n\ndef generate_pdf_soporte_pago():\n \"\"\"Metodo que genera formato de soporte de pago\"\"\"\n # Create the canvas\n \n valor_residente = combo_residente.get()\n valor_col5 = combo_casa.get()\n valor_col6 = val_pago.get()\n valor_col7 = pago_fecha.get()\n valor_col8 = val_concepto.get()\n valor_col9 = val_numtrans.get()\n valor_col10 = val_banco.get()\n\n nombre_fichero = 'Recibo' + str(no_recibo) + '.pdf'\n pdf_canvas = canvas.Canvas(nombre_fichero, pagesize=(400,300))\n \n # Cuadrante superior donde esta el nombre del conjunto\n pdf_canvas.setFillColorRGB(211/255, 211/255, 211/255) \n pdf_canvas.roundRect(10, 240, 380, 50, 4, stroke=1, fill=1)\n \n #Cuadrante central\n pdf_canvas.setFillColorRGB(210/255, 255/255, 210/255) \n pdf_canvas.roundRect(10, 10, 380, 177, 4, stroke=1, fill=1)\n \n pdf_canvas.setFillColorRGB(210/255, 210/255, 225/255)\n pdf_canvas.roundRect(10, 192, 380, 45, 4, stroke=1, fill=1)\n\n #Cuadrante del numero del comprobante\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(255, 195, 125, 40, 4, stroke=1, fill=1)\n\n #Cuadrado del campo Efectivo\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(82, 96, 15, 15, 4, stroke=1, fill=1)\n\n #Cuadrado del campo Consignación\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(209, 96, 15, 15, 4, stroke=1, fill=1)\n\n #Cuadrado del campo Transferencia\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(342, 96, 15, 15, 4, stroke=1, fill=1)\n\n pdf_canvas.setDash(4,3)\n 
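# setDash(4, 3) switches to a dashed stroke (4pt dashes, 3pt gaps) for the two fill-in rules drawn below under \"Pagado Por\" and \"La Suma de\".\n    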
pdf_canvas.line(100,158,280,158)\n pdf_canvas.line(100,122,280,122)\n\n pdf_canvas.setFillColorRGB(0,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(60, 265, \"DUQUE OCHOA PROPIEDAD HORIZONTAL\")\n pdf_canvas.setFont(\"Helvetica\", 8)\n pdf_canvas.drawString(160, 255, \"Nit: 900.740.113-3\")\n pdf_canvas.drawString(150, 245, \"Cra 8a # 20a-138 / 148\")\n \n #pdf_canvas.setFont(\"Courier\", 10)\n pdf_canvas.setFont(\"Helvetica-Bold\", 10)\n pdf_canvas.drawString(270, 218, \"SOPORTE DE PAGO\")\n pdf_canvas.setFillColorRGB(255,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(310, 200, str(no_recibo))\n \n pdf_canvas.setFont(\"Times-Bold\", 12)\n pdf_canvas.setFillColorRGB(0, 0, 0)\n pdf_canvas.drawString(45, 220 , \"Fecha: \")\n pdf_canvas.drawString(45, 200, \"Ciudad: \")\n pdf_canvas.drawString(295, 160, \"Casa No:\")\n pdf_canvas.setFont(\"Times-Roman\", 14)\n pdf_canvas.drawString(25, 160, \"Pagado Por: \")\n pdf_canvas.setFont(\"Times-Roman\", 12)\n pdf_canvas.drawString(25, 125, \"La Suma de:\")\n \n pdf_canvas.drawString(25, 100, \"Efectivo Consignacion Transferencia\")\n \n pdf_canvas.setFont(\"Helvetica-Bold\", 10)\n pdf_canvas.drawString(25, 75, \"Observaciones:\")\n pdf_canvas.drawString(25, 50, \"Transacción No:\")\n pdf_canvas.drawString(195, 50, \"Entidad Financiera:\")\n pdf_canvas.setFont(\"Courier-Oblique\", 7.5)\n pdf_canvas.drawString(25, 30, \"Numero de cuenta de Ahorros Banco Davivienda: 462500020963\")\n pdf_canvas.drawString(25, 20, \"Este soporte es su comprobante del pago realizado a la administración\")\n \n #llenado de la informacion\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(95, 220, str(valor_col7))\n pdf_canvas.drawString(95, 200, \"Cajicá\")\n pdf_canvas.setFont(\"Courier-Bold\", 14)\n pdf_canvas.drawString(350, 160, str(valor_col5))\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(105, 160, valor_residente)\n pdf_canvas.setFont(\"Courier-Bold\", 14)\n pdf_canvas.drawString(135, 125, str(valor_col6))\n pdf_canvas.drawString(110, 50, valor_col9)\n pdf_canvas.drawString(295, 50, valor_col10)\n if tipo == 'Efectivo':\n pdf_canvas.drawString(85, 100, 'x')\n elif tipo == 'Consignación':\n pdf_canvas.drawString(212, 100, 'x')\n else:\n pdf_canvas.drawString(345, 100, 'x')\n\n pdf_canvas.setFont(\"Courier\", 8)\n pdf_canvas.drawString(105, 75, valor_col8)\n \n # Save the PDF\n pdf_canvas.save()\n\ndef generate_pdf_recibo_administracion():\n \"\"\"Metodo que genera formato de \"\"\"\n # Create the canvas\n pdf_canvas = canvas.Canvas(\"Recibo.pdf\", pagesize=(400,300))\n \n # Cuadrante superior donde esta el nombre del conjunto\n pdf_canvas.setFillColorRGB(211/255, 211/255, 211/255) \n pdf_canvas.roundRect(10, 240, 380, 50, 4, stroke=1, fill=1)\n \n #Cuadrante central\n pdf_canvas.setFillColorRGB(210/255, 255/255, 210/255) \n pdf_canvas.roundRect(10, 10, 380, 177, 4, stroke=1, fill=1)\n \n pdf_canvas.setFillColorRGB(210/255, 210/255, 225/255)\n pdf_canvas.roundRect(10, 192, 380, 45, 4, stroke=1, fill=1)\n\n #Cuadrante del numero del comprobante\n pdf_canvas.setFillColorRGB(255/255, 255/255, 255/255) \n pdf_canvas.roundRect(255, 195, 125, 40, 4, stroke=1, fill=1)\n\n pdf_canvas.setDash(4,3)\n pdf_canvas.line(100,158,260,158)\n pdf_canvas.line(100,122,260,122)\n\n pdf_canvas.setFillColorRGB(0,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(60, 265, \"DUQUE OCHOA PROPIEDAD HORIZONTAL\")\n pdf_canvas.setFont(\"Helvetica\", 8)\n pdf_canvas.drawString(160, 
255, \"Nit: 900.740.113-3\")\n pdf_canvas.drawString(150, 245, \"Cra 8a # 20a-138 / 148\")\n\n #pdf_canvas.setFont(\"Courier\", 10)\n pdf_canvas.setFont(\"Helvetica-Bold\", 10)\n pdf_canvas.drawString(270, 218, \"SOPORTE DE PAGO\")\n pdf_canvas.setFillColorRGB(255,0,0)\n pdf_canvas.setFont(\"Helvetica-Bold\", 14)\n pdf_canvas.drawString(310, 200, \"560\")\n \n pdf_canvas.setFont(\"Times-Bold\", 12)\n pdf_canvas.setFillColorRGB(0, 0, 0)\n pdf_canvas.drawString(45, 220 , \"Fecha: \")\n pdf_canvas.drawString(45, 200, \"Ciudad: \")\n pdf_canvas.drawString(280, 160, \"Casa No:\")\n pdf_canvas.setFont(\"Times-Roman\", 14)\n pdf_canvas.drawString(25, 160, \"Pagado Por: \")\n pdf_canvas.setFont(\"Times-Roman\", 12)\n pdf_canvas.drawString(25, 125, \"La Suma de:\")\n\n pdf_canvas.drawString(25, 100, \"Efectivo Consignacion Transferencia\")\n\n pdf_canvas.setFont(\"Helvetica-Bold\", 10)\n pdf_canvas.drawString(25, 70, \"Transacción No:\")\n pdf_canvas.drawString(25, 50, \"Entidad Financiera:\")\n pdf_canvas.setFont(\"Courier-Oblique\", 7.5)\n pdf_canvas.drawString(25, 30, \"Numero de cuenta de Ahorros Banco Davivienda: 462500020963\")\n pdf_canvas.drawString(25, 20, \"Este soporte es su comprobante del pago realizado a la administración\")\n\n #llenado de la informacion\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(95, 220, \"05/09/2023\")\n pdf_canvas.drawString(95, 200, \"Cajica\")\n pdf_canvas.setFont(\"Courier-Bold\", 14)\n pdf_canvas.drawString(335, 160, \"14\")\n pdf_canvas.setFont(\"Courier-Bold\", 10)\n pdf_canvas.drawString(105, 160, \"Luis Fernando Gonzalez\")\n pdf_canvas.setFont(\"Courier-Bold\", 14)\n pdf_canvas.drawString(135, 125, \"100.000\")\n pdf_canvas.drawString(130, 70, \"xxxxxxxx\")\n pdf_canvas.drawString(130, 50, \"xxxxxxxx\")\n\n # Save the PDF\n pdf_canvas.save()\n\ndef envio_correo():\n \"\"\" Metodo que envia documentos adjuntos por correo electrónico\"\"\"\n # Configuración de la cuenta de Gmail\n global correo_envio_residente\n valor_residente = combo_residente.get()\n \n sender_email = \"german.pemberty@gmail.com\"\n sender_password = \"ottmedkbarfpgcfw\"\n\n # Destinatario\n recipient_email = \"german.pemberthy@outlook.com\"\n #recipient_email = correo_envio_residente\n\n # Crear el mensaje\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = recipient_email\n message[\"Subject\"] = \"Prueba de envio\"\n\n # Cuerpo del correo\n body = \"Señor residente \" + valor_residente + \" la administración envia en el adjunto el comprobante del ultimo pago realizado\"\n message.attach(MIMEText(body, \"plain\"))\n\n # Adjuntar el documento\n file_path = \"C:/Users/germa/Desktop/SitioWebEducacion/\" + 'Recibo' + str(no_recibo) + '.pdf' # Reemplaza con la ruta de tu documento\n filename = 'Recibo' + str(no_recibo) + '.pdf' # Nombre que tendrá el archivo adjunto en el correo\n attachment = open(file_path, \"rb\")\n\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", f\"attachment; filename= {filename}\")\n\n message.attach(part)\n\n # Iniciar la conexión con el servidor SMTP de Gmail\n try:\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(sender_email, sender_password)\n\n # Enviar el correo\n server.sendmail(sender_email, recipient_email, message.as_string())\n messagebox.showinfo(title = \"Mensaje\", message = \"Correo enviado con éxito\")\n print(\"Correo enviado con éxito\")\n\n 
except Exception as envio1:\n        messagebox.showinfo(title = \"Nuevo elemento seleccionado\", message = \"Error al enviar el correo:\")\n        print(f\"Error al enviar el correo: {str(envio1)}\")\n\n    finally:\n        # Close the connection, but only if the SMTP object was actually created\n        try:\n            server.quit()\n        except NameError:\n            pass\n\ndef consulta_residente(event, combo_casa):\n    \"\"\"Look up the active residents of the selected house\"\"\"\n    seleccion = combo_casa.get()\n    db = conexion_mongo()\n    collection_personas = db.Personas\n    for document in collection_personas.find({'Casa': seleccion}):\n        if document['Estado'] == 'Activo':\n            residentes.append(document['Tipo'])\n            residentes.append(document['Estado'])\n            residentes.append(document['Telefono']['Activo'])\n            residentes.append(document['Personas']['Nombres'])\n            residentes.append(document['Personas']['Apellidos'])\n            residentes.append(document['Identificacion']['Tipo'])\n            residentes.append(document['Identificacion']['Numero'])\n            residente.append(document['Personas']['Nombres'] + ' ' + document['Personas']['Apellidos'])\n    combo_residente[\"values\"]=residente\n    combo_residente.config(state='normal', justify=CENTER)  # 'normal' is the valid ttk state ('enabled' raises TclError)\n    residente.clear()\n    \n    # Look up the receipts that are still pending payment so they can be shown on screen.\n    collection_recibos = db.Recibos\n    for document in collection_recibos.find({'Casa': seleccion}):\n        if document['Estado'] == 'Pendiente':  # the rest of this file writes 'Pendiente' without padding\n            valor_col1 = document['Recibo']\n            valor_col2 = document['Valor']\n            valor_col3 = document['Mes']\n            valor_col4 = document['Anno']\n            mi_arbol.insert('', 'end', values=(valor_col1, valor_col2, valor_col3, valor_col4))\n\ndef realizar_pago():\n    \"\"\"Handle a payment entered from the front end\"\"\"\n    global tipo\n    global no_recibo\n    global correo_envio_residente  # read later by envio_correo()\n    valor_pendiente_pago = 0\n    valor_timestamp = datetime.now()\n    valor_col1 = combo_casa.get()\n    valor_col2 = val_pago.get()\n    valor_pagado = int(valor_col2)\n    valor_col3 = pago_fecha.get()\n    valor_col4 = combo_residente.get()\n    valor_col5 = val_concepto.get()\n    valor_col6 = rb_opcion.get()\n    #valor_col7 = val_numtrans.get()\n    #valor_col8 = val_banco.get()\n\n    db = conexion_mongo()\n    collection_pagos = db.Pagos\n    collection_personas = db.Personas\n    collection_recibos = db.Recibos\n    collection_movimientos_dinero = db.Movimientos_Dinero\n\n    if valor_col6 == 1:\n        tipo = 'Efectivo'\n    elif valor_col6 == 2:\n        tipo = 'Consignación'\n    else:\n        tipo = 'Transferencia'\n\n#    print(f'tipo: {valor_col6}')\n#    mi_arbol.insert('', 'end', values=(valor_col1, valor_col2, valor_col3))\n\n    # walk the collection so no_recibo ends up holding the last receipt number\n    for document in collection_pagos.find():\n        no_recibo = document['No Recibo']\n\n    no_recibo +=1\n\n    collection_pagos.insert_one({'No Recibo': no_recibo,\n                                 'Casa': valor_col1,  # keep the two-digit string so queries that use the values in casas match\n                                 'Fecha': valor_timestamp,\n                                 'Anno': 2023,\n                                 'Valor': valor_pagado,\n                                 'Tipo': tipo,\n                                 'Observaciones': valor_col5,\n                                 'Recibi De': valor_col4,\n                                 'Estado': 'Pendiente'\n                                 })\n    # walk the money-movements table to pick up the latest running totals\n    for document in collection_movimientos_dinero.find():\n        valor_despues_del_movimiento = document['Valor_despues_del_movimiento']\n        en_efectivo = document['En_efectivo']\n        en_banco = document['En_banco']\n\n    if tipo == 'Efectivo':\n        en_efectivo = en_efectivo + valor_pagado\n    else:\n        en_banco = en_banco + valor_pagado\n\n    valor_antes_del_movimiento = valor_despues_del_movimiento\n    valor_despues_del_movimiento = valor_despues_del_movimiento + valor_pagado\n\n    collection_movimientos_dinero.insert_one({\n        'Tipo': 'Credito',\n        'Observacion': 'Pago casa',\n        'Valor': valor_pagado,\n        
'Valor_antes_del_movimiento': valor_antes_del_movimiento,\n        'Valor_despues_del_movimiento': valor_despues_del_movimiento,\n        'En_efectivo': en_efectivo,\n        'En_banco': en_banco,\n        'Fecha_movimiento': valor_timestamp\n    })\n\n    # the combobox holds \"Nombres Apellidos\", so compare against the same concatenation\n    for document in collection_personas.find({'Casa': valor_col1}):\n        if document['Personas']['Nombres'] + ' ' + document['Personas']['Apellidos'] == valor_col4:\n            correo_envio_residente = (document['correos']['Activo'])\n            print(f'Aqui voy por el correo: {correo_envio_residente}')\n\n    # generate the PDF payment slip\n    generate_pdf_soporte_pago()\n\n    # send the e-mail with the freshly generated PDF attached\n    #envio_correo()\n\ndef registrar_pagos():\n    \"\"\"Build the form used to register payments\"\"\"\n    # clear any widgets previously placed on the frame\n    for widget in frame4.winfo_children():\n        widget.destroy()\n\n    global combo_casa\n    Label(frame4, text=\"Casa\", anchor='center').grid(row=0, column=0)\n    combo_casa = ttk.Combobox(frame4, values=casas)\n    combo_casa.grid(row=1, column=0, padx=20, pady=10)\n    combo_casa.config(width=15, justify=CENTER)\n    combo_casa.bind(\"<<ComboboxSelected>>\", lambda event: consulta_residente(event, combo_casa))\n\n    global combo_residente\n    Label(frame4, text = \"Recibi de: \", anchor='e').grid(row=0, column=1)\n    combo_residente = ttk.Combobox(frame4)\n    combo_residente.grid(row=1, column=1, padx=5, pady=10)\n    combo_residente.config(width=35, state='disabled')\n    \n    global val_pago\n    Label(frame4, text=\"Valor pagado: \").grid(row=0,column=2)\n    val_pago = Entry(frame4)\n    val_pago.config(width=20, justify=CENTER)\n    val_pago.grid(row=1, column=2, padx=8, pady=10)\n\n    global pago_fecha\n    Label(frame4, text=\"Fecha del pago:\").grid(row=0,column=3)\n    pago_fecha = Entry(frame4)\n    pago_fecha.grid(row=1, column=3, padx=10, pady=10)\n    pago_fecha.config(justify=CENTER)\n\n    global val_concepto\n    Label(frame4, text='Observaciones:').grid(row=2,column=0)\n    val_concepto = Entry(frame4)\n    val_concepto.maxlimit = 10  # note: Entry has no maxlimit option, so this attribute has no effect\n    val_concepto.grid(row=2, column=1, columnspan=3, padx=5, pady=10)\n    val_concepto.config(width=82)\n    \n    \n    global val_banco\n    Label(frame4, text='Banco:').grid(row=3,column=0)\n    val_banco=Entry(frame4)\n    val_banco.config(width=35)\n    val_banco.grid(row=3,column=1, padx=3, pady=10)\n    \n    global val_numtrans\n    Label(frame4, text='Numero: ').grid(row=3,column=2)\n    val_numtrans=Entry(frame4)\n    val_numtrans.config(width=20)\n    val_numtrans.grid(row=3,column=3, padx=5,pady=10)\n    \n    global rb1\n    global rb2\n    global rb3\n    rb1= Radiobutton(frame4, text=\"Efectivo\", variable= rb_opcion, value=1 )\n    rb1.grid(row=4, column=0, pady=10)\n    rb2= Radiobutton(frame4, text=\"Consignacion\", variable=rb_opcion, value=2 )\n    rb2.grid(row=4, column=1, pady=10)\n    rb3= Radiobutton(frame4, text=\"Transferencia\", variable=rb_opcion, value=3 )\n    rb3.grid(row=4, column=2, pady=10)\n    rb_opcion.set(1)\n\n    boton_agregar = Button(frame4, text=\"Pagar\", command=realizar_pago)\n    boton_agregar.grid(row=5, column=0, columnspan=4, sticky= W+E,pady=10)\n    boton_agregar.config(fg='black', bg='#158645', cursor='hand2', activebackground='yellow')\n\n    # table of pending receipts for the selected house\n    global mi_arbol\n    mi_arbol = ttk.Treeview(frame4, height=5, columns=('Col1', 'Col2', 'Col3', 'Col4'))\n    mi_arbol.grid(row=6, column=0, columnspan=4, pady=10)\n    mi_arbol.heading('#0', text=\"Id\")\n    mi_arbol.heading('Col1', text=\"Recibo\")\n    mi_arbol.heading('Col2', text=\"Valor Recibo\")\n    mi_arbol.heading('Col3', text=\"Mes\")\n    mi_arbol.heading('Col4', text=\"Año\")\n    mi_arbol.column('#0', width=45)\n    mi_arbol.column('Col1', 
width=110, anchor='center')\n mi_arbol.column('Col2', width=150, anchor='center')\n mi_arbol.column('Col3', width=210, anchor='center')\n mi_arbol.column('Col4', width=110, anchor='center')\n\ndef consultar_pagos():\n \"\"\"Metodo que abre una ventana emergente y me permite consultar los ultimos pagos realizados por un predio\"\"\"\n \n ventana_emergente_pagos = Toplevel()\n ventana_emergente_pagos.title(\" Ventana Pagos \")\n \n frameb = LabelFrame(ventana_emergente_pagos, text=\"Registro de pagos Balcones\")\n frameb.grid(row=0, column=0, pady=5)\n #frame.grid(row=0, column=0, columnspan=2, pady=5)\n\n Label(frameb, text=\"Nombres\").grid(row=1, column=0)\n nombre = Entry(frameb)\n nombre.grid(row=1, column=1)\n nombre.focus()\n \n Label(frameb, text=\"Apellidos\").grid(row=2,column=0)\n apellidos = Entry(frameb)\n apellidos.grid(row=2, column=1)\n\n btn_guardar = Button(ventana_emergente_pagos, text=\"Guardar\", command=envio_correo)\n btn_guardar.grid(row=3, column=0, sticky= W+E)\n\n #Tabla\n miArbol = ttk.Treeview(ventana_emergente_pagos, height=10, columns=2)\n miArbol.grid(row=4, column=0)\n miArbol.heading(\"#0\", text=\"Nombre\")\n miArbol.heading(\"#1\", text=\"Pagos\")\n\ndef menu_pagos():\n \"\"\"Metodo que expone el boton para guardar un pago en la Base de datos\"\"\"\n frame = LabelFrame(raiz, text=\" Modulo Pagos \")\n frame.config(width=100, height=100)\n frame.grid(row=0, column=0, padx=15, pady=10)\n # -- Pagos --\n #Label(frame, text=\"Registrar Pagos\").grid(row=1, column=0)\n boton1 = Button(frame, text=\"Registrar Pagos\", width=15, command=registrar_pagos)\n boton1.grid(row=2, column=0, sticky= W+E, padx=5, pady=5)\n boton1.config(fg='black', bg='#158645', cursor='hand2', activebackground='yellow')\n\n boton2 = Button(frame,text=\"Consultar Pagos\", command=consultar_pagos)\n boton2.grid(row=4, columnspan=2, padx= 5, pady=5, sticky= W+E)\n boton2.config(cursor=\"hand2\")\n\ndef guardar_persona():\n \"\"\"Funcion que me permite guardar en la BD la persona\"\"\"\n valor_nombre = nombre_per.get()\n valor_casas =combo_casas.get()\n valor_apellido = apellidos.get()\n valor_tipoid = combo_tipoid.get()\n valor_correo = correo.get()\n valor_numid = num_id.get()\n valor_telefono = telefono.get()\n valor_tipoper = combo_tipo.get()\n valor_timestamp = datetime.now()\n print(valor_timestamp)\n\n db = conexion_mongo()\n collection_personas = db.Personas\n collection_personas.insert_one({'Telefono':{'Activo': valor_telefono},\n 'Casa': valor_casas, \n 'Estado': 'Activo',\n 'Tipo': valor_tipoper,\n 'Actualizado': valor_timestamp,\n 'Creado': valor_timestamp,\n 'Identificacion': {\n 'Tipo': valor_tipoid,\n 'Numero': valor_numid\n },\n 'correos': {'Activo': valor_correo},\n 'Personas': {'Nombres': valor_nombre,\n 'Apellidos': valor_apellido} \n })\n miArbol.insert('', 'end', \n values=(valor_nombre + valor_apellido, valor_tipoid, valor_numid, valor_correo, valor_telefono,valor_tipoper, 'Activo'))\n mi_boton.config(state='disabled')\n\ndef ventana_personas():\n \"\"\"Funcion para gestionar las personas\"\"\"\n \n for widget in frame4.winfo_children():\n widget.destroy()\n #ventana_emergente_pagos = Toplevel()\n #ventana_emergente_pagos.title(\" Ventanas Personas \")\n \n #frameb = LabelFrame(frame4, text=\" Registro de personas \")\n #frameb.grid(row=0, column=0, pady=5)\n #frame.grid(row=0, column=0, columnspan=2, pady=5)\n\n global nombre_per\n Label(frame4, text=\"Nombres: \").grid(row=0, column=0, padx=5, pady=5)\n nombre_per = Entry(frame4)\n nombre_per.grid(row=0, column=1)\n 
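# focus() puts the cursor in the first field; guardar_persona() later reads every one of these widgets back as a string with .get().\n    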
nombre_per.focus()\n    \n    Label(frame4, text=\"Apellidos: \").grid(row=0,column=2, padx=5, pady=5)\n    global apellidos\n    apellidos = Entry(frame4)\n    apellidos.grid(row=0, column=3)\n\n    Label(frame4, text=\"Tipo Id: \").grid(row=1,column=0, padx=5, pady=5)\n    global combo_tipoid\n    combo_tipoid = ttk.Combobox(frame4, values=('CC', 'CE', 'TI', 'NIT'), width=5)\n    combo_tipoid.grid(row=1, column=1)\n\n    Label(frame4, text=\"Identificacion: \").grid(row=1,column=2, padx=5, pady=5)\n    global num_id\n    num_id = Entry(frame4)\n    num_id.grid(row=1, column=3)\n\n    Label(frame4, text=\"Telefono: \").grid(row=2,column=0, padx=5, pady=5)\n    global telefono\n    telefono = Entry(frame4)\n    telefono.grid(row=2, column=1)\n\n    Label(frame4, text=\"Correo: \").grid(row=2,column=2, padx=5, pady=5)\n    global correo\n    correo = Entry(frame4)\n    correo.grid(row=2, column=3)\n    correo.config(width=30)\n\n    Label(frame4, text=\"Casa: \").grid(row=3,column=0, padx=5, pady=5)\n    global combo_casas\n    combo_casas = ttk.Combobox(frame4, values=casas, width=5)\n    combo_casas.grid(row=3, column=1)\n\n    Label(frame4, text=\"Tipo: \").grid(row=3,column=2, padx=5, pady=5)\n    global combo_tipo\n    combo_tipo = ttk.Combobox(frame4, values=('Propietario', 'Arrendatario'), width=15)\n    combo_tipo.grid(row=3, column=3)\n\n    global mi_boton\n    mi_boton = Button(frame4, text=\"Guardar\", command= guardar_persona)\n    mi_boton.grid(row=4, column=0, sticky= W+E)\n    mi_boton.config(cursor='hand2')\n\n    # table of registered persons\n    global miArbol\n    miArbol = ttk.Treeview(frame4, height=5, \n                           columns=('Col_Nombre', 'Col_TId', 'Col_numid', 'Col_Correo', 'Col_Tel', 'Col_Tip', 'Col_Est'))\n    miArbol.grid(row=6, column=0, columnspan=4)\n    miArbol.heading(\"#0\", text=\"Id\")\n    miArbol.heading('Col_Nombre', text=\"Nombres\")\n    miArbol.heading('Col_TId', text=\"Tipo Id\")\n    miArbol.heading('Col_numid', text=\"Num Id\")\n    miArbol.heading('Col_Correo', text=\"Correo\")\n    miArbol.heading('Col_Tel', text=\"Teléfono\")\n    miArbol.heading('Col_Tip', text=\"Tipo\")\n    miArbol.heading('Col_Est', text=\"Estado\")\n    miArbol.column('#0', width=10)\n    miArbol.column('Col_Nombre', width=140)\n    miArbol.column('Col_TId', width=50)\n    miArbol.column('Col_numid', width=70)\n    miArbol.column('Col_Correo', width=170)\n    miArbol.column('Col_Tel', width=80)\n    miArbol.column('Col_Tip', width=70)\n    miArbol.column('Col_Est', width=70)\n\ndef llenar_tabla(mi_arbol):\n    \"\"\"Fill the table in the residents window\"\"\"\n    if len(residentes) > 0:\n        # residentes holds [Tipo, Estado, Telefono, Nombres, Apellidos, TipoId, NumId]\n        mi_arbol.insert('', 0, text=residentes[3], values=(residentes[4], residentes[6]))\n\ndef actualizar_residente():\n    \"\"\"Update a resident\"\"\"\n    for widget in frame4.winfo_children():\n        widget.destroy()\n    \n    Label(frame4, text=\"Casa: \").grid(row=0, column=0)\n    combo_casa = ttk.Combobox(frame4, values=casas)\n    combo_casa.grid(row=0, column=1)\n    combo_casa.config(width=7)\n    combo_casa.bind(\"<<ComboboxSelected>>\", lambda event: consulta_residente(event, combo_casa))\n\n    # table: two value columns are needed for the \"#1\" and \"#2\" headings below\n    miArbol = ttk.Treeview(frame4, height=10, columns=('Col1', 'Col2'))\n    miArbol.grid(row=3, column=0, columnspan=3)\n    miArbol.heading(\"#0\", text=\"Nombre\")\n    miArbol.heading(\"#1\", text=\"Apellidos\")\n    miArbol.heading(\"#2\", text=\"Identificacion\")\n    if len(residentes) > 0:\n        llenar_tabla(miArbol)\n\n    mi_boton1 = Button(frame4, text=\"Consultar\", command= lambda: llenar_tabla(miArbol))\n    mi_boton1.grid(row=0, column=2)\n    mi_boton1.config(cursor='hand2')\n\ndef menu_personas():\n    \"\"\"Build the residents screen\"\"\"\n    frame2 = LabelFrame(raiz, text=\" Modulo Residentes \")\n    
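# The three module frames (Pagos, Residentes, Proyectos) stack in column 0 of raiz, while frame4 on the right is the shared work area.\n    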
#frame2.config(width=100, height=100, bg=\"blue\")\n frame2.config(width=100, height=100)\n frame2.grid(row=1, column=0, padx=10, pady=5)\n # -- Pagos --\n #Label(frame2, text=\"Registrar Persona\").grid(row=1, column=0)\n\n Button(frame2, text=\"Registrar Residente\", width=15, \n command=ventana_personas).grid(row=0, column=0, sticky= W+E, padx=5, pady=5)\n\n #Label(frame2, text=\"Consultar Persona\").grid(row=3, column=0)\n boton1 = Button(frame2,text=\"Consultar Residente\")\n boton1.grid(row=1, columnspan=2, padx= 5, pady=5, sticky= W+E)\n boton1.config()\n\n Button(frame2,text=\"Actualizar Residente\", command=actualizar_residente).grid(row=2, columnspan=2, padx= 5, pady=5, sticky= W+E)\n\ndef crear_proyecto():\n \"\"\"Funcion para crear proyectos\"\"\"\n for widget in frame4.winfo_children():\n widget.destroy()\n #for child in raiz.winfo_children():\n # print(child)\n\n Label(frame4, text=\"Crear un proyecto\").grid(row=0, column=0)\n Button(frame4, text=\"prueba\").grid(row=0, column=1)\n\ndef consultar_proyecto():\n \"\"\"Metodo para consultar los datos de un proyecto\"\"\"\n for widget in frame4.winfo_children():\n widget.destroy()\n Label(frame4, text=\"Registrar un proyecto\").grid(row=0, column=0)\n Button(frame4, text=\"prueba\").grid(row=0, column=1)\n\ndef registrar_proyecto():\n \"\"\"Metodo para registrar un proyecto\"\"\"\n for widget in frame4.winfo_children():\n widget.destroy()\n \n Label(frame4, text=\"Espacio para registrar todos los movimientos de un proyecto\").grid(row=0, column=0)\n Button(frame4, text=\"prueba\").grid(row=1, column=1)\n\ndef menu_proyectos():\n \"\"\"Metodo que ejecuta la pantalla principal\"\"\"\n frame3 = LabelFrame(raiz, text=\" Modulo Proyectos \")\n #frame3.config(width=100, height=100, bg=\"green\")\n frame3.config(width=100, height=100)\n frame3.grid(row=2, column=0, padx=10, pady=10)\n # -- Proyectos --\n\n Button(frame3, text=\"Crear Proyecto\", width=15, \n command=crear_proyecto).grid(row=0, column=0, sticky= W+E, padx=5, pady=5)\n \n Button(frame3, text=\"Registrar Proyecto\", width=15, \n command=registrar_proyecto).grid(row=1, column=0, sticky= W+E, padx=5, pady=5)\n \n Button(frame3,text=\"Consultar Proyecto\", \n command=consultar_proyecto).grid(row=4, columnspan=2, padx= 5, pady=5, sticky= W+E)\n\nventana()\n\nmenu_pagos()\n\nmenu_personas()\n\nmenu_proyectos()\n\nraiz.mainloop()\n\"\"\" Fin del proceso\"\"\"","repo_name":"gpemberthy/SitioWebEducacion","sub_path":"main_Balcones.py","file_name":"main_Balcones.py","file_ext":"py","file_size_in_byte":49112,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73655401457","text":"import pygame\nfrom vector import *\nimport object\n\n\nscreen_width = 1280\nscreen_height = 720\n\nclass Boatman:\n mCastLaunchPoint: Vector2\n mCurrentLineTarget: Vector2\n mCurrentBobberLocation: Vector2\n\n def __init__(self, pos=Vector2(772, 600)):\n self.mPos = pos\n self.mVelocity = Vector2(0, 0)\n self.mAcceleration = Vector2(0, 0)\n self.mDrag = Vector2(0,0)\n self.mDragCoefficient = 0.995\n self.mBobInBoat = False\n self.mBobCastingFrames = {}\n # 1 = right, -1 = left\n self.mCurrentCastDirection = 1\n self.mCurrentCastFrame = 1\n self.mAnimationTimer = 0\n for i in range(1, 7):\n self.mBobCastingFrames[(1, i)] = pygame.image.load(\"Bob_F_\" + str(i) + \".png\")\n self.mBobCastingFrames[(-1, i)] = pygame.image.load(\"Bob_F_L_\" + str(i) + \".png\")\n self.mBoatImage = pygame.image.load(\"BoatScaled.png\")\n self.mImage = self.mBoatImage\n 
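# Movement tuning (units assumed to be pixels per second): update() integrates mAcceleration into mVelocity, damps it by mDragCoefficient and clamps it to mMaxSpeed.\n        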
self.mMoveSpeed = 200\n self.mMaxSpeed = 300\n self.mLineCastStrength = 500\n self.mNextCastTimer = 0\n self.mCastTime = 2.0\n self.mHalfCastTime = self.mCastTime / 2\n self.mMaxSpeedSq = self.mMaxSpeed * self.mMaxSpeed\n self.mWidth = self.mBobCastingFrames[(self.mCurrentCastDirection, self.mCurrentCastFrame)].get_width()\n self.mHalfWidth = self.mWidth / 2\n self.mHeight = self.mBobCastingFrames[(self.mCurrentCastDirection, self.mCurrentCastFrame)].get_height()\n self.mHalfHeight = self.mHeight / 2\n self.mFrameHeightDiff = self.mHeight - self.mImage.get_height()\n self.mBobberSize = 5\n self.mCurrentLineTarget = None\n self.mCurrentBobberLocation = None\n self.mCastLaunchPoint = None\n self.mCastingOut = False\n self.mReelingIn = False\n self.mCastingVector = None\n self.mAllowMovement = False\n self.mCaughtSomething = False\n self.mTargetReticleLocation = Vector2(screen_width/2, screen_height/2)\n self.mTargetReticleSpeed = 15\n self.mTargetReticleSize = 25\n self.mBobberCollisionBox = object.QuickAndDirtyCollisionRect(self.mPos - Vector2(self.mBobberSize, self.mBobberSize), self.mBobberSize * 2, self.mBobberSize * 2)\n\n self.mPoleLocation = {(1, 1): Vector2(178, 63),\n (1, 2): Vector2(168, 15),\n (1, 3): Vector2(161, 9),\n (1, 4): Vector2(161, 9),\n (1, 5): Vector2(168, 15),\n (1, 6): Vector2(177, 62),\n (-1, 1): Vector2(46, 19),\n (-1, 2): Vector2(81, 23),\n (-1, 3): Vector2(53, 10),\n (-1, 4): Vector2(53, 10),\n (-1, 5): Vector2(53, 10),\n (-1, 6): Vector2(36, 63)}\n self.mCurrentAnimationNumber = -1\n self.mCurrentAnimationStep = 0\n self.mAnimating = False\n # 0 = casting\n # 1 = reeling\n # 2 = back to static\n self.mAnimations = [[(1, 0.05), (2, 0.1), (3, -1)],\n [(3, 0.08), (5, -1)],\n [(5, 0.05), (1, -1)]]\n\n def update(self, delta_time):\n if self.mAllowMovement:\n if self.mAnimating:\n self.mAnimationTimer -= delta_time\n if self.mAnimationTimer < 0:\n self.mCurrentAnimationStep += 1\n self.mAnimationTimer = self.mAnimations[self.mCurrentAnimationNumber][self.mCurrentAnimationStep][1]\n self.mCurrentCastFrame = self.mAnimations[self.mCurrentAnimationNumber][self.mCurrentAnimationStep][0]\n if self.mAnimationTimer == -1:\n self.mAnimating = False\n if self.mCastingOut or self.mReelingIn:\n self.mNextCastTimer -= delta_time\n if self.mNextCastTimer < 0:\n self.mNextCastTimer = 0\n self.mCurrentLineTarget = None\n self.mReelingIn = False\n self.mCastingOut = False\n self.mCastLaunchPoint = None\n self.mCaughtSomething = False\n self.mBobberCollisionBox.set_pos(Vector2(-1000,-1000))\n self.play_animation(2)\n self.mVelocity += self.mAcceleration * delta_time\n self.mVelocity *= self.mDragCoefficient\n if self.mVelocity.magnitudeSq > self.mMaxSpeedSq:\n self.mVelocity = self.mVelocity.normalized * self.mMaxSpeed\n self.mAcceleration = Vector2(0, 0)\n self.mPos += self.mVelocity * delta_time\n if self.x < 0 and self.mVelocity.x < 0:\n self.mVelocity.x *= -1\n if self.x > screen_width - self.mWidth and self.mVelocity.x > 0:\n self.mVelocity.x *= -1\n if self.y < 0 and self.mVelocity.y < 0:\n self.mVelocity.y *= -1\n if self.y > screen_height - self.mHeight and self.mVelocity.y > 0:\n self.mVelocity.y *= -1\n if self.mReelingIn:\n # replace this w/ a different, less linear function for fast followed by slow reel in\n percentReeled = (self.mHalfCastTime - self.mNextCastTimer) / self.mHalfCastTime\n self.mCurrentBobberLocation = self.mCurrentLineTarget - (percentReeled * (self.mCurrentLineTarget - self.fishing_pole_tip))\n if self.mCastingOut:\n percentCast = (self.mCastTime - 
self.mNextCastTimer) / self.mNextCastTimer\n if self.mNextCastTimer < self.mHalfCastTime:\n self.mCastingOut = False\n self.mReelingIn = True\n self.play_animation(1)\n percentCast = 1.0\n self.mCurrentBobberLocation = self.mCastLaunchPoint + (percentCast * self.mCastingVector)\n else:\n self.mVelocity = Vector2(0, 0)\n self.mNextCastTimer = 0\n self.mCastingOut = False\n self.mReelingIn = False\n self.mAcceleration = Vector2(0, 0)\n self.mCurrentLineTarget = None\n self.mCastLaunchPoint = None\n if self.mCastingOut or self.mReelingIn:\n self.mBobberCollisionBox.set_pos(self.mCurrentBobberLocation - Vector2(self.mBobberSize, self.mBobberSize))\n\n def checkBobberCollision(self, box):\n if not self.mCaughtSomething and self.mBobberCollisionBox.collides(box):\n self.mCurrentLineTarget = box.center\n self.mCurrentBobberLocation = self.mCurrentLineTarget\n self.mCaughtSomething = True\n if self.mCastingOut:\n self.mNextCastTimer = self.mHalfCastTime\n self.mCastingOut = False\n self.mReelingIn = True\n return True\n return False\n\n def add_force(self, force_vec2):\n if self.mAllowMovement:\n self.mAcceleration += force_vec2\n\n def draw(self, screen):\n if self.mBobInBoat:\n self.mImage = self.mBobCastingFrames[(self.mCurrentCastDirection, self.mCurrentCastFrame)]\n screen.blit(self.mImage, (int(self.x), int(self.y)))\n if self.mCurrentLineTarget is not None:\n pygame.draw.line(screen, (255, 255, 255), self.fishing_pole_tip.i2, self.mCurrentBobberLocation.i2, 3)\n pygame.draw.circle(screen, (255, 40, 80), self.mCurrentBobberLocation.i2, 5)\n pygame.draw.circle(screen, (0, 0, 255), self.mTargetReticleLocation.i2, self.mTargetReticleSize, 2)\n pygame.draw.line(screen, (0, 0, 255), (self.mTargetReticleLocation + Vector2(0, self.mTargetReticleSize)).i2, (self.mTargetReticleLocation - Vector2(0, self.mTargetReticleSize)).i2, 2)\n pygame.draw.line(screen, (0, 0, 255), (self.mTargetReticleLocation + Vector2(self.mTargetReticleSize, 0)).i2, (self.mTargetReticleLocation - Vector2(self.mTargetReticleSize, 0)).i2, 2)\n\n def cast_at(self, target_vector, directly_at_location=False):\n if self.mAllowMovement:\n if self.mCurrentLineTarget is None:\n self.mNextCastTimer = self.mCastTime\n if directly_at_location:\n self.mCurrentLineTarget = target_vector\n else:\n self.mCurrentLineTarget = self.fishing_pole_tip + (target_vector * self.mLineCastStrength)\n self.mCurrentBobberLocation = self.fishing_pole_tip\n self.mCastingVector = self.mCurrentLineTarget - self.mCurrentBobberLocation\n if self.mCastingVector.x < 0:\n self.mCurrentCastDirection = -1\n else:\n self.mCurrentCastDirection = 1\n self.mCastLaunchPoint = self.mCurrentBobberLocation\n self.mCastingOut = True\n self.mReelingIn = False\n self.play_animation(0)\n\n def cast_to(self, target_pos_vector):\n self.cast_at(target_pos_vector, True)\n\n def cast_line(self):\n if not self.mCastingOut and not self.mReelingIn:\n self.cast_to(self.mTargetReticleLocation)\n elif self.mCastingOut:\n if self.mNextCastTimer - self.mHalfCastTime > 0:\n self.play_animation(1)\n self.mNextCastTimer = self.mCastTime - self.mNextCastTimer\n self.mCastingOut = False\n self.mReelingIn = True\n\n def put_bob_in_boat(self):\n if not self.mBobInBoat:\n self.mBobInBoat = True\n self.mPos -= Vector2(0, self.mFrameHeightDiff)\n\n def take_bob_out_of_boat(self):\n self.mBobInBoat = False\n self.mPos += Vector2(0, self.mFrameHeightDiff)\n self.mImage = self.mBoatImage\n\n def turn_on_movement(self):\n self.mAllowMovement = True\n self.put_bob_in_boat()\n\n def 
turn_off_movement(self):\n        self.mAllowMovement = False\n\n    def set_target_reticle(self, screen_pos_vector):\n        self.mTargetReticleLocation = screen_pos_vector\n\n    def move_target_reticle(self, change_vector):\n        self.mTargetReticleLocation += change_vector * self.mTargetReticleSpeed\n        if self.mTargetReticleLocation.x < 0:\n            self.mTargetReticleLocation.x = 0\n        if self.mTargetReticleLocation.y < 0:\n            self.mTargetReticleLocation.y = 0\n        if self.mTargetReticleLocation.x > screen_width:\n            self.mTargetReticleLocation.x = screen_width\n        if self.mTargetReticleLocation.y > screen_height:\n            self.mTargetReticleLocation.y = screen_height\n\n    def play_animation(self, anim_number):\n        self.mAnimating = True\n        self.mCurrentAnimationNumber = anim_number\n        self.mCurrentAnimationStep = 0\n        self.mAnimationTimer = self.mAnimations[anim_number][0][1]\n\n    @property\n    def x(self):\n        return self.mPos.x\n\n    @x.setter\n    def x(self, new_x):\n        self.mPos.x = new_x\n\n    @property\n    def y(self):\n        return self.mPos.y\n\n    @y.setter\n    def y(self, new_y):\n        self.mPos.y = new_y\n\n    @property\n    def fishing_pole_tip(self):\n        return self.mPos + self.mPoleLocation[(self.mCurrentCastDirection, self.mCurrentCastFrame)]\n\n    @property\n    def bobber_location(self):\n        return self.mCurrentBobberLocation\n\n","repo_name":"AudaciousAxolotl/UltimateFishingChampionship","sub_path":"Boatman.py","file_name":"Boatman.py","file_ext":"py","file_size_in_byte":11366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16093709150","text":"import random\n\nnumberOfStreaks = 0\nfor experimentNumber in range(10000):\n    # Code that creates a list of 100 'heads' or 'tails' values.\n    coinOutput = []\n    for i in range(100):\n        h = random.randint(0,1)\n        if h == 0:\n            coinOutput.append('H')\n        else:\n            coinOutput.append('T')\n\n    # Code that checks if there is a streak of 6 heads or tails in a row.\n    coinStreak = 0\n    for i in range(100):\n        if coinOutput[i:i+6]==['H','H','H','H','H','H'] or coinOutput[i:i+6]==['T', 'T', 'T', 'T', 'T', 'T']:\n            coinStreak+=1\n    # count the experiment if at least one streak of six occurred\n    if coinStreak > 0:\n        numberOfStreaks+=1\n# numberOfStreaks is out of 10000 experiments, so divide by 100 for a percentage\nprint('Chance of streak: %s%%' % (numberOfStreaks / 100))","repo_name":"manoj1749/automate-the-boring-stuff-with-python","sub_path":"Chapter-04/Practice Projects/CoinFlipStreaks.py","file_name":"CoinFlipStreaks.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"685416115","text":"import numpy as np\nimport math\nimport os\nimport sys\nimport openvino.runtime as ov\nimport cv2\n\n# Setup model environment\nMODEL_DIR = \"model\"\nDATA_DIR = \"data\"\n\n### Configuration to detect meter and extract from image\nMETER_SHAPE = [512, 512] \nCIRCLE_CENTER = [256, 256] \nCIRCLE_RADIUS = 250\nPI = math.pi\nRECTANGLE_HEIGHT = 120\nRECTANGLE_WIDTH = 1570\nTYPE_THRESHOLD = 40\nCOLORMAP = np.array([[28, 28, 28], [238, 44, 44], [250, 250, 250]])\n\n# There are 2 types of meters in test image datasets\nMETER_CONFIG = [{\n    'scale_interval_value': 25.0 / 50.0,\n    'range': 25.0,\n    'unit': \"(MPa)\"\n}, {\n    'scale_interval_value': 1.6 / 32.0,\n    'range': 1.6,\n    'unit': \"(MPa)\"\n}]\n\nSEG_LABEL = {'background': 0, 'pointer': 1, 'scale': 2}\n# Initialize OpenVINO Runtime\nie_core = ov.Core()\n\n\nclass Model:\n    \"\"\"\n    This class represents an OpenVINO model object.\n\n    \"\"\"\n    def __init__(self, model_path, new_shape):\n        \"\"\"\n        Initialize the model object\n        \n        Param: \n            model_path (string): path of inference 
model\n            new_shape (dict): new shape of model input\n\n        \"\"\"\n        self.model = ie_core.read_model(model=model_path)\n        self.model.reshape(new_shape)\n        self.compiled_model = ie_core.compile_model(model=self.model, device_name=\"CPU\")\n        self.output_layer = self.compiled_model.output(0)\n\n    def predict(self, input_image):\n        \"\"\"\n        Run inference\n        \n        Param: \n            input_image (np.array): input data\n        \n        Returns:\n            result (np.array): model output data\n        \"\"\"\n        result = self.compiled_model(input_image)[self.output_layer]\n        return result\n    \n## Data Process\n\n### Preprocess image to detect the meter\ndef det_preprocess(input_image, target_size):\n    \"\"\"\n    Preprocessing the input data for the detection task\n\n    Param: \n        input_image (np.array): input data\n        target_size (int): the image size required by the model input layer\n    Returns:\n        img.astype (np.array): preprocessed image\n    \n    \"\"\"\n    img = cv2.resize(input_image, (target_size, target_size))\n    img = np.transpose(img, [2, 0, 1]) / 255\n    img = np.expand_dims(img, 0)\n    img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\n    img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n    img -= img_mean\n    img /= img_std\n    return img.astype(np.float32)\n\n### Filter the bounding boxes found by the detector\ndef filter_bboxes(det_results, score_threshold):\n    \"\"\"\n    Filter out the detection results with low confidence\n\n    Param:\n        det_results (np.array): detection results\n        score_threshold (float): confidence threshold\n\n    Returns:\n        filtered_results (list): filtered detection results\n    \n    \"\"\"\n    filtered_results = []\n    for i in range(len(det_results)):\n        if det_results[i, 1] > score_threshold:\n            filtered_results.append(det_results[i])\n    return filtered_results\n\n### Crop area of interest from bboxes that detect the meter\ndef roi_crop(image, results, scale_x, scale_y):\n    \"\"\"\n    Crop the area of the detected meter out of the original image\n\n    Param:\n        image (np.array): original image\n        results (list): detection results\n        scale_x (float): the scale value in x axis\n        scale_y (float): the scale value in y axis\n\n    Returns:\n        roi_imgs (list[np.array]): the list of meter images\n        loc (list[list]): the list of meter locations\n    \n    \"\"\"\n    roi_imgs = []\n    loc = []\n    for result in results:\n        bbox = result[2:]\n        xmin, ymin, xmax, ymax = [int(bbox[0] * scale_x), int(bbox[1] * scale_y), int(bbox[2] * scale_x), int(bbox[3] * scale_y)]\n        sub_img = image[ymin:(ymax + 1), xmin:(xmax + 1), :]\n        roi_imgs.append(sub_img)\n        loc.append([xmin, ymin, xmax, ymax])\n    return roi_imgs, loc\n\n### Preprocess image from area of interest to segment meter in next step\ndef roi_process(input_images, target_size, interp=cv2.INTER_LINEAR):\n    \"\"\"\n    Prepare the ROI images taken from the detection results:\n    preprocessing of the input data for the segmentation task\n\n    Param:\n        input_images (list[np.array]): the list of meter images\n        target_size (list|tuple): height and width of the resized image, e.g. [height, width]\n        interp (int): the interpolation method for image resizing\n\n    Returns:\n        img_list (list[np.array]): the list of processed images\n        resize_list (list[np.array]): resized images, for visualization\n    \n    \"\"\"\n    img_list = list()\n    resize_list = list()\n    for img in input_images:\n        img_shape = img.shape\n        scale_x = float(target_size[1]) / float(img_shape[1])\n        scale_y = float(target_size[0]) / float(img_shape[0])\n        resize_img = cv2.resize(img, None, None, fx=scale_x, fy=scale_y, interpolation=interp)\n        resize_list.append(resize_img)\n        resize_img = resize_img.transpose(2, 0, 1) / 255\n        img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 
1, 1))\n img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))\n resize_img -= img_mean\n resize_img /= img_std\n img_list.append(resize_img)\n return img_list, resize_list\n\n### Erode to the segment meter result\ndef erode(seg_results, erode_kernel):\n \"\"\"\n Erode the segmentation result to get the more clear instance of pointer and scale\n\n Param:\n seg_results (list[dict]):segmentation results\n erode_kernel (int): size of erode_kernel\n\n Return:\n eroded_results (list[dict]): the lab map of eroded_results\n \n \"\"\"\n kernel = np.ones((erode_kernel, erode_kernel), np.uint8)\n eroded_results = seg_results\n for i in range(len(seg_results)):\n eroded_results[i] = cv2.erode(seg_results[i].astype(np.uint8), kernel)\n return eroded_results\n\n### Convert circle shape into retangle shape to read meter\ndef circle_to_rectangle(seg_results):\n \"\"\"\n Switch the shape of label_map from circle to rectangle\n\n Param:\n seg_results (list[dict]):segmentation results\n\n Return:\n rectangle_meters (list[np.array]):the rectangle of label map\n\n \"\"\"\n rectangle_meters = list()\n for i, seg_result in enumerate(seg_results):\n label_map = seg_result\n\n # The size of rectangle_meter is determined by RECTANGLE_HEIGHT and RECTANGLE_WIDTH\n rectangle_meter = np.zeros((RECTANGLE_HEIGHT, RECTANGLE_WIDTH), dtype=np.uint8)\n for row in range(RECTANGLE_HEIGHT):\n for col in range(RECTANGLE_WIDTH):\n theta = PI * 2 * (col + 1) / RECTANGLE_WIDTH\n \n # The radius of meter circle will be mapped to the height of rectangle image\n rho = CIRCLE_RADIUS - row - 1\n y = int(CIRCLE_CENTER[0] + rho * math.cos(theta) + 0.5)\n x = int(CIRCLE_CENTER[1] - rho * math.sin(theta) + 0.5)\n rectangle_meter[row, col] = label_map[y, x]\n rectangle_meters.append(rectangle_meter)\n return rectangle_meters\n\n### Read line scale and pointer on images\ndef rectangle_to_line(rectangle_meters):\n \"\"\"\n Switch the dimension of rectangle label map from 2D to 1D\n\n Param:\n rectangle_meters (list[np.array]):2D rectangle OF label_map。\n\n Return:\n line_scales (list[np.array]): the list of scales value\n line_pointers (list[np.array]):the list of pointers value\n\n \"\"\"\n line_scales = list()\n line_pointers = list()\n for rectangle_meter in rectangle_meters:\n height, width = rectangle_meter.shape[0:2]\n line_scale = np.zeros((width), dtype=np.uint8)\n line_pointer = np.zeros((width), dtype=np.uint8)\n for col in range(width):\n for row in range(height):\n if rectangle_meter[row, col] == SEG_LABEL['pointer']:\n line_pointer[col] += 1\n elif rectangle_meter[row, col] == SEG_LABEL['scale']:\n line_scale[col] += 1\n line_scales.append(line_scale)\n line_pointers.append(line_pointer)\n return line_scales, line_pointers\n\ndef mean_binarization(data_list):\n \"\"\"\n Binarize the data\n\n Param:\n data_list (list[np.array]):input data\n\n Return:\n binaried_data_list (list[np.array]):output data。\n\n \"\"\"\n batch_size = len(data_list)\n binaried_data_list = data_list\n for i in range(batch_size):\n mean_data = np.mean(data_list[i])\n width = data_list[i].shape[0]\n for col in range(width):\n if data_list[i][col] < mean_data:\n binaried_data_list[i][col] = 0\n else:\n binaried_data_list[i][col] = 1\n return binaried_data_list\n\n### \ndef locate_scale(line_scales):\n \"\"\"\n Find location of center of each scale\n\n Param:\n line_scales (list[np.array]):the list of binaried scales value\n\n Return:\n scale_locations (list[list]):location of each scale\n\n \"\"\"\n batch_size = len(line_scales)\n scale_locations = list()\n 
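# Scan the binarized column profile for runs of consecutive non-zero\n    # columns: each run is one tick mark, and its center (start + end) / 2 is\n    # stored. E.g. line_scale = [0, 2, 3, 1, 0, 0] gives one tick at 2.0.\n    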
for i in range(batch_size):\n        line_scale = line_scales[i]\n        width = line_scale.shape[0]\n        find_start = False\n        one_scale_start = 0\n        one_scale_end = 0\n        locations = list()\n        for j in range(width - 1):\n            if line_scale[j] > 0 and line_scale[j + 1] > 0:\n                if not find_start:\n                    one_scale_start = j\n                    find_start = True\n            if find_start:\n                if line_scale[j] == 0 and line_scale[j + 1] == 0:\n                    one_scale_end = j - 1\n                    one_scale_location = (one_scale_start + one_scale_end) / 2\n                    locations.append(one_scale_location)\n                    one_scale_start = 0\n                    one_scale_end = 0\n                    find_start = False\n        scale_locations.append(locations)\n    return scale_locations\n\ndef locate_pointer(line_pointers):\n    \"\"\"\n    Find location of center of pointer\n\n    Param:\n        line_pointers (list[np.array]): the list of binarized pointer values\n\n    Return:\n        pointer_locations (list[float]): location of the pointer in each image\n\n    \"\"\"\n    batch_size = len(line_pointers)\n    pointer_locations = list()\n    for i in range(batch_size):\n        line_pointer = line_pointers[i]\n        find_start = False\n        pointer_start = 0\n        pointer_end = 0\n        location = 0\n        width = line_pointer.shape[0]\n        for j in range(width - 1):\n            if line_pointer[j] > 0 and line_pointer[j + 1] > 0:\n                if not find_start:\n                    pointer_start = j\n                    find_start = True\n            if find_start:\n                if line_pointer[j] == 0 and line_pointer[j + 1] == 0:\n                    pointer_end = j - 1\n                    location = (pointer_start + pointer_end) / 2\n                    find_start = False\n                    break\n        pointer_locations.append(location)\n    return pointer_locations\n\n\ndef get_relative_location(scale_locations, pointer_locations):\n    \"\"\"\n    Match location of pointer and scales\n\n    Param:\n        scale_locations (list[list]): location of each scale\n        pointer_locations (list[float]): location of pointer\n\n    Return:\n        pointed_scales (list[dict]): a list of dict with:\n            'num_scales': total number of scales\n            'pointed_scale': predicted number of scales\n    \n    \"\"\"\n    pointed_scales = list()\n    for scale_location, pointer_location in zip(scale_locations,\n                                                pointer_locations):\n        num_scales = len(scale_location)\n        pointed_scale = -1\n        if num_scales > 0:\n            for i in range(num_scales - 1):\n                if scale_location[i] <= pointer_location < scale_location[i + 1]:\n                    pointed_scale = i + (pointer_location - scale_location[i]) / (scale_location[i + 1] - scale_location[i] + 1e-05) + 1\n        result = {'num_scales': num_scales, 'pointed_scale': pointed_scale}\n        pointed_scales.append(result)\n    return pointed_scales\n\n\ndef calculate_reading(pointed_scales):\n    \"\"\"\n    Calculate the value of meter according to the type of meter\n\n    Param:\n        pointed_scales (list[dict]): the dicts produced by get_relative_location\n\n    Return:\n        readings (list[float]): the list of values read from meter\n    \n    \"\"\"\n    readings = list()\n    batch_size = len(pointed_scales)\n    for i in range(batch_size):\n        pointed_scale = pointed_scales[i]\n        # find the type of meter according to the total number of scales\n        if pointed_scale['num_scales'] > TYPE_THRESHOLD:\n            reading = pointed_scale['pointed_scale'] * METER_CONFIG[0]['scale_interval_value']\n        else:\n            reading = pointed_scale['pointed_scale'] * METER_CONFIG[1]['scale_interval_value']\n        readings.append(reading)\n    return readings\n\ndet_model_path = f\"{MODEL_DIR}/meter_det_model/model.pdmodel\"\ndet_model_shape = {'image': [1, 3, 608, 608], 'im_shape': [1, 2], 'scale_factor': [1, 2]}\nseg_model_path = f\"{MODEL_DIR}/meter_seg_model/model.pdmodel\"\nseg_model_shape = {'image': [ov.Dimension(1, 2), 3, 512, 512]}\n\nerode_kernel = 4\nscore_threshold = 0.5\nseg_batch_size = 2\ninput_shape = 608\n\n# Initialize the model objects\ndetector = 
Model(det_model_path, det_model_shape)\nsegmenter = Model(seg_model_path, seg_model_shape)\n\ndef segmentation_map_to_image(\n result: np.ndarray, colormap: np.ndarray, remove_holes: bool = False\n) -> np.ndarray:\n \"\"\"\n Convert network result of floating point numbers to an RGB image with\n integer values from 0-255 by applying a colormap.\n\n :param result: A single network result after converting to pixel values in H,W or 1,H,W shape.\n :param colormap: A numpy array of shape (num_classes, 3) with an RGB value per class.\n :param remove_holes: If True, remove holes in the segmentation result.\n :return: An RGB image where each pixel is an int8 value according to colormap.\n \"\"\"\n if len(result.shape) != 2 and result.shape[0] != 1:\n raise ValueError(\n f\"Expected result with shape (H,W) or (1,H,W), got result with shape {result.shape}\"\n )\n\n if len(np.unique(result)) > colormap.shape[0]:\n raise ValueError(\n f\"Expected max {colormap[0]} classes in result, got {len(np.unique(result))} \"\n \"different output values. Please make sure to convert the network output to \"\n \"pixel values before calling this function.\"\n )\n elif result.shape[0] == 1:\n result = result.squeeze(0)\n\n result = result.astype(np.uint8)\n\n contour_mode = cv2.RETR_EXTERNAL if remove_holes else cv2.RETR_TREE\n mask = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)\n for label_index, color in enumerate(colormap):\n label_index_map = result == label_index\n label_index_map = label_index_map.astype(np.uint8) * 255\n contours, hierarchies = cv2.findContours(\n label_index_map, contour_mode, cv2.CHAIN_APPROX_SIMPLE\n )\n cv2.drawContours(\n mask,\n contours,\n contourIdx=-1,\n color=color.tolist(),\n thickness=cv2.FILLED,\n )\n\n return mask","repo_name":"tongza331/deploy-meter-reader_streamlit","sub_path":"model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":15282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74489405296","text":"\"\"\" Kipoi website configuration file.\n\"\"\"\nDEBUG = True\nEXPLAIN_TEMPLATE_LOADING = False\n\n# Cache config\nCACHE_TYPE = 'simple' # 'memcached'\nMEMCACHED_SERVERS = ['127.0.0.1:11211']\n# Cache duration - set in seconds\nCACHE_TIMEOUT = 600\n\n\nSOURCE = \"kipoi\"\n","repo_name":"kipoi/website","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"38847816718","text":"from Sprites import BaseSprite\n\n\nclass Skillsys(BaseSprite):\n def __init__(self, player, handler):\n image_path = \"img/skills.png\"\n super().__init__(image_path)\n self.skillimage = None\n self.hide = True\n self.available_skills = {\n # first value is cost second is level\n \"fireball\": (0, 0),\n \"bolt\": (10, 5),\n \"energyblast\": (15, 10),\n \"deathball\": (100, 40),\n \"selfdestruct\": (0, 0),\n \"fired\": (20, 20),\n \"fireballv2\": (30, 10)\n }\n self.load_image(image_path)\n self.pressed = 0\n self.player = player\n self.handler = handler\n\n def toggle_visibility(self):\n self.pressed += 1\n if self.pressed == 1:\n self.hide = not self.hide\n self.pressed = 0\n \n def rendering(self, surface):\n if not self.hide:\n clicked = self.pygame.mouse.get_pressed()\n mousepos = self.pygame.mouse.get_pos()\n self.skillimage = self.pygame.transform.scale(self.skillimage, (250,350))\n surface.blit(self.skillimage, (110, 0))\n skills_to_render = 
[skill for skill in self.available_skills.keys() if skill not in self.player.skills]\n\n skill_data = { #TODO set correct requirements and add all skills \n \"bolt\": {\n \"image_path\": \"img/bolt_skill.png\",\n \"position\": (152, 71),\n \"name\": \"Bolt\",\n \"description\": \"Damage:\\n5% spellpower\",\n \"requirements\": \"Requirements:\\nlevel 5, 10 coin\"\n },\n \"energyblast\": {\n \"image_path\": \"img/Energyblast_skill.png\",\n \"position\": (266, 71),\n \"name\": \"Energyblast\",\n \"description\": \"Damage:\\n20% spellpower\",\n \"requirements\": \"Requirements:\\nlevel 10, 15 coin\"\n },\n \"fireball\": {\n \"image_path\": \"img/fireball_skill.png\",\n \"position\": (209, 71),\n \"name\": \"Fireball\",\n \"description\": \"Damage:\\n10% spellpower\",\n \"requirements\": \"Requirements:\\nlevel 0, 0 coin\"\n },\n \n \"deathball\": {\n \"image_path\": \"img/deathball_skill.png\",\n \"position\": (325,71),\n \"name\": \"Death ball\",\n \"description\": \"Damage:\\n25% spellpower\",\n \"requirements\": \"Requirements:\\nlevel 40, 100 coin\"\n },\n \"selfdestruct\": {\n \"image_path\": \"img/deathball_skill.png\", #Placeholder\n \"position\": (152, 125),\n \"name\": \"Self destruct\",\n \"description\": \"Commit Suicide\",\n \"requirements\": \"Requirements:\\n level 0, 0 coin\"\n },\n \"fired\": {\n \"image_path\": \"img/deathball_skill.png\", #Placeholder\n \"position\": (266, 125),\n \"name\": \"Fired\",\n \"description\": \"Fire the enemy\",\n \"requirements\": \"Requirements:\\n level 20, 20 coin\"\n },\n \"fireballv2\": {\n \"image_path\": \"img/fireball_skill.png\", #Placeholder\n \"position\": (209, 125),\n \"name\": \"Fireball v2\",\n \"description\": \"Scorcing hot fireballs\",\n \"requirements\": \"Requirements:\\n level 10, 30 coin\"\n }\n }\n\n colliding_skill_rects = []\n names = []\n\n for skill in skills_to_render:\n \n if skill in skill_data:\n skill_info = skill_data[skill]\n skill_image = self.pygame.image.load(skill_info[\"image_path\"]).convert_alpha()\n skill_image = self.pygame.transform.scale(skill_image, (25, 25))\n skill_rect = skill_image.get_rect(center=skill_info[\"position\"])\n #skill_info = skill_data[skill]\n \n\n surface.blit(skill_image, skill_rect)\n if skill_rect.collidepoint(mousepos):\n colliding_skill_rects.append(skill_rect)\n stat_image = self.pygame.image.load(\"img/status_bar.png\").convert_alpha()\n stat_surface = self.pygame.Surface((140, 140))\n \n text = self.skillfont.render(skill_info[\"name\"], True, (255, 255, 255))\n text_rect = text.get_rect(center=(60, 10))\n skill_info_text = self.skillfont.render(skill_info[\"description\"], True, (255, 255, 255))\n skill_info_rect = skill_info_text.get_rect(center=(65, 50))\n requirements_info_text = self.skillfont.render(skill_info[\"requirements\"], True, (255, 255, 255))\n requirements_info_rect = requirements_info_text.get_rect(center = (55, 100))\n stat_surface.blit(text, text_rect)\n stat_surface.blit(skill_info_text, skill_info_rect)\n stat_surface.blit(requirements_info_text, requirements_info_rect)\n stat_surface.blit(stat_image, skill_rect)\n if clicked[2]:\n self.Buy_Skill(skill)\n \n if colliding_skill_rects:\n stat_surface.set_alpha(200)\n surface.blit(stat_surface, colliding_skill_rects[0])\n\n \n \n def Buy_Skill(self, wskill):\n if wskill in self.player.skills:\n print('You already have {}!'.format(wskill))\n\n else:\n cost, level_requirement = self.available_skills.get(wskill, (0, 0))\n if self.handler.money >= cost and self.player.level >= level_requirement:\n 
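# Each available_skills entry is a (coin_cost, level_requirement) tuple,\n                # e.g. \"bolt\": (10, 5) needs 10 coins at player level 5; a successful\n                # purchase deducts the coins and unlocks the skill.\n                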
self.handler.money -= cost\n                self.player.skills.append(wskill)\n                self.skill()\n\n            else:\n                print('Not enough money or level too low')\n\n    def skill(self):\n        for i in self.player.skills:\n            if i in self.available_skills:\n                self.available_skills.pop(i)\n\n    def load_image(self, image_path):\n        try:\n            self.skillimage = self.pygame.image.load(image_path).convert_alpha()\n        except self.pygame.error as e:\n            print(\"Error loading skills image:\", str(e))\n\n","repo_name":"neonzz1/RPGDevelopment","sub_path":"Skillssytem.py","file_name":"Skillssytem.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"29139063341","text":"\"\"\"\njetson nano object detection \nhttps://rawgit.com/dusty-nv/jetson-inference/dev/docs/html/python/jetson.inference.html#detectNet\n\"\"\"\n\nimport jetson.inference\nimport jetson.utils\nimport rospy\nimport cv2\nimport numpy as np\nimport subprocess, shlex, psutil\nfrom sensor_msgs.msg import Image, CompressedImage\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom darknet_ros_msgs.msg import BBoxes, BBox\n\nnet = jetson.inference.detectNet(\"ssd-mobilenet-v2\", threshold=0.5)\ncamera = jetson.utils.gstCamera(1280,720,\"/dev/video0\") # '/dev/video0' for V4L2\ncompmsg = CompressedImage()\nrospy.init_node(\"visualizer\")\n\n\ndef detection_and_publish():\n\t\n\twhile True :\n\t\timg,width,height = camera.CaptureRGBA()\n\t\tdetections = net.Detect(img)\n\n\t\tbboxes = BBoxes()\n\t\tbboxes.header.stamp = rospy.Time.now()\n\t\tbboxes.header.frame_id = \"detection\"\n\n\t\tfor det in detections:\n\t\t\tbbox = BBox()\n\t\t\tbbox.probability = det.Confidence\n\t\t\tbbox.cx = det.Center.x\n\t\t\tbbox.cy = det.Center.y\n\t\t\tbbox.area = det.Area\n\t\t\tbbox.id = det.ClassID\n\t\t\tbboxes.bboxes.append(bbox)\n\n\t\t#convert to ros \n\t\tnumpyImg = jetson.utils.cudaToNumpy(img, width, height, 4)\n\t\taimg1 = cv2.cvtColor(numpyImg.astype(np.uint8), cv2.COLOR_RGB2BGR)\n\t\tcompmsg.header.stamp = rospy.Time.now()\n\t\tcompmsg.format = \"jpeg\"\n\t\tencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 30]\n\t\tcompmsg.data = np.array(cv2.imencode('.jpg', aimg1, encode_param)[1]).tostring()\n\t\tcomp_img_pub.publish(compmsg)\n\t\tbboxes_pub.publish(bboxes)\n\n\n# publishers must exist before the detection loop starts publishing to them\ncomp_img_pub = rospy.Publisher(\"/camera_nano/object_detect/image_raw/compressed\", CompressedImage, queue_size = 1)\nbboxes_pub = rospy.Publisher(\"/nano_detection/bboxes\", BBoxes, queue_size = 1)\ndetection_and_publish()\nrospy.spin()\n\n","repo_name":"kka-na/jetson_detectnet","sub_path":"detectnet_ros.py","file_name":"detectnet_ros.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"37125759350","text":"import json, os\n\ninput_dir = 'input'\n\nfor filename in os.listdir(input_dir):\n    if filename.split(\".\")[1] == \"json\":\n        with open(input_dir+\"/\"+filename) as json_file:\n            data = json.load(json_file)\n            selected = False\n            if data.get(\"ingredients\"):\n                for line in data[\"ingredients\"]:\n                    if line.get(\"ore\"):\n                        if line[\"ore\"] == \"toolCuttingboard\":\n                            selected = True\n            if selected:\n                data[\"type\"] = \"tfc:damage_item_shapeless\"\n                with open(\"output/\"+filename, \"w\") as outfile:\n                    json.dump(data, outfile)\n
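\n# Illustrative example (hypothetical file, not taken from this repo): an\n# input recipe such as {\"ingredients\": [{\"ore\": \"toolCuttingboard\"}]} is\n# selected and rewritten with \"type\": \"tfc:damage_item_shapeless\", then\n# saved under output/ with the same file name.\n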
","repo_name":"Lebeg134/TFC-PH_Compat","sub_path":"Scripts/CuttingboardScript.py","file_name":"CuttingboardScript.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"36920334815","text":"'''\nCreated on Oct 12, 2010\nDecision Tree Source Code for Machine Learning in Action Ch. 3\n@author: Peter Harrington\n'''\nfrom math import log\nimport operator\n\ndef createDataSet():\n dataSet = [[1, 1, 'yes'],\n [1, 1, 'yes'],\n [1, 0, 'no'],\n [0, 1, 'no'],\n [0, 1, 'no']]\n labels = ['no surfacing','flippers']\n #change to discrete values\n return dataSet, labels\n\ndef calcShannonEnt(dataSet):\n numEntries = len(dataSet)\n labelCounts = {}\n for featVec in dataSet: #the the number of unique elements and their occurance\n currentLabel = featVec[-1]\n if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0\n labelCounts[currentLabel] += 1\n print(currentLabel)\n print(labelCounts)\n shannonEnt = 0.0\n for key in labelCounts:\n prob = float(labelCounts[key])/numEntries\n print(prob)\n shannonEnt -= prob * log(prob,2) #log base 2\n print(shannonEnt)\n return shannonEnt\n\nif __name__ == '__main__':\n dataSet, labels = createDataSet()\n print(dataSet)\n print(labels)\n print(calcShannonEnt(dataSet))","repo_name":"gaodawn/ML","sub_path":"Decision Tree/testshannon.py","file_name":"testshannon.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23212665029","text":"import statistics\n\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.shortcuts import render, redirect\n\nfrom movie_reviews.forms import SignUpForm, LoginForm, ReviewForm\nfrom movie_reviews.models import Movies, Reviews, MovieReviewState\n\n\ndef root(request):\n if request.user.is_authenticated:\n return redirect('home')\n else:\n return render(request, 'index.html.j2')\n\n\ndef home(request):\n if request.user.is_authenticated:\n all_movies = Movies.objects.all()\n movies_parsed = []\n for movie in all_movies:\n reviews = [review.star_rating for review in list(Reviews.objects.filter(movie=movie))]\n reviews = [0.0] if len(reviews) == 0 else reviews\n snippet = movie.synopsis[:128] + \"...\"\n movies_parsed.append((movie.id, movie.name, snippet, f\"{statistics.mean(reviews):.2f}\"))\n return render(request, 'home.html.j2', {'movies': movies_parsed})\n else:\n return redirect('login')\n\n\ndef movie_redirect(request):\n return redirect('home')\n\n\ndef movies(request, **kwargs):\n if request.user.is_authenticated:\n state = MovieReviewState.NOT_ADDING_REVIEW\n pid = kwargs.pop(\"pid\", 1)\n try:\n selected_movie = Movies.objects.get(id=pid)\n except Movies.DoesNotExist:\n selected_movie = Movies(id=-1, name=\"Movie not found\", synopsis=\"Movie not found!\")\n\n reviews = Reviews.objects.filter(movie__id=pid)\n\n if request.method == 'POST':\n form = ReviewForm(request.POST)\n if form.is_valid():\n if len(reviews.filter(created_by=request.user)) == 0:\n form.save_form(request.user, selected_movie)\n state = MovieReviewState.ADDED_SUCCESSFULLY\n else:\n state = MovieReviewState.ALREADY_REVIEWED\n else:\n state = MovieReviewState.INVALID_FORM\n return render(\n request,\n 'movies.html.j2',\n {\n 'movie': selected_movie,\n 'reviews': reviews,\n 'review_state': state,\n 'review_form': ReviewForm()\n }\n )\n else:\n return redirect('login')\n\n\ndef user_logout(request):\n if 
request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef user_login(request):\n if not request.user.is_authenticated:\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'login.html.j2', {'form': form, 'login_failed': True})\n else:\n form = LoginForm()\n return render(request, 'login.html.j2', {'form': form, 'login_failed': False})\n else:\n return redirect('home')\n\n\ndef user_signup(request):\n if not request.user.is_authenticated:\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n password = form.clean_password2()\n user = authenticate(username=username, password=password)\n login(request, user)\n return redirect('home')\n else:\n form = SignUpForm()\n return render(request, 'signup.html.j2', {'form': form})\n else:\n return redirect('home')\n","repo_name":"fauzanardh/Movie_Reviews","sub_path":"movie_reviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12924969852","text":"import argparse\r\nimport logging\r\nimport pathlib\r\nimport warnings\r\nfrom typing import TextIO\r\nimport csv\r\n\r\nimport numpy as np\r\nimport sklearn.metrics as metrics\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom constants import AMINO_ACID_INDICES, STANDARD_AMINO_ACIDS, AMINO_ACID_LETTERS\r\nfrom neural_net import StructureDataset, NeuralNetwork\r\nfrom plots import Plots\r\nfrom train_model import Dataset, read_test_file\r\n\r\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n\r\n\r\ndef get_args():\r\n \"\"\"\r\n Fetch command-line arguments\r\n \"\"\"\r\n arg_parser = argparse.ArgumentParser(description=\"predict sequences from protein conformational features and test \"\r\n \"model performance\")\r\n arg_parser.add_argument('feature_dir',\r\n type=str,\r\n help=\"directory of processed chain features\")\r\n arg_parser.add_argument('test_list',\r\n type=str,\r\n help=\"path to a newline-delimited list of protein chain codes for sequence prediction\")\r\n arg_parser.add_argument('model_params',\r\n type=str,\r\n help=\"file containing trained model parameters\")\r\n arg_parser.add_argument('-p',\r\n '--pred_only',\r\n action='store_true',\r\n help=\"only predict sequences, without testing the model and comparing predictions with \"\r\n \"true sequences.\")\r\n arg_parser.add_argument('-o',\r\n '--out_dir',\r\n type=str,\r\n default='./pred',\r\n help=\"output directory. 
Will create a new directory if OUT_DIR does not exist.\")\r\n args = arg_parser.parse_args()\r\n return pathlib.Path(args.feature_dir), pathlib.Path(args.test_list), pathlib.Path(args.model_params), \\\r\n pathlib.Path(args.out_dir), args.pred_only\r\n\r\n\r\nclass Predictor:\r\n def __init__(self, parameter_path, prediction_only_mode, test_chains, feature_directory):\r\n parameters = torch.load(parameter_path)\r\n network_shape = [len(layer) for layer in parameters.values()][:-2:2]\r\n input_nodes = parameters['linear_relu_stack.0.weight'].shape[1]\r\n neighbour_count = int((input_nodes - 4) / 11)\r\n self.dataset = Dataset(test_chains, feature_directory, neighbour_count)\r\n self.model = NeuralNetwork(input_nodes=input_nodes, network_shape=network_shape, dropout=1, ).to(device)\r\n self.model.load_state_dict(parameters)\r\n self.prediction_only_mode = prediction_only_mode\r\n\r\n def sequence(self, chain_id: str) -> tuple[list[int], list[np.ndarray], list[torch.tensor], list[np.ndarray]]:\r\n \"\"\"\r\n Input the structural features of each residue in a chain into the neural network to predict the amino acid for\r\n each residue in the chain\r\n\r\n Args:\r\n chain_id: the chain id of the polypeptide chain for which the sequence should be predicted\r\n\r\n Returns:\r\n (tuple):\r\n predicted_residues: a list of integers encoding the amino acids predicted as the sequence of the\r\n polypeptide chain\r\n chain_softmax: a list of 20 by 1 numpy arrays that represent the predicted probability for each amino\r\n acid at each residue position in the polypeptide chain\r\n chain_loss: a list of the cross-entropy loss values between the predicted amino acid probability tensor\r\n and the true amino acid tensor for each residue in the polypeptide chain\r\n true_residues: a list of integers encoding the true sequence of the amino acid chain as it was parsed\r\n by the featurise module\r\n \"\"\"\r\n\r\n chain_features = self.dataset.chains[chain_id].features\r\n feature_tensors = {feature_name: torch.tensor(feature, dtype=torch.float64)\r\n for feature_name, feature in chain_features.items()}\r\n chain_dataset = StructureDataset(feature_tensors)\r\n dataloader = DataLoader(chain_dataset, shuffle=False)\r\n self.model.eval()\r\n chain_softmax = []\r\n predicted_residues = []\r\n true_residues = []\r\n chain_loss = []\r\n # load residue features one at a time\r\n for inputs, label in dataloader:\r\n with torch.no_grad():\r\n # predict residue\r\n output = self.model(inputs.to(device))\r\n if not self.prediction_only_mode:\r\n loss_fn = nn.CrossEntropyLoss(ignore_index=20)\r\n loss = loss_fn(output, label.to(device)).item()\r\n chain_loss.append(loss)\r\n true_residues.extend(label.cpu().numpy())\r\n softmax = nn.functional.softmax(output, dim=1)\r\n chain_softmax.extend(softmax.cpu().numpy())\r\n # the output node with the highest value is the predicted residue\r\n top_residue = output.argmax(1)\r\n predicted_residues.append(int(top_residue))\r\n return predicted_residues, chain_softmax, chain_loss, true_residues\r\n\r\n def complete_seq(self, pred_residues, true_residues, chain, feat_dir):\r\n \"\"\"\r\n Generate a string representing the true and predicted sequence of a polypeptide chain, adding unknown residues\r\n represented by the character 'X'\r\n\r\n Args:\r\n pred_residues: a list of integers representing the predicted amino acid at each residue position in a\r\n polypeptide chain\r\n true_residues: a list of integers representing the true amino acid at each residue position in a\r\n 
polypeptide chain\r\n chain: ID string of a polypeptide chain\r\n feat_dir: pathlib Path to the directory where structural features are saved\r\n\r\n Returns:\r\n (tuple):\r\n pred_seq: string representing the predicted amino acid sequence of the polypeptide chain\r\n\r\n true_seq: string representing the true amino acid sequence of the polypeptide chain\r\n \"\"\"\r\n excluded_res_path = feat_dir / ('excluded_residues_' + chain + '.csv')\r\n excluded_residues = {}\r\n pred_seq = ''\r\n true_seq = ''\r\n # read list of excluded residues, if any residues were excluded\r\n if excluded_res_path.exists():\r\n with open(excluded_res_path, 'r') as file:\r\n # excluded residues written in file as index,residue_name\r\n for line in file:\r\n line = line.split(',')\r\n excluded_residues[int(line[0])] = line[1].strip('\\n')\r\n j = 0\r\n for i in range(len(list(excluded_residues)) + len(pred_residues)):\r\n # predict 'X' for all excluded residues\r\n if i in excluded_residues:\r\n pred_seq += 'X'\r\n if not self.prediction_only_mode:\r\n # true residues are only 'X' if they are non-standard residues\r\n if excluded_residues[i] in AMINO_ACID_INDICES:\r\n true_seq += AMINO_ACID_LETTERS[excluded_residues[i]]\r\n else:\r\n true_seq += 'X'\r\n else:\r\n pred_seq += list(AMINO_ACID_LETTERS.values())[int(pred_residues[j])]\r\n if not self.prediction_only_mode:\r\n true_seq += list(AMINO_ACID_LETTERS.values())[int(true_residues[j])]\r\n j += 1\r\n return pred_seq, true_seq\r\n\r\n\r\ndef check_labels(true_residues):\r\n \"\"\"\r\n List all residues that do not occur in the true sequence.\r\n\r\n Args:\r\n true_residues: list of true amino acids in the polypeptide chain\r\n Returns:\r\n (tuple):\r\n unused_residues: list of standard amino acids that do not occur in the sequence\r\n\r\n labels: list of standard amino acids that are present in the sequence\r\n \"\"\"\r\n unused_residues = []\r\n labels = []\r\n for key in range(0, 20):\r\n if key not in true_residues:\r\n unused_residues.append(key)\r\n else:\r\n labels.append(key)\r\n return unused_residues, labels\r\n\r\n\r\ndef write_reports(report_dir: pathlib.Path,\r\n chain: str,\r\n report_amino_acids: list[str],\r\n chain_report: dict[str: float],\r\n avg_loss: np.ndarray,\r\n unused_residues: list[int]) -> None:\r\n \"\"\"\r\n Write a text file reporting precision, recall and F-score statistics for each amino acid class\r\n\r\n Args:\r\n report_dir: directory where reports are saved\r\n chain: name of the protein chain\r\n report_amino_acids: list of amino acids names included in the chain\r\n chain_report: text reporting classification statistics\r\n avg_loss: average cross-entropy loss between the true and predicted labels\r\n unused_residues: amino acid classes that do not occur in the true residues\r\n \"\"\"\r\n \r\n with open(report_dir / f'{chain}_report.txt', 'w') as file:\r\n file.write(f'\\nAverage loss: {avg_loss}')\r\n file.write(\r\n '\\nNote: precision and F-scores are set to 0.0 for classes that have no predictions')\r\n if unused_residues:\r\n file.write(\r\n f'\\nAmino acids with 0 support: {\", \".join([STANDARD_AMINO_ACIDS[idx] for idx in unused_residues])}.')\r\n\r\n with open(report_dir / f'{chain}_report.csv', 'w') as csvfile:\r\n metric_list = ['precision', 'recall', 'f1-score', 'support']\r\n csv_writer = csv.writer(csvfile,\r\n lineterminator=\"\\n\"\r\n )\r\n csv_writer.writerow([\"amino acid\"] + metric_list)\r\n for label in STANDARD_AMINO_ACIDS:\r\n if label in report_amino_acids:\r\n row = [label] + 
[chain_report[label][metric]\r\n for metric in metric_list\r\n ]\r\n else:\r\n row = [label, \"N/A\", \"N/A\", \"N/A\", 0]\r\n csv_writer.writerow(row)\r\n\r\n\r\nclass Evaluator:\r\n def __init__(self, out_dir):\r\n self.out_dir = out_dir\r\n self.model_predictions = []\r\n self.model_true = []\r\n self.model_softmax = []\r\n self.model_loss = []\r\n\r\n def chain(self,\r\n chain: str,\r\n true_seq: str,\r\n true_residues: list[np.ndarray],\r\n pred_residues: list[int],\r\n loss: list[int],\r\n chain_softmax: list[np.ndarray]) -> None:\r\n \"\"\"\r\n Generate and save text files with classification metric reports, prediction probabilities and per-residue loss.\r\n\r\n Args:\r\n chain: PDB Chain ID\r\n true_seq: True amino acid sequence of the polypeptide chain\r\n true_residues: Integers representing the true amino acid at each residue position\r\n pred_residues: Integers representing the predicted amino acid at each residue position\r\n loss: Cross-entropy lost for each predicted residue\r\n chain_softmax: Soft-max probability for each predicted residue\r\n \"\"\"\r\n self.model_predictions.extend(pred_residues)\r\n self.model_true.extend(true_residues)\r\n self.model_softmax.extend(chain_softmax)\r\n self.model_loss.extend(loss)\r\n avg_loss = np.mean(loss)\r\n\r\n # list residues that do and do not occur in the true sequence\r\n unused_residues, labels = check_labels(true_residues)\r\n # only plot confusion matrices if all 20 residues occur in the label set\r\n report_amino_acids = [STANDARD_AMINO_ACIDS[i] for i in labels]\r\n with warnings.catch_warnings():\r\n # suppress sklearn warnings\r\n warnings.simplefilter(\"ignore\")\r\n # generate report of per-class precision, recall and F1-scores\r\n chain_report = metrics.classification_report(true_residues,\r\n pred_residues,\r\n target_names=report_amino_acids,\r\n labels=labels,\r\n digits=3,\r\n zero_division=0,\r\n output_dict=True\r\n )\r\n\r\n\r\n # write files\r\n report_dir = self.out_dir / 'chain_reports'\r\n if not report_dir.exists():\r\n report_dir.mkdir()\r\n write_reports(report_dir, chain, report_amino_acids, chain_report, avg_loss, unused_residues)\r\n original_dir = self.out_dir / 'original_sequences'\r\n if not original_dir.exists():\r\n original_dir.mkdir()\r\n with open(original_dir / f'{chain}_original.fasta', 'w') as file:\r\n file.write(f'>{chain[:4].upper()}|Chain {chain[4:].upper()}|SeqPredNN original PDB sequence\\n{true_seq}')\r\n probability_dir = self.out_dir / 'probabilities'\r\n if not probability_dir.exists():\r\n probability_dir.mkdir()\r\n np.savetxt(probability_dir / f'{chain}_probabilities.csv',\r\n chain_softmax,\r\n delimiter=',',\r\n header=','.join(STANDARD_AMINO_ACIDS),\r\n comments='')\r\n loss_dir = self.out_dir / 'residue_loss'\r\n if not loss_dir.exists():\r\n loss_dir.mkdir()\r\n np.savetxt(loss_dir / f'{chain}_residue_losses.csv',\r\n loss,\r\n delimiter=','\r\n )\r\n\r\n def model(self):\r\n \"\"\" Generate a classification report, confusion matrices and a top-K accuracy curve for the test set.\"\"\"\r\n model_plot = Plots(self.out_dir)\r\n avg_loss = np.mean(self.model_loss)\r\n\r\n # list residue labels that do and do not occur in the test set\r\n unused_residues, labels = check_labels(self.model_true)\r\n # only plot confusion matrices if all 20 residues occur in the label set\r\n if not unused_residues:\r\n model_plot.confusion_matrix(self.model_true, self.model_predictions, None, 'unnormalised_conf_matrix')\r\n model_plot.confusion_matrix(self.model_true, self.model_predictions, 
'pred', 'pred_norm_conf_matrix')\r\n model_plot.confusion_matrix(self.model_true, self.model_predictions, 'true', 'true_norm_conf_matrix')\r\n with warnings.catch_warnings():\r\n # suppress sklearn warnings\r\n warnings.simplefilter(\"ignore\")\r\n top_k_accuracy = {k: metrics.top_k_accuracy_score(self.model_true, self.model_softmax, k=k) for k in\r\n range(1, 21)}\r\n with open(self.out_dir / 'top_K.csv', 'w') as file:\r\n file.write(f'k,Top-k accuracy')\r\n for k in top_k_accuracy:\r\n file.write(f'{k},{top_k_accuracy[k]}\\n')\r\n else:\r\n print(\"Could not plot confusion matrices or top-K accuracy curve because some residues do not have any \"\r\n \"labels in the test set\")\r\n with warnings.catch_warnings():\r\n # suppress sklearn warnings\r\n warnings.simplefilter(\"ignore\")\r\n # generate report of per-class precision, recall and F1-scores\r\n report_amino_acids = [STANDARD_AMINO_ACIDS[i] for i in labels]\r\n\r\n model_report = metrics.classification_report(self.model_true,\r\n self.model_predictions,\r\n target_names=report_amino_acids,\r\n digits=5,\r\n zero_division=0,\r\n labels=labels,\r\n output_dict=True)\r\n \r\n write_reports(self.out_dir,\r\n \"model\",\r\n report_amino_acids,\r\n model_report,\r\n avg_loss,\r\n unused_residues\r\n )\r\n\r\n\r\ndef prediction():\r\n logging_level = logging.INFO\r\n logging.basicConfig(level=logging_level, format=\"%(message)s\")\r\n logging.captureWarnings(False)\r\n\r\n feature_directory, test_list, parameter_path, output_directory, prediction_only_mode = get_args()\r\n\r\n if not feature_directory.exists():\r\n raise FileNotFoundError(feature_directory)\r\n if not output_directory.exists():\r\n output_directory.mkdir()\r\n\r\n test_chains = read_test_file(test_list)\r\n predict = Predictor(parameter_path, prediction_only_mode, test_chains, feature_directory)\r\n test_chains = predict.dataset.chains\r\n if predict.dataset.excluded_chains:\r\n with open(output_directory / 'excluded_chains.txt', 'w') as file:\r\n file.write('\\n'.join(predict.dataset.excluded_chains))\r\n evaluate = Evaluator(output_directory)\r\n n_chains = len(test_chains)\r\n i = 1\r\n prediction_dir = output_directory / 'predicted_sequences'\r\n if not prediction_dir.exists():\r\n prediction_dir.mkdir()\r\n for chain in test_chains:\r\n print(f'Chain {i}/{n_chains}')\r\n pred_residues, chain_softmax, loss, true_residues = predict.sequence(chain)\r\n pred_seq, true_seq = predict.complete_seq(pred_residues, true_residues, chain, feature_directory)\r\n print(chain, '- Predicted sequence:\\n' + pred_seq)\r\n if not prediction_only_mode:\r\n print(chain, '- Original sequence:\\n' + true_seq + '\\n')\r\n with open(prediction_dir / f'{chain}_predicted.fasta', 'w') as file:\r\n file.write(f'>{chain[:4].upper()}|Chain {chain[4:].upper()}|SeqPredNN Prediction\\n{pred_seq}')\r\n true_known_residues = []\r\n pred_known_residues = []\r\n softmax_known_residues = []\r\n\r\n for true_residue, pred_residue, softmax_residue in zip(true_residues, pred_residues, chain_softmax):\r\n if true_residue != AMINO_ACID_INDICES['X']:\r\n true_known_residues.append(true_residue)\r\n pred_known_residues.append(pred_residue)\r\n softmax_known_residues.append(softmax_residue)\r\n if not prediction_only_mode:\r\n evaluate.chain(chain, true_seq, true_known_residues, pred_known_residues, loss, softmax_known_residues)\r\n i += 1\r\n\r\n if not prediction_only_mode:\r\n evaluate.model()\r\n\r\n\r\nif __name__ == '__main__':\r\n 
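# Example invocation (all paths are placeholders):\r\n    #   python predict.py features/ test_chains.txt model_params.pth -o pred\r\n    # Add -p/--pred_only to predict sequences without evaluating them against\r\n    # the true sequences.\r\n    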
prediction()\r\n","repo_name":"falategan/SeqPredNN","sub_path":"SeqPredNN/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":18806,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"7678603666","text":"import time\nimport os\n\npies = \"Buldog\"\npieski = [\"Mops\", \"Owczarek\", \"Golden\", \"Szpic\", \"Husky\"]\n\nstrona = input(\"Podaj swoją ulubioną stronę internetową: \")\n\nfilename = \"C:\\\\Users\\\\szawe\\\\PycharmProjects\\\\Skryptowe_2\\\\txt.txt\"\n\nfile = open(filename, 'r')\ncontent = file.read()\nprint(content)\nfile.close()\n\nwith open(filename, 'a') as file:\n file.write(strona)\n file.write(pies)\n file.write('\\n')\n file.writelines(pieski)\n for piesek in pieski:\n file.write(piesek+'\\n')","repo_name":"wieszczeczynskip/Skryptowe_2022","sub_path":"Skryptowe_2/zd8.py","file_name":"zd8.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28867462115","text":"\"\"\"Test CBI module.\"\"\"\nfrom argparse import Namespace\nfrom pathlib import Path\n\nfrom .test_metadata import TEST_FILES_PATH, TMP_ROOT, read_metadata, write_metadata\n\nFN = Path(\"Captain Science #001-cbi.cbr\")\nARCHIVE_PATH = TEST_FILES_PATH / FN\nTMP_PATH = TMP_ROOT / \"test_cbi\"\nNEW_TEST_CBZ_PATH = TMP_PATH / FN.with_suffix(\".cbz\")\nMETADATA = {\n \"series\": \"Captain Science\",\n \"issue\": \"1\",\n \"issue_count\": 7,\n \"publisher\": \"Youthful Adventure Stories\",\n \"month\": 11,\n \"year\": 1950,\n \"genres\": frozenset([\"Science Fiction\"]),\n \"volume\": 1950,\n \"credits\": [\n {\"person\": \"Wally Wood\", \"role\": \"Artist\"},\n {\"person\": \"Joe Orlando\", \"role\": \"Writer\", \"primary\": True},\n ],\n \"language\": \"en\",\n \"country\": \"US\",\n \"title\": \"The Beginning\",\n \"page_count\": 36,\n \"cover_image\": \"Captain Science 001/CaptainScience#1_01.jpg\",\n \"ext\": \"cbr\",\n}\nCONFIG = Namespace(comicbox=Namespace(write_comicbookinfo=True))\n\n\ndef test_read_cbi():\n \"\"\"Read CBI archive.\"\"\"\n read_metadata(ARCHIVE_PATH, METADATA)\n\n\ndef test_write_cbi():\n \"\"\"Write CBI archive.\"\"\"\n write_metadata(TMP_PATH, NEW_TEST_CBZ_PATH, METADATA, CONFIG)\n","repo_name":"ajslater/comicbox","sub_path":"tests/test_cbi.py","file_name":"test_cbi.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"10007602976","text":"\"\"\"highscores.py - Chris Fisher \"cdfisher\", 2023\nHighscores submodule of osrs-tools. 
Fetches and formats data from the Old\nSchool RuneScape highscores via the index_lite JSON API endpoint.\n\"\"\"\nimport json\nimport requests\nfrom warnings import warn\nfrom pprint import pformat\nfrom resources.entries import *\nfrom resources.url_builder import build_url\n\n\ndef get_target_type(target: str) -> str:\n \"\"\"Returns whether a given contest target is a skill, activity, or boss.\n\n :param target: String representation of target to query\n :return: String representing type.\n \"\"\"\n if target in skill_attributes:\n return 'skill'\n elif target in activity_attributes:\n return 'activity'\n elif target in boss_attributes:\n return 'boss'\n else:\n raise ValueError(f'Target {target} not recognized.')\n\n\nclass SkillEntry:\n \"\"\"SkillEntry\n\n Class used to wrap up data for skills in order for it to be easily accessed via object attributes.\n @:arg data dict: Highscores entries for a given skill.\n \"\"\"\n def __init__(self, data: dict):\n self.rank = data[\"rank\"]\n self.level = data[\"level\"]\n self.xp = data[\"xp\"]\n\n def __str__(self) -> str:\n return f'{{\"rank\": {self.rank},\\n\"level\": {self.level},\\n\"xp\": {self.xp}}}'\n\n def __repr__(self) -> str:\n self._rep_dict = {\"rank\": self.rank, \"level\": self.level, \"xp\": self.xp}\n return str(self._rep_dict)\n\n\nclass ActivityEntry:\n \"\"\"ActivityEntry\n\n Class used to wrap up data for activities so it can be easily accessed via object attributes.\n @:arg data dict: Highscores entries for a given activity.\n \"\"\"\n def __init__(self, data: dict):\n self.rank = data[\"rank\"]\n self.score = data[\"score\"]\n\n def __str__(self) -> str:\n return f'{{\"rank\": {self.rank},\\n\"score\": {self.score}}}'\n\n def __repr__(self) -> str:\n self._rep_dict = {\"rank\": self.rank, \"score\": self.score}\n return str(self._rep_dict)\n\n\nclass BossEntry:\n \"\"\"BossEntry\n\n Class used to wrap up data for bosses so it can be easily accessed via object attributes.\n @:arg data dict: Highscores entries for a given boss.\n \"\"\"\n def __init__(self, data: dict):\n self.rank = data[\"rank\"]\n self.kc = data[\"score\"]\n\n def __str__(self) -> str:\n return f'{{\"rank\": {self.rank},\\n\"KC\": {self.kc}}}'\n\n def __repr__(self) -> str:\n self._rep_dict = {\"rank\": self.rank, \"kc\": self.kc}\n return str(self._rep_dict)\n\n\nclass Highscores:\n \"\"\"Highscores\n\n Class to fetch and represent a player's entries on the OSRS highscores.\n @:arg rsn str: The player's current Old School RuneScape username.\n @:arg target str: Category of the Highscores being queried. 
Defaults to\n 'default'\n Valid values: {'default', 'ironman', 'ultimate',\n 'hardcore_ironman', 'seasonal', 'deadman',\n 'tournament', 'fresh_start', 'skiller',\n 'skiller_defence'}\n \"\"\"\n\n def __init__(self, rsn: str, target='default'):\n self.rsn = rsn\n self.target = target\n self._parse_data(self._fetch_data())\n\n def _fetch_data(self) -> dict:\n max_retries = 5\n n_retries = 0\n while n_retries <= max_retries:\n self._response = requests.get(build_url(self.target, self.rsn))\n if self._response.status_code == 404:\n n_retries += 1\n if n_retries > max_retries:\n raise ValueError(f'Data for user {self.rsn} not found!')\n else:\n continue\n else:\n self.data = json.loads(self._response.content.decode('utf-8'))\n return self.data\n\n def _parse_data(self, data: dict):\n for i in range(len(data[\"skills\"])):\n try:\n self.__setattr__(entries_dict[data[\"skills\"][i][\"name\"]], SkillEntry(data[\"skills\"][i]))\n except KeyError:\n warn(f'Skill {data[\"skills\"][i][\"name\"]} at id: {i} not recognized.'\n f' Potential update to highscores found.')\n\n for i in range(len(activity_attributes)):\n try:\n self.__setattr__(entries_dict[data[\"activities\"][i][\"name\"]], ActivityEntry(data[\"activities\"][i]))\n except KeyError:\n warn(f'Activity {data[\"activities\"][i][\"name\"]} at id: {i} not recognized.'\n f' Potential update to highscores found.')\n\n for i in range(len(boss_attributes)):\n idx = len(activity_attributes) + i\n try:\n self.__setattr__(entries_dict[data[\"activities\"][idx][\"name\"]], BossEntry(data[\"activities\"][idx]))\n except KeyError:\n warn(f'Boss {data[\"activities\"][idx][\"name\"]} at id: {idx} not recognized.'\n f' Potential update to highscores found.')\n\n def update(self):\n self._parse_data(self._fetch_data())\n\n def get_all_scores(self):\n \"\"\"Returns a list of XP for all skills, score for all activities, and KC for all bosses.\n\n :return: list of ints with length n_entries.\n \"\"\"\n score_list = []\n for i in range(len(skill_attributes)):\n _entry = self.__getattribute__(skill_attributes[i]).xp\n if _entry <= 0:\n _entry = 0\n score_list.append(_entry)\n for i in range(len(activity_attributes)):\n _entry = self.__getattribute__(activity_attributes[i]).score\n if _entry <= 0:\n _entry = 0\n score_list.append(_entry)\n for i in range(len(boss_attributes)):\n _entry = self.__getattribute__(boss_attributes[i]).kc\n if _entry <= 0:\n _entry = 0\n score_list.append(_entry)\n\n return score_list\n\n def get_all_levels(self):\n \"\"\"Returns a list of all levels listed on the highscores for a given player. Any level not listed\n is assumed to be 1.\n \"\"\"\n levels = []\n for i in range(n_skills):\n _entry = self.__getattribute__(skill_attributes[i]).level\n if _entry <= 1:\n _entry = 1\n levels.append(_entry)\n return levels\n\n def get_all_xp(self):\n \"\"\"Returns a list of experience for all skills listed on the highscores for a given player. Any skill not\n listed is assumed to have 0 XP.\n \"\"\"\n xp = []\n for i in range(n_skills):\n _entry = self.__getattribute__(skill_attributes[i]).xp\n if _entry <= 0:\n _entry = 0\n xp.append(_entry)\n return xp\n\n def get_all_activity_scores(self):\n \"\"\"Returns a list of scores for all activities listed on the highscores for a given player. 
Any activity not\n listed is assumed to have a score of 0.\n \"\"\"\n scores = []\n for i in range(n_activities):\n _entry = self.__getattribute__(activity_attributes[i]).score\n if _entry <= 0:\n _entry = 0\n scores.append(_entry)\n return scores\n\n def get_all_kc(self):\n \"\"\"Returns a list of KC for all bosses listed on the highscores for a given player. Any boss not\n listed is assumed to have 0 KC.\n \"\"\"\n kc = []\n for i in range(n_bosses):\n _entry = self.__getattribute__(boss_attributes[i]).kc\n if _entry <= 0:\n _entry = 0\n kc.append(_entry)\n return kc\n\n # TODO handle ordering of attributes in __str__ and __repr__\n def __repr__(self) -> dict:\n self._rep_dict = dict()\n for k in entries_dict:\n self._rep_dict[entries_dict[k]] = self.__getattribute__(entries_dict[k])\n _rep = dict()\n _rep[self.rsn] = self._rep_dict\n return _rep\n\n def __str__(self):\n return pformat(self.__repr__())\n","repo_name":"cdfisher/osrs_tools","sub_path":"osrs_tools/highscores.py","file_name":"highscores.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26281339021","text":"\r\nfrom rltools.FARLBasic import *\r\n\r\n#from Environments.CartPoleEnvironment import CartPoleEnvironment\r\nfrom Environments.CartPoleEnvironmentG import CartPoleEnvironment\r\n\r\nfrom rltools.lwprQ import lwprQ\r\nfrom rltools.kNNSCIPY import kNNQ\r\nfrom rltools.RNeuroQ import RNeuroQ\r\nfrom rltools.NeuroQ import NeuroQ\r\nfrom rltools.ActionSelection import *\r\nimport pickle\r\n#from pylab import *\r\n\r\n\r\ndef CartPoleExperiment(Episodes=100,nk=0):\r\n\r\n print()\r\n print('===================================================================')\r\n print(' INIT EXPERIMENT','k='+str(nk+1))\r\n\r\n # results of the experiment\r\n x = list(range(1,Episodes+1))\r\n y =[]\r\n yr =[]\r\n\r\n #Build the Environment\r\n Env = CartPoleEnvironment()\r\n\r\n # Build a function approximator\r\n #Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2,3,10,2],npoints=False,k=1,alpha=0.25)\r\n Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2+7,3+7,10+3,2+7],npoints=False,k=4,alpha=0.3,lm=0.95)\r\n #Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=False,npoints=100,k=4,alpha=0.3,lm=0.95)\r\n #Q = lwprQ(nactions=Env.nactions,input_ranges=Env.input_ranges)\r\n #Q = RNeuroQ(Env.nactions, Env.input_ranges, 200, Env.reward_ranges,alpha=0.3)\r\n #Q = NeuroQ(Env.nactions, Env.input_ranges, 100, Env.reward_ranges,Env.deep_in,Env.deep_out,alpha=0.3)\r\n # Get the Action Selector\r\n As = e_greedy_selection(epsilon=0.0)\r\n #As = e_softmax_selection(epsilon=0.3)\r\n\r\n #Build the Agent\r\n CP = FARLBase(Q,Env,As,gamma=1.0)\r\n CP.Environment.graphs=True\r\n\r\n\r\n for i in range(Episodes):\r\n result = CP.SARSAEpisode(1000)\r\n #result = CP.QLearningEpisode(1000)\r\n CP.SelectAction.epsilon = CP.SelectAction.epsilon * 0.9\r\n CP.PlotLearningCurve(i,result[1],CP.SelectAction.epsilon)\r\n print('Episode;', str(i),'Total Reward:',str(result[0]),'Steps:',str(result[1]))\r\n y.append(result[1])\r\n yr.append(result[0])\r\n## if i==50:\r\n## miny =min(y)\r\n## figure(i)\r\n## plot(range(1,len(y)+1),y,'k')\r\n## title(r'$ k = 4, \\quad \\lambda=0.95,\\quad \\epsilon=0 , \\quad \\alpha=0.3 $')\r\n## grid('on')\r\n## axis([1, i, 0, 1100])\r\n## xlabel('Episodes')\r\n## ylabel('Steps')\r\n## savefig('cpresultdiscrete.pdf')\r\n## print \"salvado\"\r\n## close(i)\r\n\r\n\r\n\r\n 
CP.LearningCurveGraph.display.visible = False\r\n\r\n    return [[x,y,nk],[x,yr,nk]]\r\n\r\ndef Experiments():\r\n    results1=[]\r\n    results2=[]\r\n    for i in range(0,10):\r\n        x = CartPoleExperiment(Episodes=200,nk=i)\r\n        results1.append( x[0] )\r\n        results2.append( x[1] )\r\n\r\n    pickle.dump(results1,open('discretecartpolestepscq.dat','w'))\r\n    pickle.dump(results2,open('discretecartpolerewardcq.dat','w'))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    #Experiments()\r\n    x = CartPoleExperiment(100,3)\r\n    pickle.dump(x[0],open('discretecartpolesteps.dat','w'))\r\n\r\n","repo_name":"jamartinh/ReinforcementLearning","sub_path":"FAReinforcement/CartPoleDemoDiscrete.py","file_name":"CartPoleDemoDiscrete.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"39405346122","text":"\"\"\"\n * @FileName: s9_palindrome_number.py\n * @Author: zzc\n * @Date: 2020-01-26 12:30:40\n * @Version V1.0.0\n\"\"\"\n\n\"\"\"\nDetermine whether an integer is a palindrome. A palindrome is an integer that\nreads the same forward (left to right) and backward (right to left).\n\nExample 1:\nInput: 121\nOutput: true\n\nExample 2:\nInput: -121\nOutput: false\nExplanation: Read from left to right it is -121; read from right to left it is 121-. Therefore it is not a palindrome.\n\nExample 3:\nInput: 10\nOutput: false\nExplanation: Read from right to left it is 01. Therefore it is not a palindrome.\n\"\"\"\n\n\nclass Solution:\n    @staticmethod\n    def is_palindrome_1(x: int):\n        if x < 0:\n            return False\n        x = str(x)\n        for i in range(len(x)):\n            if x[i] != x[len(x) - i - 1]:\n                return False\n            if i == int(len(x) / 2):\n                break\n        return True\n\n    @staticmethod\n    def is_palindrome_2(x: int):\n        li1 = list(str(x))\n        li2 = list(str(x))\n        li2.reverse()\n        return li1 == li2\n\n    @staticmethod\n    def is_palindrome_3(x: int):\n        return str(x) == str(x)[::-1]\n\n\nif __name__ == '__main__':\n    print(Solution.is_palindrome_1(112211))\n    print(Solution.is_palindrome_2(112211))\n    print(Solution.is_palindrome_3(112211))\n","repo_name":"285220927/leetcode","sub_path":"easy/s9_palindrome_number.py","file_name":"s9_palindrome_number.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"5446145771","text":"import numpy as np\nfrom collections import Counter\nimport sys\nsys.path.append('../..')\n\nfrom LIB.EVAL.meteor import Meteor\nfrom LIB.EVAL.bleu import compute_bleu, diverse_bleu\nfrom LIB.EVAL.rouge import compute_rouge_L\n\n\ndef convert_to_words(map_to_orig, question):\n    \"\"\"convert bert word pieces to normal tokens\"\"\"\n    i = 0\n    words = []\n    while i < len(question):\n        end = i + 1\n        word = question[i]\n        for j in range(i + 1, len(question)):\n            if j - i > 17:\n                break\n            key = tuple(question[i:j])\n            if key in map_to_orig:\n                word = map_to_orig[key]\n                end = j\n        i = end\n        words.append(word.replace(\"##\", \"\"))\n    return words\n\n\ndef convert_tokens_seq(eval_file, qa_id, symbols, probs, id2word, map_to_orig):\n    \"id sequence to token sequence\"\n    def _get_penalty(syms):\n        trigrams = [tuple(syms[i: i + 3]) for i in range(len(syms) - 2)]\n        repeat_trigram = list(filter(lambda x: x > 1, list(Counter(trigrams).values()))) != []\n        return repeat_trigram\n\n    answer_dict = {}\n    for qid, prob, bsyms in zip(qa_id, probs, zip(*symbols)):\n        answers = []\n        try:\n            bsyms = zip(*bsyms)\n        except:\n            bsyms = [bsyms]\n        for p, syms in zip(prob, bsyms):\n            context_tokens = eval_file[str(qid)][\"paragraph\"]\n            if 102 in syms:\n                syms = syms[:syms.index(102)]\n            syms = [id2word[sym] if sym in id2word\n                    else context_tokens[sym - len(id2word)] for sym in syms]\n            tokens = convert_to_words(map_to_orig, syms)\n            answer = u' '.join(tokens)\n            lp, 
penalty = len(tokens) + 1, 0.\n if _get_penalty(tokens):\n penalty = 1.0\n answers.append((p / lp - penalty, answer))\n answer_dict[str(qid)] = answers\n return answer_dict\n\n\ndef evaluate(eval_file, answer_dict):\n reference_corpus = []\n translation_corpus = []\n translation_corpus_rouge_oracle = []\n translation_corpus_bleu_oracle = []\n rouges = []\n div_bleus = []\n rouges_oracle = []\n meteor = Meteor()\n res, res_oracle, gts = [], [], []\n for key, answers in answer_dict.items():\n answers = sorted(answers, key=lambda x: x[0], reverse=True)\n ground_truths = [list(map(lambda x: x.lower(), eval_file[key][\"question_eval\"]))]\n prediction = answers[0][1].lower().split()\n answers_tmp = []\n for i, answer in enumerate(answers):\n rouge = compute_rouge_L(answer[1].lower().split(), ground_truths)\n mete = meteor.compute_score([[' '.join(ground_truth) for ground_truth in ground_truths]],\n [' '.join(answer[1].lower().split())])\n bleu = compute_bleu([ground_truths], [answer[1].lower().split()], smooth=True)\n answers_tmp.append((rouge, mete[0], bleu[0], answer[0], answer[1]))\n answers_rouge = sorted(answers_tmp, key=lambda x: x[0], reverse=True)\n answers_mete = sorted(answers_tmp, key=lambda x: x[1], reverse=True)\n answers_bleu = sorted(answers_tmp, key=lambda x: x[2], reverse=True)\n prediction_rouge_oracle = answers_rouge[0][4].lower().split()\n prediction_mete_oracle = answers_mete[0][4].lower().split()\n prediction_bleu_oracle = answers_bleu[0][4].lower().split()\n translation_corpus.append(prediction)\n translation_corpus_rouge_oracle.append(prediction_rouge_oracle)\n translation_corpus_bleu_oracle.append(prediction_bleu_oracle)\n reference_corpus.append(ground_truths)\n rouge = compute_rouge_L(prediction, ground_truths)\n rouge_oracle = compute_rouge_L(prediction_rouge_oracle, ground_truths)\n rouges.append(rouge)\n rouges_oracle.append(rouge_oracle)\n res.append(' '.join(prediction))\n res_oracle.append(' '.join(prediction_mete_oracle))\n gts.append([' '.join(ground_truth) for ground_truth in ground_truths])\n div_bleus.append(diverse_bleu(answers))\n bleu = compute_bleu(reference_corpus, translation_corpus)\n bleu_oracle = compute_bleu(reference_corpus, translation_corpus_bleu_oracle)\n mete = meteor.compute_score(gts, res)\n mete_oracle = meteor.compute_score(gts, res_oracle)\n return {\"bleu\": bleu[0] * 100, \"meteor\": mete[0] * 100, \"rougeL\": np.mean(rouges) * 100,\n \"bleu_oracle\": bleu_oracle[0] * 100, \"meteor_oracle\": mete_oracle[0] * 100,\n \"rougeL_oracle\": np.mean(rouges_oracle) * 100, \"diverse_bleu\": np.mean(div_bleus) * 100}\n\n\ndef evaluate_simple(eval_file, answer_dict):\n reference_corpus = []\n translation_corpus = []\n rouges = []\n meteor = Meteor()\n res, gts = [], []\n for key, answers in answer_dict.items():\n answers = sorted(answers, key=lambda x: x[0], reverse=True)\n ground_truths = [list(map(lambda x: x.lower(), eval_file[key][\"question_eval\"]))]\n prediction = answers[0][1].lower().split()\n translation_corpus.append(prediction)\n reference_corpus.append(ground_truths)\n rouge = compute_rouge_L(prediction, ground_truths)\n rouges.append(rouge)\n res.append(' '.join(prediction))\n gts.append([' '.join(ground_truth) for ground_truth in ground_truths])\n bleu = compute_bleu(reference_corpus, translation_corpus)\n mete = meteor.compute_score(gts, res)\n return {\"bleu\": bleu[0] * 100, \"meteor\": mete[0] * 100, \"rougeL\": np.mean(rouges) * 
100}\n\n","repo_name":"ZhangShiyue/QGforQA","sub_path":"QG/BERT_QG/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"57"}
+{"seq_id":"30440046742","text":"import gym\nimport numpy as np\nimport torch\n\nclass Env():\n    def __init__(self,\n                 device,\n                 env_name='CartPole-v0'\n                 ):\n        '''\n        Initializes Env object.\n\n        The state field is always stored as a tensor,\n        updated immediately after receiving the next\n        state from the gym environment.\n        '''\n        self.device = device\n        self.env = gym.make(env_name)\n        self.state = self.env.reset()\n        self.state = Env.state_to_tensor(self.state)\n        self.done = False\n\n        self.num_actions = self.env.action_space.n\n        self.obs_space = self.env.observation_space.shape[0]\n\n    def reset(self):\n        '''\n        Resets the environment and refreshes the stored state.\n        '''\n        # keep self.state in sync with the freshly reset environment\n        self.state = Env.state_to_tensor(self.env.reset())\n        self.done = False\n\n    def play_action(self, action):\n        '''\n        Plays the given action in the environment.\n\n        State is updated to tensor-form and returned.\n        '''\n        self.state, reward, self.done, info = self.env.step(action.item())\n        self.state = Env.state_to_tensor(self.state).to(self.device)\n        reward = torch.tensor([reward], device=self.device)\n        return self.state, reward, self.done, info\n    \n    @staticmethod\n    def state_to_tensor(state):\n        '''\n        Static method that converts state tuple to pytorch tensor.\n\n        If the state is the initial state, it will be a single\n        numpy float--we construct a pytorch tensor with the other\n        fields set to 0.\n        '''\n        if type(state) is np.float64:\n            arr = np.zeros(4)\n            arr[0] = state\n            return torch.tensor(arr, dtype=torch.float64).unsqueeze(0)\n        else:\n            return torch.tensor(state, dtype=torch.float64).unsqueeze(0)\n\n    def render(self):\n        '''\n        Wrapper for gym render.\n        '''\n        self.env.render()\n\n    def close(self):\n        '''\n        Closes the environment.\n        '''\n        self.env.close()\n","repo_name":"davidmkwon/rl","sub_path":"src/cartpole/dqn2/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"13020287439","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.impute import SimpleImputer, KNNImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass MissingValueHandler:\n    \"\"\"\n    A class to handle missing values in proteomics data.\n\n    Attributes:\n    -----------\n    data : pandas.DataFrame\n        The input data containing missing values.\n\n    Methods:\n    --------\n    remove_rows_below_threshold(data, threshold: float = 0.3) -> pandas.DataFrame\n        Removes rows whose fraction of non-missing values falls below a threshold.\n\n    simple_impute_values(data, strategy: str = 'mean') -> pandas.DataFrame\n        Imputes missing values using SimpleImputer with specified strategy.\n\n    knn_impute_missing_values(data, n_neighbors: int = 5) -> pandas.DataFrame\n        Imputes missing values using KNNImputer with specified number of neighbors.\n\n    max_impute_values(data) -> pandas.DataFrame\n        Imputes missing values using the maximum value in each column.\n\n    min_impute_values(data) -> pandas.DataFrame\n        Imputes missing values using one tenth of the minimum value in each column.\n\n    distribution_impute_values(data, std_multiplier: float = 1) -> pandas.DataFrame\n        Imputes missing values based on the distribution of the data.\n\n    impute_missing_values(data) -> pandas.DataFrame\n        Imputes numerical and categorical columns with a scikit-learn pipeline.\n    \"\"\"\n\n    @staticmethod\n    def remove_rows_below_threshold(\n        data: pd.DataFrame, threshold: float = 0.3\n    ) -> pd.DataFrame:\n        
\"\"\"\n Removes rows containing missing values above the given threshold.\n\n Parameters:\n -----------\n threshold : float, optional\n The percentage threshold of non-missing values required in a row to be kept. (default is 0.8)\n\n Returns:\n --------\n pandas.DataFrame\n Data with rows containing missing values above the threshold removed.\n \"\"\"\n return data.dropna(thresh=threshold * len(data.columns))\n\n @staticmethod\n def simple_impute_values(\n data: pd.DataFrame, strategy: str = \"mean\"\n ) -> pd.DataFrame:\n \"\"\"\n Imputes missing values using SimpleImputer with specified strategy.\n\n Parameters:\n -----------\n strategy : str, optional\n The imputation strategy, can be 'mean', 'median', or 'most_frequent'. (default is 'mean')\n\n Returns:\n --------\n pandas.DataFrame\n Imputed data with missing values filled.\n \"\"\"\n imputer = SimpleImputer(missing_values=np.nan, strategy=strategy)\n imputed_data = imputer.fit_transform(data)\n return pd.DataFrame(imputed_data, columns=data.columns)\n\n @staticmethod\n def knn_impute_missing_values(\n data: pd.DataFrame, n_neighbors: int = 5\n ) -> pd.DataFrame:\n \"\"\"\n Imputes missing values using KNNImputer with specified number of neighbors.\n\n Parameters:\n -----------\n n_neighbors : int, optional\n Number of neighboring samples to use for imputing missing values. (default is 5)\n\n Returns:\n --------\n pandas.DataFrame\n Imputed data with missing values filled.\n \"\"\"\n imputer = KNNImputer(n_neighbors=n_neighbors)\n imputed_data = imputer.fit_transform(data)\n return pd.DataFrame(imputed_data, columns=data.columns, index=data.index)\n\n @staticmethod\n def max_impute_values(data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Imputes missing values using the maximum value in each column.\"\"\"\n max_values = data.max()\n imputed_data = data.fillna(max_values)\n return imputed_data\n\n @staticmethod\n def min_impute_values(data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Imputes missing values using the minimum value in each column.\"\"\"\n # One tenth of the minimum value\n min_values = data.min() / 10\n imputed_data = data.fillna(min_values)\n return imputed_data\n\n @staticmethod\n def distribution_impute_values(\n data: pd.DataFrame, std_multiplier: float = 1\n ) -> pd.DataFrame:\n \"\"\"\n Imputes missing values based on the distribution of the data.\n\n Parameters:\n -----------\n std_multiplier : float, optional\n Standard deviation multiplier for generating imputed values. 
(default is 1)\n\n Returns:\n --------\n pandas.DataFrame\n Imputed data with missing values filled.\n \"\"\"\n\n def fillna_with_distribution(\n col: pd.Series, std_multiplier: float\n ) -> pd.Series:\n mean = col.mean()\n std = col.std()\n null_count = col.isnull().sum()\n if null_count > 0:\n col.fillna(\n value=np.random.normal(\n loc=mean, scale=std * std_multiplier, size=null_count\n ),\n inplace=True,\n )\n return col\n\n imputed_data = data.apply(fillna_with_distribution, args=(std_multiplier,))\n return imputed_data\n\n @staticmethod\n def impute_missing_values(data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Imputes missing values in a pandas DataFrame using scikit-learn's SimpleImputer and KNNImputer classes.\n\n Args:\n data (pd.DataFrame): The DataFrame to impute missing values in.\n\n Returns:\n pd.DataFrame: The imputed DataFrame.\n \"\"\"\n # Define the imputation methods\n numerical_imputer = SimpleImputer(strategy=\"mean\")\n categorical_imputer = KNNImputer(n_neighbors=5)\n\n # Define the column transformer\n transformer = ColumnTransformer(\n transformers=[\n (\n \"num\",\n numerical_imputer,\n data.select_dtypes(include=\"number\").columns,\n ),\n (\n \"cat\",\n categorical_imputer,\n data.select_dtypes(include=\"object\").columns,\n ),\n ]\n )\n\n # Define the pipeline\n pipeline = Pipeline([(\"imputer\", transformer), (\"scaler\", StandardScaler())])\n # Fit the pipeline to the data\n pipeline.fit(data)\n # Transform the data\n imputed_data = pipeline.transform(data)\n # Convert the transformed data back to a DataFrame\n imputed_data = pd.DataFrame(imputed_data, columns=data.columns)\n\n return imputed_data\n","repo_name":"Ran485/STAVER","sub_path":"staver/MissingValueHandler.py","file_name":"MissingValueHandler.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"41615772792","text":"from collections import Counter, defaultdict\nimport os\n\nscreen = [[0 for _ in range(50)] for _ in range(6)]\n\n\ndef print_screen():\n for y in range(len(screen)):\n print(''.join([('#' if v else '.') for v in screen[y]]))\n\n\ntest = '''rect 3x2\nrotate column x=1 by 1\nrotate row y=0 by 4\nrotate column x=1 by 1'''\n\nres = defaultdict(Counter)\n\ntxt_name = __file__.split(os.sep)[-1].replace('.py', '.txt')\n#for line in test.split('\\n'):\nfor line in open(txt_name).readlines():\n line = line.strip()\n if line.startswith('rect '):\n x, y = line.split(' ')[1].split('x')\n x = int(x)\n y = int(y)\n for xx in range(x):\n for yy in range(y):\n screen[yy][xx] = 1\n elif line.startswith('rotate row y='):\n a, b = line.split('=')[1].split(' by ')\n a = int(a)\n b = int(b)\n cp = [v for v in screen[a]]\n for i, c in enumerate(cp):\n screen[a][(i + b) % len(screen[0])] = c\n elif line.startswith('rotate column x='):\n a, b = line.split('=')[1].split(' by ')\n a = int(a)\n b = int(b)\n cp = [screen[i][a] for i in range(len(screen))]\n for i, c in enumerate(cp):\n screen[(i + b) % len(screen)][a] = c\n else:\n print(line)\n assert False\n print(line)\n print_screen()\n\n\n\n\nprint('\\n\\n', sum([sum(lst) for lst in screen]))\n\n'''rect AxB\nrotate row y=A by B\nrotate column x=A by B'''","repo_name":"vashu1/data_snippets","sub_path":"_tasks/advent2016/d08.py","file_name":"d08.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"6095600674","text":"# The examples in this file come from the 
Flask-SQLAlchemy documentation\n# For more information take a look at:\n# http://flask-sqlalchemy.pocoo.org/2.1/quickstart/#simple-relationships\n\nfrom datetime import datetime\n\nfrom rest_api_demo.database import db\n\n\nclass Author(db.Model):\n __tablename__ = 'authors'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.String(50), nullable=False)\n last_name = db.Column(db.String(50), nullable=False)\n fullname = db.column_property(first_name + \" \" + last_name)\n\n # books = db.relationship('Book', back_populates='author', cascade='all, delete-orphan')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n def __repr__(self):\n return f'{self.first_name} {self.last_name}'\n\n\nclass Book(db.Model):\n __tablename__ = 'books'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(50), nullable=False)\n isbn = db.Column(db.String(13), nullable=False, unique=True)\n number_of_pages = db.Column(db.Integer, nullable=False)\n published_date = db.Column(db.Date, nullable=False)\n description = db.Column(db.Text)\n\n author_id = db.Column(db.Integer, db.ForeignKey('authors.id'), nullable=False)\n # author = db.relationship('Author', back_populates='books')\n\n # author_id = db.Column(db.Integer, db.ForeignKey('authors.id'), nullable=False)\n # author = db.relationship('Author', back_populates='books')\n author = db.relationship('Author', backref=db.backref('books', lazy='dynamic'))\n\n def __init__(self, title, isbn, number_of_pages, description, author, published_date=None):\n self.title = title\n self.isbn = isbn\n self.number_of_pages = number_of_pages\n self.description = description\n self.author = author\n if published_date is None:\n self.published_date = datetime.utcnow()\n\n def __repr__(self):\n return f'{self.title} - {self.author.first_name} {self.author.last_name}'\n\n @staticmethod\n def additional_validation(param: str, value: str) -> str:\n return value\n","repo_name":"codedevspb/el_test","sub_path":"rest_api_demo/library/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8410531147","text":"# Over 1170 at the diner\n# Edward Freeman\nffile=open(\"TextFiles/thediner.txt\",\"r\") # open the file thediner.txt to read and refer to it as ffile\nffile2=open(\"TextFiles/over1170.txt\",\"w\") # open a new file called over 1170.txt for writing purposes.\n # and refer to this file as ffile2.\nfor line in ffile: # for every line in the file (until the end), do the indented lines.\n fields=line.split(\",\") # start a new field every time you reach a comma.\n ddate=(fields[0]) # ddate is the first field in the record (fields[0]), a text field\n breakfasts=int(fields[1]) # breakfasts is the second field in the record and is an integer\n lunches=int(fields[2]) # lunches is the third field in the record and is an integer\n if(5*breakfasts+7*lunches>1170): # Standard if statement. 
If receipts are more than 1170\r\n        print(ddate)                        # print the date in Python\r\n        ffile2.write(ddate)                 # Write the date to ffile2, over1170.txt.\r\n        ffile2.write(\"\\n\")                  # Start a new line in ffile2, over1170.txt\r\nffile.close()                               # Close the data file\r\nffile2.close()                              # Close the file with the results.\r\nprint(\"That's all folks.\")                  # Means nobody met the criteria or a mistake was made\r\n","repo_name":"Shehu-Muhammad/Python_College_Stuff","sub_path":"Python Stuff/DinerRead.py","file_name":"DinerRead.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"71244750259","text":"import os\nimport urllib.parse as up\nfrom flask import url_for\nfrom flask_jwt_extended import JWTManager\nfrom flask_script import Manager\n\nfrom src import db, ma, create_app, configs, api\n\nconfig = os.environ.get('PYTH_SRVR', 'default')\n\nconfig = configs.get(config)\n\nextensions = [db, ma, api]\n\napp = create_app(__name__, config, extensions=extensions)\n\njwt = JWTManager(app)\n\nmanager = Manager(app)\n\n\n@manager.shell\ndef _shell_context():\n    return dict(\n        app=app,\n        db=db,\n        ma=ma,\n        config=config\n    )\n\n\n@manager.command\ndef list_routes():\n    output = []\n    for rule in app.url_map.iter_rules():\n        options = {}\n        for arg in rule.arguments:\n            options[arg] = \"[{0}]\".format(arg)\n        methods = ','.join(rule.methods)\n        url = url_for(rule.endpoint, **options)\n        line = up.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n        output.append(line)\n\n    for line in sorted(output):\n        print(line)\n\n\nif __name__ == \"__main__\":\n    with app.app_context():\n        db.create_all()\n    manager.run()\n","repo_name":"Msmohits/Library_management","sub_path":"library_management/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"31966229060","text":"from django.shortcuts import render\nfrom .models import News\n\n\n# Create your views here.\ndef index(request):\n    content_berita = News.objects.all()\n    context = {\n        \"Content_Announ\" : content_berita.filter(content_category=\"announcement\"),\n        \"Content_Event\" : content_berita.filter(content_category=\"incoming_event\"),\n        \"Content_News\" : content_berita.filter(content_category=\"hot_news\"),\n        \"creator\" : \"Maulana Aji W.\",\n        \"page_name\" : \"News\",\n        \"site_nav\" : [\n            [\"/\", \"Home\"],\n            [\"/animelist\", \"Explorer\"],\n            [\"/community\", \"Community\"],\n            [\"/news\", \"News\"],\n            [\"/user_profile\", \"Profile\"],\n        ],\n        \"website\" : \"My Anime List\",\n    }\n    return render(request, \"news/index.html\", context)\n\ndef news_detail(request, slug):\n    content_berita = News.objects.get(slug=slug)\n    context = {\n        \"Contents\" : content_berita,\n        \"page_name\" : \"News\",\n        \"site_nav\" : [\n            [\"/\", \"Home\"],\n            [\"/animelist\", \"Explorer\"],\n            [\"/community\", \"Community\"],\n            [\"/news\", \"News\"],\n            [\"/user_profile\", \"Profile\"],\n        ],\n        \"website\" : \"My Anime List\",\n    }\n    return render(request, \"news/news_detail.html\", context)\n\n# Note:\n# News content in the News app falls into 3 categories, namely:\n# 1. incoming_event\n# 2. hot_news\n# 3. 
announcement\n# keyboard error g, G, h, H, '_', \"_\"\n","repo_name":"Maulanawesome5/crud_app","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13417673004","text":"import heapq\nfrom collections import defaultdict\nfrom collections import deque\nfrom time import time\nimport math\nfrom time import perf_counter\nclass node:\n def __init__(self,node):\n self.node = node\n\n def name(self):\n return self.node\n\n\n def __repr__(self):\n return \"node()\"\n def __str__(self):\n return self.node\nclass edge:\n def __init__(self, edge, weight):\n self.edge = edge\n self.weight = weight\n def set_weight(self, weight):\n self.weight = weight\n def get_weight(self,start,adjecent):\n return self.weight\nclass Graph:\n def __init__(self):\n self.graph = defaultdict(list)\n self.map1 = defaultdict()\n\n \n def add_node(self, vertex): #adding vertex to the graph\n self.graph[vertex.node].append(vertex)\n\n def make_edge(self, node1, node2, weight=0):\n if node1 not in self.map1:\n self.map1[node1] = node(node1)\n if node2 not in self.map1:\n self.map1[node2] = node(node2)\n n1, n2 = node1, node2\n\n node1 = self.map1[node1]\n node2 = self.map1[node2]\n \n self.graph[self.map1[n1]].append((node2, weight))\n self.graph[self.map1[n2]].append((node1, weight))\n \n \n\n \n\n def __iter__(self):\n return iter(self.graph.values())\n\n\ndef breadth_first_search(graph,start):\n start_time=time()*10000\n path=[]\n visited=set() #inorder to have unrepeated visited nodes\n queue= deque()\n visited.add(start)\n queue.append(start)\n \n while(len(queue)>0):\n visited_node=queue.popleft()\n path.append(visited_node)\n for adjecent_node in graph.graph:\n if adjecent_node.name() not in visited:\n visited.add(adjecent_node.name())\n queue.append(adjecent_node.name())\n end_time=time()*10000\n time_required=end_time - start_time\n print(\" -> \".join(path))\n print(\"\\n\")\n print(\"$$$$$$ time $$$$$$\")\n \n print(\"required time for breadth first search =\",time_required ,\"X 10^-4 second\")\n print(\"\\n\")\n \ndef depth_first_search(graph,start):\n start_time=time()*10000\n path=[]\n visited=set() #inorder to have unrepeated visited nodes\n stack= []\n visited.add(start)\n stack.append(start)\n while(len(stack)>0):\n visited_node=stack.pop()\n path.append(visited_node)\n\n for node in graph.graph[graph.map1[visited_node]]:\n adjecent_node, weight = node\n if adjecent_node.name() not in visited:\n visited.add(adjecent_node.name())\n stack.append(adjecent_node.name()) \n \n end_time=time()*10000\n time_required = end_time - start_time\n print(\" -> \".join(path))\n print(\"\\n\")\n print(\"$$$$$$ time $$$$$$\")\n \n print(\"required time for depth first search =\",time_required ,\"X 10^-4 second\")\n print(\"\\n\") \n\n \n\ndef dijkastra(graph,start,goal):\n \n visited=set() \n heap=[(0,start,start)]\n while heap:\n weight,node,path=heapq.heappop(heap)\n \n respective_node = graph.map1[node]\n if respective_node.name()== goal:\n return path\n if node not in visited:\n visited.add(node) \n for n in graph.graph[respective_node]:\n new_node, w = n\n if new_node.name() not in visited:\n cost = float(w) + weight\n newpath=str(path)+\"<->\"+str(str(new_node))\n \n heapq.heappush(heap,(cost,new_node.name(), newpath))\n\n\n\n return []\n\n\n\n\ndef heuristic_function(node):\n \n heuristic={}\n with open('heuristic.txt','r',encoding=\"utf-8\") as f:\n lines = f.readlines()\n 
for li in lines:\n            words=li.split(\" \")\n            result1=math.pow(float(words[1]), 2)+math.pow(float(words[2]), 2)\n            result=math.sqrt(result1)\n            \n            heuristic[words[0]] = result\n    return heuristic[node]\n    \n\ndef A_Star(graph, start, goal):\n    \n    visited=set()\n    initial = heuristic_function(start)\n    heap=[(initial,0,start,start)]\n    while heap:\n        total, weight,node,path=heapq.heappop(heap)\n        respective_node = graph.map1[node]\n        if respective_node.name()== goal:\n            return path\n        if node not in visited:\n            visited.add(node) \n            for n in graph.graph[respective_node]:\n                new_node, w = n\n                if new_node.name() not in visited:\n                    cost = float(w) + weight\n                    new_total = cost + heuristic_function(new_node.name())\n                    newpath=str(path)+\"<->\"+str(str(new_node))\n                    \n                    heapq.heappush(heap,(new_total, cost,new_node.name(), newpath))\n    return [] \n\ngraph = Graph()\n\nwith open('file.txt','r',encoding=\"utf-8\") as file:\n    lines = file.readlines()\n    for line in lines:\n        words=line.split(\" \")\n        graph.make_edge(words[0],words[1],words[2])\n\n\n\nstart=\"eforie\"\nend=\"neamt\"\nprint(\"breadth first search\")\nbreadth_first_search(graph,start)\nprint(\"###############\")\nprint(\"depth first search\")\ndepth_first_search(graph,start)\nprint(\"###############\")\nprint(\"dijkastra \")\nstart_time = perf_counter()\nprint(dijkastra(graph,start,end))\nend_time=perf_counter()\nprint(\"The time required for dijkastra algorithm\",end_time - start_time,\"seconds\" )\nprint(\"\\n\")\nprint(\"###############\")\n\nprint(\"A* shortest path search algorithm\")\nstart_time = perf_counter()\nprint(A_Star(graph,start,end))\nend_time=perf_counter()\nprint(\"The time required for A* algorithm\",end_time - start_time,\"seconds\" )\n\n\n\n\n\n\n\n","repo_name":"sossyh/Search-Algoritms","sub_path":"Search_Algorithms.py","file_name":"Search_Algorithms.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"6048401801","text":"from typing import Optional, Dict\n\nimport torch\nfrom omegaconf import DictConfig\nfrom torchmetrics import MetricCollection\n\nfrom flccpsisrc.common.model_training.pl_datamodule import PSIDataModule\nfrom flccpsisrc.common.model_training.pl_models.base_gpt2 import GPT2LMHead\nfrom flccpsisrc.common.model_training.single_token_metrics import AccuracyMRR\n\n\nclass PSIGPT2(GPT2LMHead):\n    def __init__(self, config: DictConfig, actual_vocab_size: int):\n        super().__init__(config, actual_vocab_size)\n\n    def _get_metrics(self) -> MetricCollection:\n        metrics = dict()\n        for holdout in [\"train\", \"val\", \"test\"]:\n            for node_type in [\"overall\", \"bpeleaf\", \"staticleaf\", \"nonleaf\"]:\n                metrics[f\"{holdout}/{node_type}\"] = AccuracyMRR(\n                    ignore_index=self._config.model.labels_pad,\n                    top_k=5,\n                    shift=True,\n                )\n        return MetricCollection(metrics)\n\n    def _compute_metrics(self, holdout: str) -> Dict[str, torch.Tensor]:\n        res = dict()\n        for node_type in [\"overall\", \"bpeleaf\", \"staticleaf\", \"nonleaf\"]:\n            res.update(\n                {f\"{holdout}/{node_type}_{k}\": v for k, v in self._metrics[f\"{holdout}/{node_type}\"].compute().items()}\n            )\n        return res\n\n    def _update_metrics(self, logits: torch.Tensor, labels: torch.Tensor, holdout: str) -> Dict[str, torch.Tensor]:\n        res = dict()\n        datamodule: PSIDataModule = self.trainer.datamodule\n        arbitrary_mask, static_leaf_mask, non_leaf_mask = datamodule.psi_facade.tokenizer.classify_ids(labels)\n\n        res.update(self._update_metrics_with_mask(logits, labels, holdout, \"overall\", mask=None))\n        
res.update(self._update_metrics_with_mask(logits, labels, holdout, \"nonleaf\", mask=non_leaf_mask))\n res.update(self._update_metrics_with_mask(logits, labels, holdout, \"staticleaf\", mask=static_leaf_mask))\n res.update(self._update_metrics_with_mask(logits, labels, holdout, \"bpeleaf\", mask=arbitrary_mask))\n\n return res\n\n def _update_metrics_with_mask(\n self, logits: torch.Tensor, labels: torch.Tensor, holdout: str, node_type: str, mask: Optional[torch.Tensor]\n ) -> Dict[str, torch.Tensor]:\n if mask is not None:\n labels = labels.clone()\n labels[mask] = self._config.model.labels_pad\n return {\n f\"{holdout}/{node_type}_{k}\": v for k, v in self._metrics[f\"{holdout}/{node_type}\"](logits, labels).items()\n }\n","repo_name":"SokolovYaroslav/PSI-Transformer","sub_path":"flccpsisrc/common/model_training/pl_models/psi_gpt2.py","file_name":"psi_gpt2.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29739898982","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: api/bucketlist/tests/test_update_bucketlist.py\n# Author: Collins Abitekaniza \n# Date: 19.07.2017\n# Last Modified: 19.07.2017\nfrom .base import BaseTestCase\n\n\nclass UpdateBucketlistTestCase(BaseTestCase):\n\n def setUp(self):\n BaseTestCase.setUp()\n # create a bucket list for testing\n self.app.post('/bucketlists', data={\n \"title\": \"Awesome, beautiful list\",\n \"description\": \"lorem ipsum blah blah ...\",\n }, headers={\"token\": self.token})\n\n def test_update_bucketlist_successfully(self):\n initial_request = self.app.get('/bucketlists', headers={\n \"token\": self.token\n })\n # first confirm initial name\n self.assertEqual(initial_request.status_code, 200)\n self.assertTrue(\n \"awesome, beautiful list\" in initial_request.data.decode('utf-8').lower())\n\n # now send update request\n update_request = self.app.put(\"/bucketlists/1\", data={\n \"title\": \"Edited title over here\",\n \"description\": \"hohoohohohohohohohohoho\",\n }, headers={\"token\": self.token})\n self.assertEqual(update_request.status_code, 200)\n\n final_request = self.app.get('/bucketlists/1', headers={\n \"token\": self.token\n })\n self.assertEqual(final_request.status_code, 200)\n self.assertTrue(\n \"edited title over here\" in final_request.data.decode('utf-8').lower())\n\n def test_update_bucketlist_successfully_content(self):\n update_request = self.app.put(\"/bucketlists/1\", data={\n \"title\": \"Again edited our title, what ?\",\n \"description\": \"lorem blah oh my god\",\n }, headers={\"token\": self.token})\n self.assertEqual(update_request.status_code, 200)\n self.assertTrue(\n \"bucketlist updated successfully\" in update_request.data.decode('utf-8').lower())\n\n def test_update_bucketlist_no_form(self):\n update_request = self.app.put('/bucketlists/1', headers={\n 'token': self.token\n })\n self.assertEqual(update_request.status_code, 200)\n self.assertTrue(\n \"nothing to change\" in update_request.data.decode('utf-8').lower())\n\n def test_update_bucketlist_index_out_of_range(self):\n update_request = self.app.put(\"/bucketlists/49\", data={\n \"title\": \"Now this is not good\",\n \"description\": \"Why trying to overflow bucket\",\n }, headers={\"token\": self.token})\n self.assertEqual(update_request.status_code, 404)\n self.assertTrue(\"bucketlist not found\")\n\n def test_update_bucketlist_no_token(self):\n update_request = self.app.put(\"/bucketlists/1\", data={\n \"title\": \"Ok let me try 
without id\",\n \"description\": \"evil minds dor ipsum\"\n })\n self.assertEqual(update_request.status_code, 403)\n self.assertTrue(\n \"token required\" in update_request.data.decode('utf-8').lower())\n","repo_name":"collin5/bucketlist","sub_path":"api/bucketlist/tests/test_update_bucketlist.py","file_name":"test_update_bucketlist.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"6626983199","text":"import argparse\nimport pandas as pd\n\n\nprompt = \"Twitter Sentiment Analysis Examples.\\n\\n Tweet: {}\\n Sentiment: \"\nlabels = {1: \"positive\", 0: \"negative\"}\n\nsave_dir = \"./gpt3/input_data/\"\n\n\ndef load_data(split):\n filename = \"./twitter-datasets/full_\" + split + \".csv\"\n return pd.read_csv(filename)\n\n\ndef split_data(df, frac, seed):\n return df.sample(frac=frac, random_state=seed)\n\n\ndef prepare(df, is_test=False):\n cols = {}\n cols[\"prompt\"] = df[\"texts\"].apply(lambda x: prompt.format(x))\n if not is_test:\n cols[\"completion\"] = df[\"labels\"].apply(lambda x: labels[x])\n return pd.DataFrame(cols)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train_frac\", type=float)\n parser.add_argument(\"--seed\", type=int)\n args = parser.parse_args()\n\n # train\n df_train = load_data(\"train\")\n df_train = split_data(df_train, args.train_frac, args.seed)\n df_train = prepare(df_train)\n df_train.to_csv(\n save_dir\n + \"train_{}_{}.csv\".format(str(args.train_frac).replace(\".\", \"-\"), args.seed)\n )\n\n # val\n df_val = load_data(\"val\")\n df_val = prepare(df_val)\n df_val.to_csv(save_dir + \"val.csv\")\n\n # val_final\n df_val_final = load_data(\"val_final\")\n df_val_final = prepare(df_val_final)\n df_val_final.to_csv(save_dir + \"val_final.csv\")\n\n # test\n with open(\"twitter-datasets/test_data.txt\") as f:\n df_test = pd.DataFrame({\"texts\": list(f.readlines())})\n df_test = prepare(df_test, is_test=True)\n df_test.to_csv(save_dir + \"test.csv\")\n","repo_name":"antonschafer/cil-project","sub_path":"gpt3/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"41383720069","text":"# encoding: utf-8\n\n\"\"\"\n@author: you\n@site: \n@time: 2019/8/28 16:26\n\"\"\"\n\n# price = {\n# \t\"cny_price\": 0,\n# \t# \"hk_price\": 0,\n# \t# \"hk__cost_price\": 1200,\n# \t# \"cny__cost_price\": 1000\n# }\n# # print([price.keys()][0])\n#\n# print(price['dddd'])\n\ndict = {'Name': 'Zara', 'Age': 7}\ndict2 = {'Sex': 'female' }\ndict.update(dict2)","repo_name":"A-you/myaddons","sub_path":"membership/models/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"27244302532","text":"import numpy as np\nfrom psm.util import unit_ball_measure, is_on_manifold, Projection\nfrom psm.manifolds import ManifoldStack\nfrom psm.tasks import Task\nfrom psm.algorithms.rrt_star_manifold import RRTStarManifold\n\n\nclass IKRRTStar:\n def __init__(self,\n task: Task,\n cfg: dict):\n self.name = \"IK_RRT\"\n self.n_manifolds = len(task.manifolds)\n self.task = task\n self.start = task.start\n self.cfg = cfg\n self.n_samples = cfg['N']\n self.alpha = cfg['ALPHA']\n self.beta = cfg['BETA']\n self.eps = cfg['EPS']\n self.rho = cfg['RHO']\n self.r_max = cfg['R_MAX']\n 
self.collision_res = cfg['COLLISION_RES']\n self.d = task.d\n\n self.lim_lo = task.lim_lo\n self.lim_up = task.lim_up\n self.gamma = np.power(2 * (1 + 1.0 / float(self.d)), 1.0 / float(self.d)) * \\\n np.power(task.get_joint_space_volume() / unit_ball_measure(self.d), 1. / float(self.d))\n\n self.Q_near_ids = []\n self.G = None\n self.G_list = []\n self.V_goal = []\n self.V_goal_list = []\n self.path = None\n self.path_id = None\n\n # check if start point is on first manifold\n if not is_on_manifold(task.manifolds[0], task.start, self.eps):\n raise Exception('The start point is not on the manifold h(start)= ' + str(task.manifolds[0].y(task.start)))\n\n def run(self) -> bool:\n # iterate over sequence of manifolds\n q_start = self.start.copy()\n cost = 0.0\n path = []\n for n in range(self.n_manifolds - 1):\n print('######################################################')\n print('n', n)\n print('Active Manifold: ', self.task.manifolds[n].name)\n print('Target Manifold: ', self.task.manifolds[n + 1].name)\n\n # sample a goal configuration with IK\n curr_manifold = self.task.manifolds[n]\n next_manifold = ManifoldStack(manifolds=[self.task.manifolds[n], self.task.manifolds[n + 1]])\n\n ik_proj = Projection(f=next_manifold.y, J=next_manifold.J)\n\n res_plan = False\n max_goals = 10\n iter_goals = 0\n while not res_plan:\n res_proj = False\n while not res_proj:\n q_rand = self.task.sample()\n res_proj, q_goal = ik_proj.project(q_rand)\n if not self.task.is_valid_conf(q_goal):\n res_proj = False\n if self.task.is_collision_conf(q_goal):\n res_proj = False\n\n # plan path to goal configuration with RRT*\n rrt_task = Task('empty')\n rrt_task.start = q_start\n rrt_task.goal = q_goal\n rrt_task.obstacles = self.task.obstacles\n planner = RRTStarManifold(rrt_task, curr_manifold, self.cfg)\n path_idx, opt_path = planner.run()\n if path_idx:\n q_reached = planner.G.V[path_idx[-1]].value\n res_plan = np.linalg.norm(q_reached - q_goal) < self.eps\n else:\n res_plan = False\n\n if not res_plan:\n iter_goals += 1\n\n if iter_goals == max_goals:\n return False\n\n cost += planner.G.comp_opt_path(q_goal)\n path += [[planner.G.V[idx].value for idx in planner.G.path]]\n\n q_start = q_goal.copy()\n\n # store results for later evaluation\n self.G_list.append(planner.G)\n self.V_goal_list.append([0])\n\n self.path = path\n return True\n","repo_name":"etpr/sequential-manifold-planning","sub_path":"psm/algorithms/ik_rrt_star.py","file_name":"ik_rrt_star.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"57"} +{"seq_id":"39884768614","text":"import cv2\r\nimport MyLib as MyL\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport sys\r\n\r\nShowAll = False\r\nBGloop= 10\r\nPixNo = 25\r\nPixNo_1_2 = 12\r\nDirNo = 15\r\nDirAgl= [i*(180//DirNo) for i in range(DirNo)]\r\nRowNo = lambda row: row//PixNo\r\nColNo = lambda col: col//PixNo\r\nWW = 25\r\nFW = 12\r\nFeaNo = 21\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 3: \r\n print('Usage: {} image_name'.format(sys.argv[0]))\r\n print('Example: python3 PRL_load_model.py Model_Name For_PRL_a002_03.png') \r\n # python3 PRL_load_model_run.py test For_PRL_a002_08.png\r\n sys.exit(0)\r\n \r\n #\r\n # 1. load image (after 3x3 blur image)\r\n #\r\n print(\"1. Loading image... 
\"+sys.argv[2])\r\n img = cv2.imread(sys.argv[2],0) \r\n fm_base=np.asarray(img)\r\n fm=fm_base.copy()\r\n\r\n #\r\n # 2. display the original image\r\n #\r\n print(\"2. display image... \")\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fm, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_1.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n if ShowAll: plt.show()\r\n\r\n #\r\n # 3. find foreground information\r\n #\r\n print(\"3. find foreground info... \")\r\n fpfg=MyL.FP_FindBG(plt, fm, False)\r\n\r\n #\r\n # 4. load model\r\n #\r\n print(\"4. Loading model/weights...\")\r\n from keras.models import model_from_yaml\r\n MdlSourPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Model\\\\'\r\n mdl_fn=MdlSourPath+sys.argv[1]+\".yaml\"\r\n wgt_fn=MdlSourPath+sys.argv[1]+\".h5\"\r\n yaml_file = open(mdl_fn)\r\n loaded_model_yaml = yaml_file.read()\r\n yaml_file.close()\r\n loaded_model = model_from_yaml(loaded_model_yaml)\r\n loaded_model.load_weights(wgt_fn)\r\n\r\n #\r\n # 5. prepare 21-feature\r\n # \r\n print(\"5. prepare 21-feature...\")\r\n height, width = fm.shape[0], fm.shape[1] \r\n NoHeight, NoWidth = RowNo(height), ColNo(width)\r\n\r\n X_data = np.full((NoHeight*NoWidth, FeaNo), 0, dtype=np.double)\r\n X_data_idx=np.full((NoHeight*NoWidth, 2), 0, dtype=int)\r\n fpdir = np.full((NoHeight,NoWidth), -1, dtype=float)\r\n fpdir_prob = np.full((NoHeight,NoWidth), -1, dtype=float)\r\n fpdir_porg = np.full((NoHeight,NoWidth), -1, dtype=float)\r\n k = -1\r\n for i in range(NoHeight):\r\n for j in range(NoWidth):\r\n if fpfg[i][j]:\r\n a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo\r\n img, k = fm[a:b,c:d], k+1 # img is a 25x25 blocks \r\n X_data_idx[k,0], X_data_idx[k,1] = i, j\r\n sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)\r\n sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)\r\n Vx, Vy = np.sum(2*sobelx*sobely), np.sum(sobelx**2-sobely**2)\r\n X_data[k,0], X_data[k,1], X_data[k,2] = Vx, Vy, Vy/Vx \r\n Reg1x, Reg1y= np.sum(2*sobelx[:,6:19]*sobely[:,6:19]), np.sum(sobelx[:,6:19]**2-sobely[:,6:19]**2)\r\n X_data[k,3], X_data[k,4], X_data[k,5] = Reg1x, Reg1y, Reg1y/Reg1x \r\n Reg2x, Reg2y= np.sum(2*sobelx[6:19,:]*sobely[6:19,:]), np.sum(sobelx[6:19,:]**2-sobely[6:19,:]**2)\r\n X_data[k,6], X_data[k,7], X_data[k,8] = Reg2x, Reg2y, Reg2y/Reg2x \r\n Reg3x, Reg3y= np.sum(2*sobelx[0:13, 0:13]*sobely[0:13,0:13]), np.sum(sobelx[0:13,0:13]**2-sobely[0:13,0:13]**2)\r\n X_data[k,9], X_data[k,10], X_data[k,11] = Reg3x, Reg3y, Reg3y/Reg3x \r\n Reg4x, Reg4y= np.sum(2*sobelx[0:13, 13:25]*sobely[0:13,13:25]), np.sum(sobelx[0:13,13:25]**2-sobely[0:13,13:25]**2)\r\n X_data[k,12], X_data[k,13], X_data[k,14] = Reg4x, Reg4y, Reg4y/Reg4x \r\n Reg5x, Reg5y= np.sum(2*sobelx[13:25, 0:13]*sobely[13:25,0:13]), np.sum(sobelx[13:25,0:13]**2-sobely[13:25,0:13]**2)\r\n X_data[k,15], X_data[k,16], X_data[k,17] = Reg5x, Reg5y, Reg5y/Reg5x \r\n Reg6x, Reg6y= np.sum(2*sobelx[13:25,13:25]*sobely[13:25,13:25]), np.sum(sobelx[13:25,13:25]**2-sobely[13:25,13:25]**2) \r\n X_data[k,18], X_data[k,19], X_data[k,20] = Reg6x, Reg6y, Reg6y/Reg6x \r\n \r\n #\r\n # 6. normalize and make prediction\r\n #\r\n print(\"6. 
normalize and make prediction...\") \r\n import sklearn.preprocessing as preprocessing\r\n X_data_normalize = preprocessing.normalize(X_data)\r\n prediction = loaded_model.predict_classes(X_data_normalize)\r\n\r\n #===========\r\n weightTBL = MyL.prepWeight()\r\n predict_proba = pd.DataFrame.from_records(loaded_model.predict_proba(X_data_normalize))\r\n prob_arr = predict_proba.as_matrix()\r\n prob_list = predict_proba.max(axis=1)\r\n #print(\"Prob 1: \",predict_proba.shape,type(predict_proba),\"predict_proba=====\")#,predict_proba)\r\n #print(\"Prob 2: \",prob_list.shape,\"prob_list=====\")\r\n #print(\"Prob 3: \",weightTBL.shape,\"weightTBL=====\")\r\n #print(\"Prob 4: \",prediction.shape,\"prediction=====\")\r\n #print(\"Prob 5: \",prob_arr.shape,\"prob_arr=====\")\r\n\r\n for i in range(prediction.shape[0]):\r\n fpdir[X_data_idx[i,0]][X_data_idx[i,1]] = prediction[i]\r\n fpdir_porg[X_data_idx[i,0]][X_data_idx[i,1]] = prob_list[i]\r\n fpdir_prob[X_data_idx[i,0]][X_data_idx[i,1]] = sum(weightTBL[prediction[i]]*prob_arr[i])\r\n #if i < 3:\r\n # print(i, prediction[i], prob_list[i], sum(weightTBL[prediction[i]]*prob_arr[i]))\r\n # for j in range(180):\r\n # print('{:2d} {:.3f} {:.3f}'.format(j, weightTBL[prediction[i],j], prob_arr[i,j]))\r\n # print(weightTBL[prediction[i],prediction[i]-10:prediction[i]+10], prob_list[i][prediction[i]-10:prediction[i]+10])\r\n\r\n\r\n #\r\n # 7. write line seg. on fingerprint \r\n #\r\n print(\"7. write line seg. on fingerprint...\") \r\n\r\n fmdir=MyL.UT_SetLine2(plt, fpfg, fpdir, fm.shape, False, 'black',3)\r\n\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fmdir, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_2.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n if ShowAll: plt.show()\r\n \r\n #\r\n # 8. write line seg. \r\n #\r\n print(\"8. write line seg. ...\") \r\n axs=[[None for _ in range(NoWidth)]]*NoWidth\r\n MyL.UT_SetLine(plt, fpfg, fpdir, fm, axs, False, 'white',3)\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fm, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_3.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n if ShowAll: plt.show()\r\n\r\n #\r\n # 9. tri-color image\r\n #\r\n print(\"9. Tri-color. image...\") \r\n fmt=np.full(fm.shape, 192, dtype=int)\r\n fmred=MyL.UT_SetTri(plt, fpfg, fpdir, fmt, 2)\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fmt, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_5.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n #plt.show()\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fmred, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_6.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n #plt.show()\r\n\r\n #\r\n # 10. prob image\r\n #\r\n print(\"10. Prob. image...\") \r\n fmprb=np.full(fm.shape, 192, dtype=int)\r\n MyL.UT_SetGray(plt, fpfg, fmprb, fpdir_prob)\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(fmprb, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_4.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n #plt.show()\r\n\r\n #\r\n # 11. pixel three color image\r\n #\r\n print(\"11. Tri-color image...\") \r\n print(\"11.a. 
prepare 21-feature...\")\r\n height, width = fm.shape[0], fm.shape[1] \r\n NoHeight, NoWidth = height-PixNo+1, width-PixNo+1\r\n print(NoHeight, NoWidth) \r\n #X_pix_data = np.full((NoHeight*NoWidth, FeaNo), 0, dtype=np.double)\r\n X_pix_data = np.full((NoWidth, FeaNo), 0, dtype=np.double)\r\n X_pix_idx=np.full((NoHeight*NoWidth, 2), 0, dtype=int)\r\n pix_dir = np.full((NoHeight,NoWidth), -1, dtype=float) # pixel direction\r\n pix_dir_prob = np.full((NoHeight,NoWidth), -1, dtype=float) # pixel dir probabolity\r\n pix_consensus = np.full((NoHeight,NoWidth), -1, dtype=float) # consensus value\r\n print(\"pix_dir.shape, pix_dir_prob.shape: \", pix_dir.shape, pix_dir_prob.shape)\r\n print(\"X_pix_data.shape, X_pix_idx.shape: \", X_pix_data.shape, X_pix_idx.shape, \"\\n processing i: \", end='')\r\n \r\n for i in range(NoHeight):\r\n # need to do this by parts to prevent out-of-memory problem\r\n k = -1\r\n #print('.', end='') \r\n print('{:4d}'.format(i), end='') \r\n sys.stdout.flush()\r\n for j in range(NoWidth):\r\n a,b,c,d=i,i+25,j,j+25\r\n img, k = fm[a:b,c:d], k+1 # img is a 25x25 blocks\r\n #print(i,j, a, b, c, d)\r\n X_pix_idx[k,0], X_pix_idx[k,1] = i+12, j+12\r\n\r\n sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)\r\n sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)\r\n Vx, Vy = np.sum(2*sobelx*sobely)+sys.float_info.epsilon, np.sum(sobelx**2-sobely**2)\r\n X_pix_data[k,0], X_pix_data[k,1], X_pix_data[k,2] = Vx, Vy, Vy/Vx\r\n \r\n Reg1x, Reg1y= np.sum(2*sobelx[:,6:19]*sobely[:,6:19])+sys.float_info.epsilon, np.sum(sobelx[:,6:19]**2-sobely[:,6:19]**2)\r\n X_pix_data[k,3], X_pix_data[k,4], X_pix_data[k,5] = Reg1x, Reg1y, Reg1y/Reg1x \r\n\r\n Reg2x, Reg2y= np.sum(2*sobelx[6:19,:]*sobely[6:19,:])+sys.float_info.epsilon, np.sum(sobelx[6:19,:]**2-sobely[6:19,:]**2)\r\n X_pix_data[k,6], X_pix_data[k,7], X_pix_data[k,8] = Reg2x, Reg2y, Reg2y/Reg2x \r\n\r\n Reg3x, Reg3y= np.sum(2*sobelx[0:13, 0:13]*sobely[0:13,0:13])+sys.float_info.epsilon, np.sum(sobelx[0:13,0:13]**2-sobely[0:13,0:13]**2)\r\n X_pix_data[k,9], X_pix_data[k,10], X_pix_data[k,11] = Reg3x, Reg3y, Reg3y/Reg3x \r\n\r\n Reg4x, Reg4y= np.sum(2*sobelx[0:13, 13:25]*sobely[0:13,13:25])+sys.float_info.epsilon, \\\r\n np.sum(sobelx[0:13,13:25]**2-sobely[0:13,13:25]**2)\r\n X_pix_data[k,12], X_pix_data[k,13], X_pix_data[k,14] = Reg4x, Reg4y, Reg4y/Reg4x \r\n Reg5x, Reg5y= np.sum(2*sobelx[13:25, 0:13]*sobely[13:25,0:13])+sys.float_info.epsilon, \\\r\n np.sum(sobelx[13:25,0:13]**2-sobely[13:25,0:13]**2)\r\n X_pix_data[k,15], X_pix_data[k,16], X_pix_data[k,17] = Reg5x, Reg5y, Reg5y/Reg5x \r\n Reg6x, Reg6y= np.sum(2*sobelx[13:25,13:25]*sobely[13:25,13:25])+sys.float_info.epsilon, \\\r\n np.sum(sobelx[13:25,13:25]**2-sobely[13:25,13:25]**2) \r\n X_pix_data[k,18], X_pix_data[k,19], X_pix_data[k,20] = Reg6x, Reg6y, Reg6y/Reg6x \r\n\r\n X_pix_data_normalize = preprocessing.normalize(X_pix_data)\r\n pix_prediction = loaded_model.predict_classes(X_pix_data_normalize)\r\n pix_predict_proba = pd.DataFrame.from_records(loaded_model.predict_proba(X_pix_data_normalize))\r\n pix_prob_arr = pix_predict_proba.as_matrix()\r\n pix_prob_list = pix_predict_proba.max(axis=1)\r\n\r\n for m in range(NoWidth):\r\n pix_dir[i][m] = pix_prediction[m]\r\n pix_dir_prob[i][m] = pix_prob_list[m]\r\n pix_consensus[i][m] = sum(weightTBL[pix_prediction[m]]*pix_prob_arr[m])\r\n print()\r\n sys.stdout.flush()\r\n pixtri=np.full(fm.shape, 192, dtype=int)\r\n #pixSMT=np.full(fm.shape, 192, dtype=int)\r\n\r\n pixSMT = MyL.UT_PixTri(plt, fpfg, pix_dir, pixtri, SMLoop=3)\r\n 
plt.title(sys.argv[2][-11:])\r\n plt.imshow(pixtri, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_7.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n #plt.show()\r\n plt.title(sys.argv[2][-11:])\r\n plt.imshow(pixSMT, cmap=plt.cm.gray)\r\n ImgPath='D:\\\\Fingerprint\\\\paper8_NN\\\\P8NN_Images\\\\'+sys.argv[1]+'_'+sys.argv[2][-11:-4]+\"_8.png\"\r\n plt.savefig(ImgPath, dpi=600, bbox_inches='tight')\r\n #plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"limin-liu-tw/fp_data","sub_path":"PRL_load_model_run.py","file_name":"PRL_load_model_run.py","file_ext":"py","file_size_in_byte":11151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"4194695897","text":"def check_largest_or_smallest(num_list: list[int], check_type: str = \"L\") -> str:\n for idx_i, i in enumerate(num_list):\n for idx_j, j in enumerate(num_list):\n if (idx_j < len(num_list)-idx_i-1) and (num_list[idx_j] < num_list[idx_j+1] if check_type == \"L\" else num_list[idx_j] > num_list[idx_j+1]):\n temp = num_list[idx_j+1]\n num_list[idx_j+1] = num_list[idx_j]\n num_list[idx_j] = temp\n return_string = \"\"\n for idx_n, n in enumerate(num_list):\n return_string += str(n)\n if idx_n+1 != len(num_list):\n return_string += \" > \" if check_type == \"L\" else \" < \"\n return return_string\n\n\nnumberOfInputs = int(input())\nfor i in range(numberOfInputs):\n numList = [int(x) for x in input().split()]\n print(check_largest_or_smallest(numList))\n\n\n#=============================================================\n#Write a program to find the largest of three numbers?\n\n# Input Format\n#\n# The first line informs you the number of test cases. Each separate line has three integer values to find the relation between the three integer values i.e. 
to find the largest of the three values\n#\n# Constraints\n#\n# -2147483647 ≤ n1,n2 ≤ 2147483647\n#\n# Output Format\n#\n# %d > %d > %d\n#\n# Sample Input 0\n#\n# 3\n# 12 90 45\n# 10 190 1234\n# 1290 56 823\n# Sample Output 0\n#\n# 90 > 45 > 12\n# 1234 > 190 > 10\n# 1290 > 823 > 56\n\n\n# [2,6,1,3,5]\n# [2,1,3,5,6]\n","repo_name":"naren-source/python-flask-jose-salv","sub_path":"problem_solving/module_3/4_7_largest_of_three.py","file_name":"4_7_largest_of_three.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37271888157","text":"def solve_a(elves, steps=1000):\n dirs = [\"N\", \"S\", \"W\", \"E\"]\n total = 0\n moved = True\n while moved:\n moved = False\n proposed = {}\n moving = set()\n refused = set()\n for elf in elves:\n r, c = elf.split('_')\n r = int(r)\n c = int(c)\n # N\n for dir in dirs:\n propose(c, elf, elves, proposed, r, refused, dir, moving)\n # S\n\n dirs = dirs[1:] + [dirs[0]]\n\n for proposed_elf, current_elf in proposed.items():\n if proposed_elf not in refused:\n elves.remove(current_elf)\n elves.add(proposed_elf)\n moved = True\n\n total += 1\n steps -= 1\n if total % 10 == 0:\n print(total)\n\n return total\n # min_x, min_y, max_x, max_y = 100, 100, 0, 0\n # for elf in elves:\n # x, y = elf.split('_')\n # x, y = int(x), int(y)\n # min_x, min_y = min(min_x, x), min(min_y, y)\n # max_x, max_y = max(max_x, x), max(max_y, y)\n #\n # return (max_y - min_y + 1) * (max_x - min_x + 1) - len(elves)\n\n\ndef propose(c, elf, elves, proposed, r, refused, dir, moving):\n go = False\n if elf in moving:\n return\n # check any elves in the 8 positions\n for i, j in ([-1, -1], [-1, 0], [-1, 1],\n [0, -1], [0, 1],\n [1, -1], [1, 0], [1, 1]):\n if f'{r + i}_{c + j}' in elves:\n go = True\n break\n if not go:\n return\n\n if dir == 'N':\n if not {f'{r - 1}_{c - 1}', f'{r - 1}_{c}',\n f'{r - 1}_{c + 1}'}.intersection(elves):\n if f'{r - 1}_{c}' in proposed:\n refused.add(f'{r - 1}_{c}')\n elif elf not in proposed:\n proposed[f'{r - 1}_{c}'] = elf\n moving.add(elf)\n\n elif dir == 'S':\n if not {f'{r + 1}_{c - 1}', f'{r + 1}_{c}', f'{r + 1}_{c + 1}'}.intersection(elves):\n if f'{r + 1}_{c}' in proposed:\n refused.add(f'{r + 1}_{c}')\n elif elf not in proposed:\n proposed[f'{r + 1}_{c}'] = elf\n moving.add(elf)\n\n elif dir == 'W':\n if not {f'{r - 1}_{c - 1}', f'{r}_{c - 1}', f'{r + 1}_{c - 1}'}.intersection(elves):\n if f'{r}_{c - 1}' in proposed:\n refused.add(f'{r}_{c - 1}')\n elif elf not in proposed:\n proposed[f'{r}_{c - 1}'] = elf\n moving.add(elf)\n\n elif dir == 'E':\n if not {f'{r - 1}_{c + 1}', f'{r}_{c + 1}', f'{r + 1}_{c + 1}'}.intersection(elves):\n if f'{r}_{c + 1}' in proposed:\n refused.add(f'{r}_{c + 1}')\n elif elf not in proposed:\n proposed[f'{r}_{c + 1}'] = elf\n moving.add(elf)\n\n\nif __name__ == '__main__':\n map = open(\"23.txt\").read().split('\\n')\n elves = set() # {row}_{column}\n for i, row in enumerate(map):\n for j, cell in enumerate(row):\n if cell == '#':\n elves.add(f'{i}_{j}')\n\n # print(solve_a(elves, 1))\n # print(solve_a(elves, 2))\n print(solve_a(elves, 10))\n","repo_name":"DannyLee12/aoc","sub_path":"2022/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"40722738649","text":"import uvicorn\r\nfrom starlette.responses import JSONResponse\r\nfrom items import *\r\nfrom connect import *\r\nimport json\r\n\r\n# parm: 
person\r\n# ret: all the person table\r\n# The user click on sign_up\r\n@app.post('/api/sign_up')\r\ndef sigh_up(person: Person):\r\n check = \"select * from us_travel.person where name = '\" + str(person.uname) + \"'\"\r\n mycursor.execute(check)\r\n check = mycursor.fetchall()\r\n if check:\r\n return Response(\"this user name already exist\", status_code=400)\r\n else:\r\n insert = \"INSERT INTO `us_travel`.`person` (`name`, `password`) VALUES ('\" + person.uname + \"', '\" + person.password + \"');\"\r\n mycursor.execute(insert)\r\n MY_DB.commit()\r\n return JSONResponse(status_code=200)\r\n\r\n\r\n# parm: person\r\n# ret: all the person table\r\n# The user click on sign_in\r\n@app.post('/api/sign_in')\r\ndef sigh_in(person: Person):\r\n # if there is no name like that\r\n check = \"select * from us_travel.person where name = '\" + person.uname + \"' and password = '\" + person.password + \"'\"\r\n mycursor.execute(check)\r\n check = mycursor.fetchall()\r\n if not check:\r\n return JSONResponse(\"something is wrong, the username or password are incorrect\", status_code=400)\r\n else:\r\n return JSONResponse(status_code=200)\r\n\r\n\r\n# parm: person\r\n# The user what us delete from the DB\r\n@app.delete('/api/delete_person')\r\ndef delete(person: Person):\r\n sql = \"DELETE FROM us_travel.save_travels WHERE (`password` = '\" + str(\r\n person.password) + \"') and (`name` = '\" + person.uname + \"');\"\r\n mycursor.execute(sql)\r\n MY_DB.commit()\r\n\r\n\r\n# parm: flight_from\r\n# ret: flight for the user according his ask\r\n# The user click on flight\r\n@app.post('/api/lucky')\r\ndef find_travel(flight_from: FlightFrom):\r\n flight_1 = []\r\n flight_2 = []\r\n hotel = []\r\n attraction = []\r\n # get travel when you have all the travel\r\n while not hotel or not flight_1 or not flight_2 or not attraction or len(attraction) != 3:\r\n city = \"SELECT city,state FROM us_travel.airports where airport_id = '\" + flight_from.origin_airport_id + \"';\"\r\n mycursor.execute(city)\r\n result2 = mycursor.fetchall()\r\n\r\n city = result2[0][0]\r\n state = result2[0][1]\r\n\r\n # flight_1 = \"SELECT\"\\\r\n # \" * FROM team01.save_travels \"\\\r\n # \" INNER JOIN team01.flight \"\\\r\n # \" ON team01.save_travels.return_flight_id\"\\\r\n # \" =team01.flight.flight_if\"\\\r\n # \" where team01.save_travels.person_id = \"\\\r\n # \" (select idperson from team01.person where name = '\"+str(flight_from.uname)+\"') \"\\\r\n # \" and team01.flight.arrive = \"\\\r\n # \"(select id_city from team01.city where id_state = \"\\\r\n # \"(select id_state from team01.city where name = '\"+str(+\"'\"\\\r\n # \"limit 1)limit 1\"\\\r\n # \")limit 1\"\r\n # mycursor.execute(flight_1)\r\n # flight_1 = mycursor.fetchall()\r\n #\r\n # if not flight_1:\r\n flight_1 = \"SELECT * FROM us_travel.flight where\" \\\r\n \" arrive = (select id_city from us_travel.city where name = \" \\\r\n \"'\" + city + \"') \" \\\r\n \" order by rand()\" \\\r\n \" limit 1; \" \\\r\n \"\"\r\n\r\n mycursor.execute(flight_1)\r\n flight_1 = mycursor.fetchall()\r\n\r\n dest_city = \"SELECT name FROM us_travel.city where id_city = '\" + str(flight_1[0][2]) + \"';\"\r\n mycursor.execute(dest_city)\r\n dest_city = mycursor.fetchall()\r\n dest_city = dest_city[0][0]\r\n\r\n dest_state = \"select short from us_travel.state where id_state = \" \\\r\n \"(select id_state from us_travel.city where id_city = '\" + str(flight_1[0][2]) + \"') ;\"\r\n mycursor.execute(dest_state)\r\n dest_state = mycursor.fetchall()\r\n dest_state = 
dest_state[0][0]\r\n\r\n flight_3 = \"SELECT * FROM us_travel.flight where\" \\\r\n \" arrive = '\" + str(flight_1[0][3]) + \"' and dest = '\" + str(flight_1[0][2]) + \"'\" \\\r\n \" order by rand()\" \\\r\n \" limit 1\"\r\n\r\n mycursor.execute(flight_3)\r\n flight_2 = mycursor.fetchall()\r\n\r\n # if there is hotel from his favorite user hotel\r\n hotel = \"SELECT us_travel.hotel.id_hotel \" \\\r\n \" FROM us_travel.save_travels\"\\\r\n \" INNER JOIN us_travel.hotel\"\\\r\n \" ON us_travel.save_travels.id_hotel=us_travel.hotel.id_hotel\"\\\r\n \" where us_travel.save_travels.person_id = \"\\\r\n \" (select idperson from us_travel.person where name = '\"+str(flight_from.uname)+\"')\"\\\r\n \" and us_travel.hotel.id_city = \"\\\r\n \" (select id_city from us_travel.city where name = '\"+str(dest_city)+\"')\"\r\n mycursor.execute(hotel)\r\n hotel = mycursor.fetchall()\r\n\r\n # if not, find the lowest price (do that random to get new trip all click)\r\n if not hotel:\r\n hotel = \"SELECT \"\\\r\n \"us_travel.hotel.id_hotel \"\\\r\n \"FROM us_travel.hotel \"\\\r\n \"INNER JOIN us_travel.city \"\\\r\n \"ON us_travel.city.id_city=us_travel.hotel.id_city \"\\\r\n \"where us_travel.city.id_city =\" \\\r\n \"(select id_city from us_travel.city where name = '\"+str(dest_city)+\"') \"\\\r\n \"order by rand() \"\\\r\n \"limit 1\"\r\n\r\n mycursor.execute(hotel)\r\n hotel = mycursor.fetchall()\r\n\r\n # also random\r\n attraction = \"SELECT id_attraction,type,image FROM us_travel.attraction_1 \" \\\r\n \"where \" \\\r\n \"state = (select id_state from us_travel.state where short = '\" + str(\r\n dest_state) + \"') \" \\\r\n \"order by rand() \" \\\r\n \"limit 3\"\r\n\r\n mycursor.execute(attraction)\r\n attraction = mycursor.fetchall()\r\n\r\n # update\r\n id_hotel = str(hotel[0][0])\r\n going_flight_id = str(flight_1[0][0])\r\n return_flight_id = str(flight_2[0][0])\r\n attraction1_id = str(attraction[0][0])\r\n attraction2_id = str(attraction[1][0])\r\n attraction3_id = str(attraction[2][0])\r\n\r\n person_id = \"SELECT person_id FROM us_travel.last_tripe where person_id = \" \\\r\n \"((select idperson from us_travel.person where name = '\" + str(flight_from.uname) + \"')) ;\"\r\n\r\n mycursor.execute(person_id)\r\n person_id = mycursor.fetchall()\r\n\r\n if not person_id:\r\n insert = \"INSERT INTO `us_travel`.`last_tripe` (`person_id`) VALUES \" \\\r\n \"((select idperson from us_travel.person where name = '\" + str(flight_from.uname) + \"'));\"\r\n mycursor.execute(insert)\r\n MY_DB.commit()\r\n\r\n update = \"UPDATE `us_travel`.`last_tripe` SET `id_hotel`\" \\\r\n \" = '\" + id_hotel + \"', `going_flight_id` = '\" + going_flight_id + \"', `return_flight_id` = '\" + return_flight_id + \"', \" \\\r\n \" `attraction1_id` = '\" + attraction1_id + \"', `attraction2_id` = '\" + attraction2_id + \"', `attraction3_id` = '\" + attraction3_id + \"',\" \\\r\n \" `passangers` = '\" + str( flight_from.passengers) + \"' \" \\\r\n \" WHERE (`person_id` = \" \\\r\n \"(select idperson from us_travel.person where name = '\" + str(flight_from.uname) + \"'));\"\r\n\r\n mycursor.execute(update)\r\n MY_DB.commit()\r\n y = Name(uname=flight_from.uname)\r\n x = get_last_travel(y)\r\n return return_JSON(x)\r\n\r\n\r\ndef id_attraction(type, image):\r\n att1 = \"SELECT id_attraction FROM us_travel.attraction_1 where image = '\" + image + \"' limit 1;\"\r\n mycursor.execute(att1)\r\n att1 = mycursor.fetchall()\r\n\r\n return att1\r\n\r\n\r\n# parm: travel\r\n# ret: -\r\n# The user want to save his 
travel\r\n@app.post(\"/api/save\")\r\ndef save(save: Save):\r\n hotel = \"SELECT id_hotel FROM us_travel.hotel where name = '\" + save.trip.hotel.name + \"';\"\r\n mycursor.execute(hotel)\r\n hotel = mycursor.fetchall()\r\n\r\n flight_1 = \"SELECT flight_if FROM us_travel.flight \" \\\r\n \"where date = '\" + str(save.trip.going_flight.date) + \"'\" \\\r\n \"and price = '\" + str(save.trip.going_flight.price) + \"'\" \\\r\n \" ;\"\r\n\r\n mycursor.execute(flight_1)\r\n flight_1 = mycursor.fetchall()\r\n\r\n flight_2 = \"SELECT flight_if FROM us_travel.flight where \" \\\r\n \"date = '\" + str(save.trip.return_flight.date) + \"' and \" \\\r\n \"price = '\" + str(save.trip.return_flight.price) + \"' \" \\\r\n \" ;\"\r\n\r\n mycursor.execute(flight_2)\r\n flight_2 = mycursor.fetchall()\r\n\r\n person_name = \"SELECT * FROM us_travel.person where name = '\" + save.uname + \"';\"\r\n mycursor.execute(person_name)\r\n person_name = mycursor.fetchall()\r\n\r\n attraction_1 = id_attraction(save.trip.attractions.att1.name, save.trip.attractions.att1.picture_link)[0][0]\r\n attraction_2 = id_attraction(save.trip.attractions.att2.name, save.trip.attractions.att2.picture_link)[0][0]\r\n attraction_3 = id_attraction(save.trip.attractions.att3.name, save.trip.attractions.att3.picture_link)[0][0]\r\n\r\n # save the info inside save travel table\r\n insert = \"INSERT INTO `us_travel`.`save_travels`\" \\\r\n \" (`id_hotel`, `going_flight_id`, `return_flight_id`, `attraction1_id`, `attraction2_id`, \" \\\r\n \" `attraction3_id`, `person_id`, `passangers`) \" \\\r\n \" VALUES ('\" + str(hotel[0][0]) + \"', '\" + str(flight_1[0][0]) + \"', '\" + str(\r\n flight_2[0][0]) + \"', '\" + str(attraction_1) + \"',\" \\\r\n \" '\" + str(attraction_2) + \"', '\" + str(\r\n attraction_3) + \"', '\" + str(person_name[0][0]) + \"', '\" + str(save.trip.passengers) + \"');\"\r\n\r\n mycursor.execute(insert)\r\n MY_DB.commit()\r\n\r\n\r\n@app.post(\"/api/last\")\r\ndef get_last_travel_API(name: Name):\r\n check = \"SELECT * FROM us_travel.last_tripe where person_id = (select idperson from us_travel.person where name = '\" + name.uname + \"');\"\r\n mycursor.execute(check)\r\n check = mycursor.fetchall()\r\n\r\n if not check:\r\n return JSONResponse(\"there isn't saves trip yet\", status_code=400)\r\n x = (get_last_travel(name))\r\n return return_JSON(x)\r\n\r\n\r\ndef return_JSON(x):\r\n var_flight_return = vars(x.return_flight)\r\n var_flight_going = vars(x.going_flight)\r\n var_hotel = vars(x.hotel)\r\n var_att = {\r\n \"att1\": vars(x.attractions.att1),\r\n \"att2\": vars(x.attractions.att2),\r\n \"att3\": vars(x.attractions.att3)\r\n }\r\n dictio = {\"going_flight\": var_flight_going,\r\n \"return_flight\": var_flight_return,\r\n \"going_flight\": var_flight_going,\r\n \"hotel\": var_hotel,\r\n \"attractions\": var_att,\r\n \"passengers\": x.passengers,\r\n \"origin_airport_id\": x.origin_airport_id\r\n }\r\n return json.dumps(dictio)\r\n\r\n\r\n# parm: name\r\n# ret: the lsat trip that save\r\ndef get_last_travel(name: Name):\r\n\r\n check = \"SELECT * FROM us_travel.last_tripe where person_id = (select idperson from us_travel.person where name = '\" + name.uname + \"');\"\r\n mycursor.execute(check)\r\n check = mycursor.fetchall()\r\n\r\n if not check:\r\n return JSONResponse(\"there isn't saves trip yet\", status_code=400)\r\n\r\n passengers = \"SELECT passangers FROM us_travel.last_tripe where person_id='\" + str(check[0][0]) + \"';\"\r\n mycursor.execute(passengers)\r\n passengers = mycursor.fetchall()\r\n\r\n 
last_tripe_str = \"SELECT * FROM us_travel.last_tripe where \" \\\r\n \"person_id = \" \\\r\n \"(select idperson from us_travel.person where name = '\" + name.uname + \"');\"\r\n\r\n mycursor.execute(last_tripe_str)\r\n last_tripe_str = mycursor.fetchall()\r\n\r\n hotel = \"SELECT * FROM us_travel.hotel where \" \\\r\n \"id_hotel = '\" + str(last_tripe_str[0][1]) + \"';\"\r\n\r\n mycursor.execute(hotel)\r\n hotel = mycursor.fetchall()\r\n\r\n hotel_city = \"SELECT name,id_state FROM us_travel.city where id_city = '\" + str(hotel[0][2]) + \"'\"\r\n mycursor.execute(hotel_city)\r\n hotel_city = mycursor.fetchall()\r\n\r\n state = \"SELECT name FROM us_travel.state where id_state = '\" + str(hotel_city[0][1]) + \"' \"\r\n mycursor.execute(state)\r\n state = mycursor.fetchall()\r\n\r\n review = \"SELECT score,titel FROM us_travel.reviews where hotel_id = '\" + str(hotel[0][0]) + \"';\"\r\n mycursor.execute(review)\r\n review = mycursor.fetchall()\r\n\r\n if not review:\r\n review = [[\"4.5\", \"WOW!\"], []]\r\n # if (len(str(review[0][0]))) > 4:\r\n # r = (float(review[0][0]))\r\n # r = round(r, 2)\r\n # r = str(r)\r\n # else:\r\n # r = review[0][0]\r\n # else:\r\n # flo = float(review[0][0])\r\n # review[0][0] = str(float(\"{:.2f}\".format(flo))\r\n hotel = Hotel(name=hotel[0][1], city=hotel_city[0][0], state=state[0][0], avg_rating=review[0][0],\r\n review=review[0][1], price=hotel[0][3])\r\n flight_1 = get_flight(last_tripe_str[0][2])\r\n flight_2 = get_flight(last_tripe_str[0][3])\r\n all_attraction = AllAttraction(att1=attraction(last_tripe_str[0][4], state[0][0]),\r\n att2=attraction(last_tripe_str[0][5], state[0][0]),\r\n att3=attraction(last_tripe_str[0][6], state[0][0]))\r\n\r\n original_airport = \"SELECT * FROM us_travel.airports where city = '\" + str(\r\n flight_2.airport_city) + \"' and state = '\" + str(flight_2.airport_state) + \"';\"\r\n mycursor.execute(original_airport)\r\n original_airport = mycursor.fetchall()\r\n\r\n travel = Travel(hotel=hotel, going_flight=flight_1, return_flight=flight_2, attractions=all_attraction,\r\n passengers=str(passengers[0][0]), origin_airport_id=str(original_airport[0][0]))\r\n return travel\r\n\r\n\r\ndef attraction(id, location):\r\n attraction = \"SELECT type,image,location FROM us_travel.attraction_1\" \\\r\n \" where id_attraction = '\" + str(id) + \"';\"\r\n mycursor.execute(attraction)\r\n attraction_1 = mycursor.fetchall()\r\n return Attraction(name=attraction_1[0][0], location=attraction_1[0][2], picture_link=attraction_1[0][1])\r\n\r\n\r\ndef get_flight(airport_id):\r\n flight_1 = \"SELECT * FROM us_travel.flight where flight_if = '\" + str(airport_id) + \"';\"\r\n mycursor.execute(flight_1)\r\n flight_1 = mycursor.fetchall()\r\n\r\n airport = \"SELECT name,city,state FROM us_travel.airports where city = \" \\\r\n \" (select name from us_travel.city where id_city = '\" + str(flight_1[0][2]) + \"' );\"\r\n mycursor.execute(airport)\r\n airport = mycursor.fetchall()\r\n\r\n departure_hour = str(flight_1[0][5])\r\n print(departure_hour)\r\n\r\n if not \":\" in departure_hour:\r\n while len(departure_hour) < 4:\r\n departure_hour = \"0\" + departure_hour\r\n\r\n departure_hour = departure_hour[0:2] + \":\" + departure_hour[2:4]\r\n\r\n arrival_hour = str(flight_1[0][6])\r\n print(arrival_hour)\r\n if not \":\" in arrival_hour:\r\n\r\n while len(arrival_hour) < 4:\r\n arrival_hour = \"0\" + arrival_hour\r\n\r\n arrival_hour = arrival_hour[0:2] + \":\" + arrival_hour[2:4]\r\n\r\n\r\n return Flight(date=str(flight_1[0][1]), carrier=\"\", 
airport_name=str(airport[0][0]),\r\n airport_city=str(airport[0][1]),\r\n airport_state=str(airport[0][2]), price=str(flight_1[0][4]),\r\n departure_hour=departure_hour, arrival_hour=arrival_hour)\r\n\r\n\r\nif __name__ == '__main__':\r\n uvicorn.run(app, host=\"localhost\", port=8080)\r\n","repo_name":"avichaigel/US-Travel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33184589308","text":"from sentence_transformers import SentenceTransformer, util, InputExample, losses\nfrom torch.utils.data import DataLoader\nimport re\n\nMATCH_THRESHOLD = 0.68\n\n\ndef convert_bytes_to_string(master_bytes):\n return \"\".join(map(chr,master_bytes))\n\ndef extract_master_text_embeddings(master_dict, model):\n master_text_embeddings = {}\n for key, _ in master_dict.items():\n master_text_embeddings[key] = model.encode(key, convert_to_tensor=True)\n return master_text_embeddings\n\n\ndef text_find_match(document_text, master_text_embeddings, model):\n document_embedding = model.encode(document_text, convert_to_tensor=True)\n max_score, max_field = 0, None\n for field, field_embedding in master_text_embeddings.items():\n cos_sim = util.pytorch_cos_sim(document_embedding, field_embedding)\n if cos_sim > max_score and cos_sim >= MATCH_THRESHOLD:\n max_score, max_field = cos_sim, field\n return max_field, max_score\n\n\ndef extract_master_dict(master_document):\n master_dict = {}\n for pair in master_document.split(';'):\n if not any(c.isalpha() for c in pair):\n continue\n key, value = pair.split(':')\n key = re.sub(r'[^A-Za-z ]+', '', key).lower()\n master_dict[key] = value\n return master_dict\n\n\ndef process_final_position(box_position, center_x, field):\n left_point = box_position[0]\n if left_point[0] <= center_x: # if line is to be filled up on top\n return ((int(center_x), box_position[0][1]), box_position[1])\n return box_position\n\n\ndef match_field_to_master(field_fill_positions, master_dict, master_text_embeddings, model):\n instafill_dict = {}\n used_fields = set()\n used_lines_dict = {}\n for page, fields in field_fill_positions.items():\n used_lines = []\n field_value_dict = {}\n for field, content in fields.items():\n print(\"CHECKING\", field)\n box_position = content['line']\n center_x = content['center_x']\n\n max_field, max_score = text_find_match(\n field, master_text_embeddings, model)\n\n # handle repeating fields\n if '*_' in field:\n field_value_dict[field] = {\n 'value': '', 'position': box_position[0]}\n used_lines.append(box_position)\n continue\n\n if max_field is None:\n continue\n\n used_lines.append(box_position)\n box_position = process_final_position(\n box_position, center_x, field)\n\n used_fields.add(field)\n value = master_dict[max_field]\n print(\"MATCHING\", field, \"TO\", value)\n field_value_dict[field] = {\n 'value': value, 'position': box_position[0]}\n\n used_lines_dict[page] = used_lines\n instafill_dict[page] = field_value_dict\n return instafill_dict, used_lines_dict\n\n\ndef define_train_examples():\n train_examples = []\n with open(\"instafill_text/good_phrase_pairs.txt\", \"r\") as file:\n good_phrase_pairs = file.read()\n pairs_list = good_phrase_pairs.split(\"|\")\n for pair in pairs_list:\n if len(pair) == 0:\n continue\n first, second = pair.split(\":\")\n train_examples.append(InputExample(\n texts=[first, second], label=0.9))\n\n with open(\"instafill_text/bad_phrase_pairs.txt\", \"r\") as file:\n 
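# Editor note (format inferred from the parsing below, not documented upstream): both\n # phrase-pair files are expected to hold pairs as \"first phrase:second phrase\" joined\n # by \"|\", e.g. \"mobile number:cellphone number|zip code:postal code\".\n 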
bad_phrase_pairs = file.read()\n pairs_list = bad_phrase_pairs.split(\"|\")\n for pair in pairs_list:\n if len(pair) == 0:\n continue\n first, second = pair.split(\":\")\n train_examples.append(InputExample(\n texts=[first, second], label=0.1))\n\n return train_examples\n\n\ndef train_model():\n model = SentenceTransformer(\n r'E:\\PROJECTS\\instafill_backend\\instafill_text\\model\\field_model1')\n train_examples = define_train_examples()\n train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)\n train_loss = losses.CosineSimilarityLoss(model)\n\n model.fit(train_objectives=[\n (train_dataloader, train_loss)], epochs=10, warmup_steps=100)\n model.save('instafill_text/model/field_model1')\n\n\ndef test_model(model):\n sentences = [\"mobile number\", \"cellphone number\", \"telephone number\"]\n embeddings = [model.encode(sentence, convert_to_tensor=True)\n for sentence in sentences]\n cos_sim = util.pytorch_cos_sim(embeddings[0], embeddings[1])\n print(\"SIMILARITY OF 1 AND 2 IS\", cos_sim)\n cos_sim = util.pytorch_cos_sim(embeddings[0], embeddings[2])\n print(\"SIMILARITY OF 1 AND 3 IS\", cos_sim)\n","repo_name":"sheensantoscapadngan/instafill","sub_path":"api/document_helpers/field_helper_text.py","file_name":"field_helper_text.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"70591830580","text":"# -*- coding: utf-8 -*-\n# (c) Nano Nano Ltd 2019\n\nimport sys\nfrom decimal import Decimal\n\nfrom colorama import Fore, Back, Style\nfrom tqdm import tqdm\n\nfrom .config import config\n\nclass AuditRecords(object):\n def __init__(self, transaction_records):\n self.wallets = {}\n self.totals = {}\n self.failures = []\n\n if config.debug:\n print(\"%saudit transaction records\" % Fore.CYAN)\n\n for tr in tqdm(transaction_records,\n unit='tr',\n desc=\"%saudit transaction records%s\" % (Fore.CYAN, Fore.GREEN),\n disable=bool(config.debug or not sys.stdout.isatty())):\n if config.debug:\n print(\"%saudit: TR %s\" % (Fore.MAGENTA, tr))\n if tr.buy:\n self._add_tokens(tr.wallet, tr.buy.asset, tr.buy.quantity)\n\n if tr.sell:\n self._subtract_tokens(tr.wallet, tr.sell.asset, tr.sell.quantity)\n\n if tr.fee:\n self._subtract_tokens(tr.wallet, tr.fee.asset, tr.fee.quantity)\n\n if config.debug:\n print(\"%saudit: final balances by wallet\" % Fore.CYAN)\n for wallet in sorted(self.wallets, key=str.lower):\n for asset in sorted(self.wallets[wallet]):\n print(\"%saudit: %s:%s=%s%s%s\" % (\n Fore.YELLOW,\n wallet,\n asset,\n Style.BRIGHT,\n '{:0,f}'.format(self.wallets[wallet][asset].normalize()),\n Style.NORMAL))\n\n print(\"%saudit: final balances by asset\" % Fore.CYAN)\n for asset in sorted(self.totals):\n print(\"%saudit: %s=%s%s%s\" % (\n Fore.YELLOW,\n asset,\n Style.BRIGHT,\n '{:0,f}'.format(self.totals[asset].normalize()),\n Style.NORMAL))\n\n def _add_tokens(self, wallet, asset, quantity):\n if wallet not in self.wallets:\n self.wallets[wallet] = {}\n\n if asset not in self.wallets[wallet]:\n self.wallets[wallet][asset] = Decimal(0)\n\n self.wallets[wallet][asset] += quantity\n\n if asset not in self.totals:\n self.totals[asset] = Decimal(0)\n\n self.totals[asset] += quantity\n\n if config.debug:\n print(\"%saudit: %s:%s=%s (+%s)\" % (\n Fore.GREEN,\n wallet,\n asset,\n '{:0,f}'.format(self.wallets[wallet][asset].normalize()),\n '{:0,f}'.format(quantity.normalize())))\n\n def _subtract_tokens(self, wallet, asset, quantity):\n if wallet not in self.wallets:\n 
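# Editor note: create the wallet bucket lazily (mirroring _add_tokens above) so a\n # sell or fee can be recorded even for an asset this wallet has never received;\n # the balance then simply goes negative and is flagged below.\n 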
self.wallets[wallet] = {}\n\n if asset not in self.wallets[wallet]:\n self.wallets[wallet][asset] = Decimal(0)\n\n self.wallets[wallet][asset] -= quantity\n\n if asset not in self.totals:\n self.totals[asset] = Decimal(0)\n\n self.totals[asset] -= quantity\n\n if config.debug:\n print(\"%saudit: %s:%s=%s (-%s)\" %(\n Fore.GREEN,\n wallet,\n asset,\n '{:0,f}'.format(self.wallets[wallet][asset].normalize()),\n '{:0,f}'.format(quantity.normalize())))\n\n if self.wallets[wallet][asset] < 0 and asset not in config.fiat_list:\n tqdm.write(\"%sWARNING%s Balance at %s:%s is negative %s\" % (\n Back.YELLOW+Fore.BLACK, Back.RESET+Fore.YELLOW,\n wallet, asset, '{:0,f}'.format(self.wallets[wallet][asset].normalize())))\n\n def compare_pools(self, holdings):\n passed = True\n for asset in sorted(self.totals):\n if asset in config.fiat_list:\n continue\n\n if asset in holdings:\n if self.totals[asset] == holdings[asset].quantity:\n if config.debug:\n print(\"%scheck pool: %s (ok)\" %(Fore.GREEN, asset))\n else:\n if config.debug:\n print(\"%scheck pool: %s %s (mismatch)\" %(Fore.RED, asset,\n '{:+0,f}'.format((holdings[asset].quantity-\n self.totals[asset]).normalize())))\n\n self._log_failure(asset, self.totals[asset], holdings[asset].quantity)\n passed = False\n else:\n if config.debug:\n print(\"%scheck pool: %s (missing)\" %(Fore.RED, asset))\n\n self._log_failure(asset, self.totals[asset], None)\n passed = False\n\n return passed\n\n def _log_failure(self, asset, audit, s104):\n failure = {}\n failure['asset'] = asset\n failure['audit'] = audit\n failure['s104'] = s104\n\n self.failures.append(failure)\n\n def report_failures(self):\n header = \"%-8s %25s %25s %25s\" % ('Asset',\n 'Audit Balance',\n 'Section 104 Pool',\n 'Difference')\n\n print('\\n%s%s' % (Fore.YELLOW, header))\n for failure in self.failures:\n if failure['s104'] is not None:\n print(\"%s%-8s %25s %25s %s%25s\" % (\n Fore.WHITE,\n failure['asset'],\n '{:0,f}'.format(failure['audit'].normalize()),\n '{:0,f}'.format(failure['s104'].normalize()),\n Fore.RED,\n '{:+0,f}'.format((failure['s104']-failure['audit']).normalize())))\n else:\n print(\"%s%-8s %25s %s%25s\" % (\n Fore.WHITE,\n failure['asset'],\n '{:0,f}'.format(failure['audit'].normalize()),\n Fore.RED,\n ''))\n","repo_name":"rlayug312/bitty.tax","sub_path":"bittytax/audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"44159643917","text":"import numpy as np\nimport sys\nimport os\n\nprint()\nprint('launched gen params')\nprint()\n\nparamSet = int(sys.argv[1]) # cast to int: used below as a row index into modelParams\nsaveFld = sys.argv[2]\n\n# force params\nm = 50 # 70.0#np.random.choice(mu)\nk = 12 # 6.0\nol = 0.2 # np.random.choice(maxOverlap)\nd = 10 # 9.0\n\n\nmodelParams = np.loadtxt('fittingInfo/paramsModel.csv', delimiter=',')\nkill = modelParams[paramSet, 0]\ninfil = modelParams[paramSet, 1]\npdl1m = modelParams[paramSet, 2]\npdl1g = modelParams[paramSet, 3]\ninflu = modelParams[paramSet, 4]\n\ncellParams = np.zeros((12, 2))\n\n# cancer params\ncellParams[0, 0] = m # mu\ncellParams[1, 0] = k # kc\ncellParams[2, 0] = d # damping\ncellParams[3, 0] = ol # overlap\ncellParams[4, 0] = 1 / 35 # div probability (hours) \ncellParams[5, 0] = 1/(24*10) # death probability (hours) \ncellParams[6, 0] = pdl1m # pdl1 when expressed\ncellParams[7, 0] = pdl1g # prob of gaining pdl1\ncellParams[8, 0] = 20 # diameter (um)\n\n# cd8 params\ncellParams[0, 1] = m # mu\ncellParams[1, 1] = k # kc\ncellParams[2, 1] = d # 
damping\ncellParams[3, 1] = ol # overlap\ncellParams[4, 1] = 1/(24*3) # death probability \ncellParams[5, 1] = 240 # migration speed um/hr\ncellParams[6, 1] = kill # killProb \ncellParams[7, 1] = influ # influence distance\ncellParams[8, 1] = infil # max infiltration distance\ncellParams[9, 1] = 0.3 # migration bias \ncellParams[10, 1] = 0.1 # decrease of influence when suppressed\ncellParams[11, 1] = 10 # diameter (um) \n\nrecParams = np.zeros((3, 1))\nrecParams[0] = 0.01 # cd8RecRate\nrecParams[1] = 0.3 # cd8Ratio\nrecParams[2] = 200 # recDist (recruit a uniform distribution recDist away from the tumor edge)\n\nenvParams = np.zeros((2, 1))\nenvParams[0] = 40 # simulation duration (days)\nenvParams[1] = 0 # 3d? 0 - no, 1 - yes\n\nos.system('mkdir -p ' + saveFld)\n\nnp.savetxt(saveFld + '/cellParams.csv', cellParams, delimiter=',')\nnp.savetxt(saveFld + '/recParams.csv', recParams, delimiter=',')\nnp.savetxt(saveFld + '/envParams.csv', envParams, delimiter=',')\n\nprint('done generating params')","repo_name":"FinleyLabUSC/Representation-learning-for-ABM-parameter-estimation","sub_path":"model_code/example_1/genParams.py","file_name":"genParams.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71328435377","text":"import numpy as np \n\nimport pandas as pd \n\nimport seaborn as sn\n\nimport cv2 \n\nimport keras as k\n\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\n\nfrom IPython.display import display\n\n\nfrom subprocess import check_output\n\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\ntrain_labels = pd.read_csv('../input/train_v2.csv')\n\ndummies = train_labels['tags'].str.get_dummies(sep=' ')\n\nencoded_labels = pd.concat([train_labels, dummies], axis=1)\n\nencoded_labels.head()\ny = encoded_labels.mean()\n\nx = range(len(y))\n\nplt.figure(figsize=(25,20))\n\nplt.bar(x, y)\n\nplt.xticks(x, encoded_labels.columns[2:], fontsize= 30, rotation=90)\n\nplt.yticks(fontsize=30)\n\nplt.show()\n\nx_train_tif = []\n\n\n\nfor idx, tags in tqdm(train_labels.values):\n\n #img_jpg = cv2.imread('../input/train-jpg/{}.jpg'.format(idx))\n\n #x_train.append(cv2.resize(img_jpg, (32, 32)))\n\n img_tif = cv2.imread('../input/train-tif-v2/{}.tif'.format(idx),-1)[:,:,3]\n\n x_train_tif.append(cv2.resize(img_tif, (32, 32)))\ny_train = np.array(encoded_labels[encoded_labels.columns[-17:]])\n\ny_train = np.array(y_train, np.uint8)\n\nx_train = np.array(x_train_tif, np.float16) / 255.\n\n\n\nprint(x_train.shape)\n\nprint(y_train.shape)\nimport tensorflow as tf\n\nsess = tf.InteractiveSession()\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\n\ny_ = tf.placeholder(tf.float32, shape=[None, 10])\n\nW = tf.Variable(tf.zeros([784,10]))\n\nb = tf.Variable(tf.zeros([10]))\n\nsess.run(tf.global_variables_initializer())\n\ny = tf.matmul(x,W) + b\n\ncross_entropy = tf.reduce_mean(\n\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nimport keras as k\n\nfrom keras.models import Sequential\n\nfrom keras.layers import Dense, Dropout, Flatten\n\nfrom keras.layers import Conv2D, MaxPooling2D\n\n\n\nsplit = 35000\n\nx_train, x_valid, y_train, y_valid = x_train[:split], x_train[split:], y_train[:split], y_train[split:]\n\n\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n\n activation='relu',\n\n input_shape=( 32, 32, 1)))\n\n\n\nmodel.add(Conv2D(64, (3, 3), 
activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(128, activation='relu'))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(17, activation='sigmoid'))\n\n\n\nmodel.compile(loss='binary_crossentropy', # We NEED binary here, since categorical_crossentropy l1 norms the output before calculating loss.\n\n optimizer='adam',\n\n metrics=['accuracy'])\n\n \n\nmodel.fit(x_train, y_train,\n\n batch_size=500,\n\n epochs=20,\n\n verbose=1,\n\n validation_data=(x_valid, y_valid))\n\n \n\nfrom sklearn.metrics import fbeta_score\n\n\n\np_valid = model.predict(x_valid, batch_size=128)\n\nprint(y_valid)\n\nprint(p_valid)\n\nprint(fbeta_score(y_valid, np.array(p_valid) > 0.2, beta=2, average='samples'))\nfrom __future__ import division\n\n\n\nimport six\n\nfrom keras.models import Model\n\nfrom keras.layers import (\n\n Input,\n\n Activation,\n\n Dense,\n\n Flatten\n\n)\n\nfrom keras.layers.convolutional import (\n\n Conv2D,\n\n MaxPooling2D,\n\n AveragePooling2D\n\n)\n\nfrom keras.layers.merge import add\n\nfrom keras.layers.normalization import BatchNormalization\n\nfrom keras.regularizers import l2\n\nfrom keras import backend as K\ndef _bn_relu(input):\n\n \"\"\"Helper to build a BN -> relu block\n\n \"\"\"\n\n norm = BatchNormalization(axis=CHANNEL_AXIS)(input)\n\n return Activation(\"relu\")(norm)\n\n\n\n\n\ndef _conv_bn_relu(**conv_params):\n\n \"\"\"Helper to build a conv -> BN -> relu block\n\n \"\"\"\n\n filters = conv_params[\"filters\"]\n\n kernel_size = conv_params[\"kernel_size\"]\n\n strides = conv_params.setdefault(\"strides\", (1, 1))\n\n kernel_initializer = conv_params.setdefault(\"kernel_initializer\", \"he_normal\")\n\n padding = conv_params.setdefault(\"padding\", \"same\")\n\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(1.e-4))\n\n\n\n def f(input):\n\n conv = Conv2D(filters=filters, kernel_size=kernel_size,\n\n strides=strides, padding=padding,\n\n kernel_initializer=kernel_initializer,\n\n kernel_regularizer=kernel_regularizer)(input)\n\n return _bn_relu(conv)\n\n\n\n return f\n\n\n\n\n\ndef _bn_relu_conv(**conv_params):\n\n \"\"\"Helper to build a BN -> relu -> conv block.\n\n This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf\n\n \"\"\"\n\n filters = conv_params[\"filters\"]\n\n kernel_size = conv_params[\"kernel_size\"]\n\n strides = conv_params.setdefault(\"strides\", (1, 1))\n\n kernel_initializer = conv_params.setdefault(\"kernel_initializer\", \"he_normal\")\n\n padding = conv_params.setdefault(\"padding\", \"same\")\n\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(1.e-4))\n\n\n\n def f(input):\n\n activation = _bn_relu(input)\n\n return Conv2D(filters=filters, kernel_size=kernel_size,\n\n strides=strides, padding=padding,\n\n kernel_initializer=kernel_initializer,\n\n kernel_regularizer=kernel_regularizer)(activation)\n\n\n\n return f\n\n\n\n\n\ndef _shortcut(input, residual):\n\n \"\"\"Adds a shortcut between input and residual block and merges them with \"sum\"\n\n \"\"\"\n\n # Expand channels of shortcut to match residual.\n\n # Stride appropriately to match residual (width, height)\n\n # Should be int if network architecture is correctly configured.\n\n input_shape = K.int_shape(input)\n\n residual_shape = K.int_shape(residual)\n\n stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))\n\n stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))\n\n equal_channels = 
input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]\n\n\n\n shortcut = input\n\n # 1 X 1 conv if shape is different. Else identity.\n\n if stride_width > 1 or stride_height > 1 or not equal_channels:\n\n shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],\n\n kernel_size=(1, 1),\n\n strides=(stride_width, stride_height),\n\n padding=\"valid\",\n\n kernel_initializer=\"he_normal\",\n\n kernel_regularizer=l2(0.0001))(input)\n\n\n\n return add([shortcut, residual])\n\n\n\n\n\ndef _residual_block(block_function, filters, repetitions, is_first_layer=False):\n\n \"\"\"Builds a residual block with repeating bottleneck blocks.\n\n \"\"\"\n\n def f(input):\n\n for i in range(repetitions):\n\n init_strides = (1, 1)\n\n if i == 0 and not is_first_layer:\n\n init_strides = (2, 2)\n\n input = block_function(filters=filters, init_strides=init_strides,\n\n is_first_block_of_first_layer=(is_first_layer and i == 0))(input)\n\n return input\n\n\n\n return f\n\n\n\n\n\ndef basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):\n\n \"\"\"Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.\n\n Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf\n\n \"\"\"\n\n def f(input):\n\n\n\n if is_first_block_of_first_layer:\n\n # don't repeat bn->relu since we just did bn->relu->maxpool\n\n conv1 = Conv2D(filters=filters, kernel_size=(3, 3),\n\n strides=init_strides,\n\n padding=\"same\",\n\n kernel_initializer=\"he_normal\",\n\n kernel_regularizer=l2(1e-4))(input)\n\n else:\n\n conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),\n\n strides=init_strides)(input)\n\n\n\n residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)\n\n return _shortcut(input, residual)\n\n\n\n return f\n\n\n\n\n\ndef bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):\n\n \"\"\"Bottleneck architecture for > 34 layer resnet.\n\n Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf\n\n Returns:\n\n A final conv layer of filters * 4\n\n \"\"\"\n\n def f(input):\n\n\n\n if is_first_block_of_first_layer:\n\n # don't repeat bn->relu since we just did bn->relu->maxpool\n\n conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),\n\n strides=init_strides,\n\n padding=\"same\",\n\n kernel_initializer=\"he_normal\",\n\n kernel_regularizer=l2(1e-4))(input)\n\n else:\n\n conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),\n\n strides=init_strides)(input)\n\n\n\n conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)\n\n residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)\n\n return _shortcut(input, residual)\n\n\n\n return f\n\n\n\n\n\ndef _handle_dim_ordering():\n\n global ROW_AXIS\n\n global COL_AXIS\n\n global CHANNEL_AXIS\n\n if K.image_dim_ordering() == 'tf':\n\n ROW_AXIS = 1\n\n COL_AXIS = 2\n\n CHANNEL_AXIS = 3\n\n else:\n\n CHANNEL_AXIS = 1\n\n ROW_AXIS = 2\n\n COL_AXIS = 3\n\n\n\n\n\ndef _get_block(identifier):\n\n if isinstance(identifier, six.string_types):\n\n res = globals().get(identifier)\n\n if not res:\n\n raise ValueError('Invalid {}'.format(identifier))\n\n return res\n\n return identifier\nclass ResnetBuilder(object):\n\n @staticmethod\n\n def build(input_shape, num_outputs, block_fn, repetitions):\n\n \"\"\"Builds a custom ResNet like architecture.\n\n Args:\n\n input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)\n\n num_outputs: The number of outputs at final softmax layer\n\n block_fn: The block function to use. 
This is either `basic_block` or `bottleneck`.\n\n The original paper used basic_block for layers < 50\n\n repetitions: Number of repetitions of various block units.\n\n At each block unit, the number of filters are doubled and the input size is halved\n\n Returns:\n\n The keras `Model`.\n\n \"\"\"\n\n _handle_dim_ordering()\n\n if len(input_shape) != 3:\n\n raise Exception(\"Input shape should be a tuple (nb_channels, nb_rows, nb_cols)\")\n\n\n\n # Permute dimension order if necessary\n\n if K.image_dim_ordering() == 'tf':\n\n input_shape = (input_shape[1], input_shape[2], input_shape[0])\n\n\n\n # Load function from str if needed.\n\n block_fn = _get_block(block_fn)\n\n\n\n input = Input(shape=input_shape)\n\n conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)\n\n pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding=\"same\")(conv1)\n\n\n\n block = pool1\n\n filters = 64\n\n for i, r in enumerate(repetitions):\n\n block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)\n\n filters *= 2\n\n\n\n # Last activation\n\n block = _bn_relu(block)\n\n\n\n # Classifier block\n\n block_shape = K.int_shape(block)\n\n pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),\n\n strides=(1, 1))(block)\n\n flatten1 = Flatten()(pool2)\n\n dense = Dense(units=num_outputs, kernel_initializer=\"he_normal\",\n\n activation=\"softmax\")(flatten1)\n\n\n\n model = Model(inputs=input, outputs=dense)\n\n return model\n\n\n\n @staticmethod\n\n def build_resnet_18(input_shape, num_outputs):\n\n return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2])\n\n\n\n @staticmethod\n\n def build_resnet_34(input_shape, num_outputs):\n\n return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3])\n\n\n\n @staticmethod\n\n def build_resnet_50(input_shape, num_outputs):\n\n return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3])\n\n\n\n @staticmethod\n\n def build_resnet_101(input_shape, num_outputs):\n\n return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3])\n\n\n\n @staticmethod\n\n def build_resnet_152(input_shape, num_outputs):\n\n return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])\nresnet = ResnetBuilder.build_resnet_18([4,64,64], 17)\n\nresnet.compile(loss='binary_crossentropy', # We NEED binary here, since categorical_crossentropy l1 norms the output before calculating loss.\n\n optimizer='adam',\n\n metrics=['accuracy'])\nsplit = 35000\n\nx_train, x_valid, y_train, y_valid = x_train[:split], x_train[split:], y_train[:split], y_train[split:]\n\nresnet.fit(x_train, y_train,\n\n batch_size=248,\n\n epochs=1,\n\n verbose=1,\n\n validation_data=(x_valid, y_valid))\n\n \n\nfrom sklearn.metrics import fbeta_score\n\n\n\np_valid = resnet.predict(x_valid, batch_size=128)\n\nprint(y_valid)\n\nprint(p_valid)\n\nprint(fbeta_score(y_valid, np.array(p_valid) > 0.2, beta=2, average='samples'))\n\nx_test = []\n\nfor idx, tags in (train_labels.values[20000:21000,:,:,1]):\n\n img_jpg = cv2.imread('../input/train-jpg/{}.jpg'.format(idx))\n\n x_test.append(cv2.resize(img_jpg, (64, 64)))\n\nx_test = np.array(x_test)\n\n\ny_test = np.array(encoded_labels.primary[20000:21000])\nx_train_jpg = np.array(x_train_jpg)\n\nx_test_jpg = np.array(x_test_jpg)\n\n\n\ny_primary_train = np.array(encoded_labels.primary[:10000])\n\ny_primary_test = np.array(encoded_labels.primary[10000:20000])\n\n\n\n#y_primary = 
encoded_labels.primary[:10000]\ny_primary = encoded_labels.primary[:10000]\n\nplt.hist(y_primary)\ndata = np.fromfile('../input/train-jpg/',\n\ndtype=np.float32)\n\ndata.shape\n\n(60940800,)\n\ndata.reshape((50,1104,104))\nimport mxnet as mx\n\nbatch_size = 100\n\ntrain_iter = mx.io.NDArrayIter(x_train_jpg, y_primary_train, batch_size, shuffle=True)\n\nval_iter = mx.io.NDArrayIter(x_test_jpg, y_primary_test, batch_size)\ndata = mx.sym.var('data')\n\n# first conv layer\n\nconv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20)\n\ntanh1 = mx.sym.Activation(data=conv1, act_type=\"tanh\")\n\npool1 = mx.sym.Pooling(data=tanh1, pool_type=\"max\", kernel=(2,2), stride=(2,2))\n\n# second conv layer\n\nconv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50)\n\ntanh2 = mx.sym.Activation(data=conv2, act_type=\"tanh\")\n\npool2 = mx.sym.Pooling(data=tanh2, pool_type=\"max\", kernel=(2,2), stride=(2,2))\n\n# first fullc layer\n\nflatten = mx.sym.flatten(data=pool2)\n\nfc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)\n\ntanh3 = mx.sym.Activation(data=fc1, act_type=\"tanh\")\n\n# second fullc\n\nfc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=1)\n\n# softmax loss\n\nlenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')\na = train_iter.next()\na.data\ndata = mx.sym.var('data')\n\n# Flatten the data from 4-D shape into 2-D (batch_size, num_channel*width*height)\n\ndata = mx.sym.flatten(data=data)\n\n# The first fully-connected layer and the corresponding activation function\n\nfc1 = mx.sym.FullyConnected(data=data, num_hidden=128)\n\nact1 = mx.sym.Activation(data=fc1, act_type=\"relu\")\n\n\n\n# The second fully-connected layer and the corresponding activation function\n\nfc2 = mx.sym.FullyConnected(data=act1, num_hidden = 64)\n\nact2 = mx.sym.Activation(data=fc2, act_type=\"relu\")\n\n\n\n# The third fully-connected layer and the corresponding activation function\n\nfc3 = mx.sym.FullyConnected(data=act2, num_hidden = 64)\n\nact3 = mx.sym.Activation(data=fc3, act_type=\"relu\")\n\n\n\nfc4 = mx.sym.FullyConnected(data=act3, num_hidden=1)\n\n# Softmax with cross entropy loss\n\nmlp = mx.sym.SoftmaxOutput(data=fc4, name='softmax')\nimport logging\n\nlogging.getLogger().setLevel(logging.DEBUG) # logging to stdout\n\n# create a trainable module on CPU\n\nmlp_model = mx.mod.Module(symbol=lenet, context=mx.cpu())\n\nmlp_model.fit(train_iter, # train data\n\n eval_data=val_iter, # validation data\n\n optimizer='sgd', # use SGD to train\n\n optimizer_params={'learning_rate':0.1}, # use fixed learning rate\n\n eval_metric='acc', # report accuracy during training\n\n batch_end_callback = mx.callback.Speedometer(batch_size, 100), # output progress for each 100 data batches\n\n num_epoch=4) # train for at most 4 dataset passes\ny_test.shape\ntest_iter = mx.io.NDArrayIter(x_test, y_test, batch_size)\n\n# predict accuracy of mlp\n\nacc = mx.metric.Accuracy()\n\nmlp_model.score(test_iter, acc)\n\nprint(acc)\nx_train_tif[0].shape\nimg_jpg = cv2.imread('../input/train-jpg/train_1000.jpg')\nimg_jpg.shape\nimg_tif = cv2.imread('../input/train-tif-v2/train_1000.tif', cv2.IMREAD_UNCHANGED)\nimg_tif.view()[0].shape\nimg_tif_to_bgr = cv2.cvtColor(img_jpg, cv2.COLOR_RGB2BGR)\n\nplt.imshow(img_tif[:,:,3])\n\nplt.axis('off')\n\nimg_bgr = cv2.cvtColor(img_jpg, cv2.COLOR_RGB2BGR)\n\nplt.imshow(img_bgr)\n\nplt.axis('off')\nplt.imshow(img_jpg)\n\nplt.axis('off')\nimg_blur = cv2.GaussianBlur(img_bgr,(5,5),0)\n\n#ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\ntitles = 
['img_jpg','img_tif','img_bgr','img_blur']#,'TOZERO','TOZERO_INV']\n\nimages = [img_jpg,img_tif[:,:,3],img_bgr,img_blur]\n\n\n\nfor i in range(len(images)):\n\n plt.figure(figsize=(20,10))\n\n plt.subplot(2,3,i+1),plt.imshow(images[i])\n\n plt.title(titles[i])\n\n plt.axis('off')\n\n #plt.xticks([]),plt.yticks([])\n\n\n\nplt.show()","repo_name":"aorursy/new-nb-8","sub_path":"yochanan_notebookc00f44a4b3.py","file_name":"yochanan_notebookc00f44a4b3.py","file_ext":"py","file_size_in_byte":17483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33664754245","text":"import logging\n\n\nimport kivy\n\nfrom kivy.app import App\n\nfrom kivy.clock import Clock\n\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.behaviors import DragBehavior\nfrom kivy.metrics import sp\nfrom kivy.properties import ObjectProperty\n\nimport sys\nimport os\nimport threading\nfrom glob import glob\nfrom queue import Queue\n\nimport sync_dl.config as cfg\n\nfrom sync_dl.helpers import getLocalSongs\nclass LabelPlate(Button):\n pass\n\nclass ConsoleHandler(logging.Handler):\n\n def __init__(self, console, level=logging.NOTSET):\n logging.Handler.__init__(self, level=level)\n self.console = console\n\n def emit(self, record):\n Clock.schedule_once(lambda x:self.console.append(self.format(record)))\n\nclass Console(TextInput):\n\n scrollView = ObjectProperty(None)\n \n def __init__(self, **kwargs):\n super(Console, self).__init__(**kwargs)\n self.keyboard_mode= 'managed'\n\n self.use_bubble=False\n cfg.logger.addHandler(ConsoleHandler(self))\n\n def on_focus(self,instance, value):\n self.focus=False\n \n def on_double_tap(self):\n self.focus=False\n \n def on_triple_tap(self):\n self.focus=False\n \n def on_quad_touch(self):\n self.focus=False\n def append(self,text):\n self.cursor=(0,sys.maxsize)\n self.readonly=False\n self.insert_text(f' ~ {text}\\n')\n self.readonly=True\n self.scrollView.scroll_y = 0\n\n\nclass PlaylistList(GridLayout):\n def __init__(self, **kwargs):\n super(PlaylistList, self).__init__(**kwargs)\n\n self.updateList()\n\n def updateList(self):\n '''\n Populates list of existing playlists as buttons\n '''\n self.clear_widgets()\n playlists = os.listdir(cfg.musicDir)\n\n for playlist in playlists:\n if glob(f\"{cfg.musicDir}/{playlist}/{cfg.metaDataName}*\"):\n button = Button(\n text=playlist,\n on_press = self.playlistClicked\n )\n self.add_widget(button)\n \n def playlistClicked(self,button):\n manager = App.get_running_app().root.manager\n\n existingPlScreen = manager.get_screen('existingPlScreen')\n existingPlScreen.plName = button.text\n manager.current ='existingPlScreen'\n manager.transition.direction = \"left\"\n\nclass SongList(GridLayout):\n def __init__(self, **kwargs):\n super(SongList, self).__init__(**kwargs)\n\n def getOrder(self):\n '''\n gets newOrder to be passed to editPlaylist function in sync_dl, format is (None,newIndex)\n None is a placeholder, if the song had to be downloaded this would be where the id would go\n '''\n newOrder=[]\n\n for i in reversed(range(len(self.children))): # we iterate in reverse because grid layout behaves\n # like a stack, 0 index is last\n newOrder.append((None,self.children[i].initalIndex))\n\n return newOrder\n\n def updateSongs(self,plPath):\n self.clear_widgets()\n localSongs = getLocalSongs(plPath)\n\n for i,song in enumerate(localSongs):\n 
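# Editor note: each song becomes a draggable row; initalIndex stores the song's\n # original position so getOrder() can later emit (None, initalIndex) pairs in the\n # new on-screen order.\n 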
self.add_widget(DragLabel(self,i,text=song))\n \n\n\nclass DragLabel(DragBehavior, Label):\n\n def __init__(self,grid,initalIndex, **kwargs):\n super(DragLabel, self).__init__(**kwargs)\n self.moving = False\n self.grid = grid\n self.initalIndex = initalIndex\n\n self.size_hint_y = None\n self.dragFontSize = 1.4*self.font_size\n self.height = 2*self.font_size\n self.initalFontSize = self.font_size\n\n\n\n def findNearestSlot(self):\n for i,element in enumerate(self.grid.children):\n if element.y > self.y:\n return i\n\n return len(self.grid.children)\n\n def on_touch_up(self,touch):\n super().on_touch_up(touch)\n if self.moving and self.collide_point(touch.x,touch.y):\n self.grid.remove_widget(self)\n index = self.findNearestSlot()\n self.grid.add_widget(self,index)\n self.font_size=self.initalFontSize\n\n\n def on_touch_down(self,touch):\n super().on_touch_down(touch)\n\n if self.collide_point(touch.x,touch.y):\n \n self.font_size=self.dragFontSize\n \n self.moving = True\n\n\nclass CustomTextInput(TextInput):\n def __init__(self,**kwargs):\n super(CustomTextInput, self).__init__(**kwargs)\n\n self.use_bubble = True\n\n def _hide_cut_copy_paste(self, win=None):\n bubble = self._bubble\n\n if not bubble:\n return\n\n\n def on_touch_down(self,touch):\n super().on_touch_down(touch)\n bubble = self._bubble\n\n if not bubble:\n return\n self._bubble.hide()","repo_name":"PrinceOfPuppers/sync-dl-gui","sub_path":"sync_dl_gui/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8116627188","text":"from collections import defaultdict\nn,c = map(int,input().split())\nfirst = defaultdict(int)\nsecond = defaultdict(int)\n\nfor i in range(n):\n if i%2 ==0:\n first[int(input())]+=1\n else:\n second[int(input())]+=1\n\nfirst_rank =[]\nsecond_rank = []\nfor key,val in first.items():\n first_rank.append((val,key))\nfor key,val in second.items():\n second_rank.append((val,key)) \n\nfirst_rank.sort(reverse=True)\nsecond_rank.sort(reverse=True)\n\nf_val,f_key = first_rank[0]\ns_val,s_key = second_rank[0]\nf_use = 0\ns_use = 0\nif f_key ==s_key:\n if f_val>s_val:\n f_use = f_val\n if len(second_rank)>1:\n s_val,s_key = second_rank[1]\n s_use = s_val\n else:\n s_use = 0\n else:\n s_use = s_val\n if len(first_rank)>1:\n f_val,f_key = first_rank[1]\n f_use = f_val\n else:\n f_use = 0\nelse:\n f_use = f_val\n s_use = s_val\nif n%2 ==0:\n print(((n//2-f_use)+(n//2-s_use))*c)\nelse:\n print(((n//2-f_use+1)+(n//2-s_use))*c)","repo_name":"masaya722/msya3","sub_path":"ProgramingContest/AtCoderRegularContest/020/shimashima.py","file_name":"shimashima.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30672487012","text":"from __future__ import print_function, unicode_literals\n\nimport frappe\nfrom frappe.custom.doctype.property_setter.property_setter import make_property_setter\n\ndoctype_series_map = {\n\t'Attendance': 'ATT-',\n\t'C-Form': 'C-FORM-',\n\t'Customer': 'CUST-',\n\t'Warranty Claim': 'CI-',\n\t'Delivery Note': 'DN-',\n\t'Installation Note': 'IN-',\n\t'Item': 'ITEM-',\n\t'Journal Entry': 'JV-',\n\t'Lead': 'LEAD-',\n\t'Opportunity': 'OPTY-',\n\t'Packing Slip': 'PS-',\n\t'Production Order': 'PRO-',\n\t'Purchase Invoice': 'PINV-',\n\t'Purchase Order': 'PO-',\n\t'Purchase Receipt': 'PREC-',\n\t'Quality Inspection': 'QI-',\n\t'Quotation': 'QTN-',\n\t'Sales Invoice': 
'SINV-',\n\t'Sales Order': 'SO-',\n\t'Stock Entry': 'STE-',\n\t'Supplier': 'SUPP-',\n\t'Supplier Quotation': 'SQTN-',\n\t'Issue': 'SUP-'\n}\n\ndef execute():\n\tseries_to_set = get_series_to_set()\n\tfor doctype, opts in series_to_set.items():\n\t\tset_series(doctype, opts[\"options\"], opts[\"default\"])\n\ndef set_series(doctype, options, default):\n\tmake_property_setter(doctype, \"naming_series\", \"options\", options, \"Text\")\n\tmake_property_setter(doctype, \"naming_series\", \"default\", default, \"Text\")\n\ndef get_series_to_set():\n\tseries_to_set = {}\n\n\tfor doctype, new_series in doctype_series_map.items():\n\t\t# you can't fix what does not exist :)\n\t\tif not frappe.db.a_row_exists(doctype):\n\t\t\tcontinue\n\n\t\tseries_to_preserve = get_series_to_preserve(doctype, new_series)\n\n\t\tif not series_to_preserve:\n\t\t\tcontinue\n\n\t\tdefault_series = get_default_series(doctype, new_series)\n\t\tif not default_series:\n\t\t\tcontinue\n\n\t\texisting_series = (frappe.get_meta(doctype).get_field(\"naming_series\").options or \"\").split(\"\\n\")\n\t\t# materialize the filter as a list so it can be iterated twice and len() works on Python 3\n\t\texisting_series = list(filter(None, [d.strip() for d in existing_series]))\n\n\t\tif (not (set(existing_series).difference(series_to_preserve) or set(series_to_preserve).difference(existing_series))\n\t\t\tand len(series_to_preserve)==len(existing_series)):\n\t\t\t# print \"No change for\", doctype, \":\", existing_series, \"=\", series_to_preserve\n\t\t\tcontinue\n\n\t\t# set naming series property setter\n\t\tseries_to_preserve = list(set(series_to_preserve + existing_series))\n\t\tif new_series in series_to_preserve:\n\t\t\tseries_to_preserve.remove(new_series)\n\n\t\tif series_to_preserve:\n\t\t\tseries_to_set[doctype] = {\"options\": \"\\n\".join(series_to_preserve), \"default\": default_series}\n\n\treturn series_to_set\n\ndef get_series_to_preserve(doctype, new_series):\n\tseries_to_preserve = frappe.db.sql_list(\"\"\"select distinct naming_series from `tab{doctype}`\n\t\twhere ifnull(naming_series, '') not in ('', %s)\"\"\".format(doctype=doctype), new_series)\n\n\tseries_to_preserve.sort()\n\n\treturn series_to_preserve\n\ndef get_default_series(doctype, new_series):\n\tdefault_series = frappe.db.sql(\"\"\"select naming_series from `tab{doctype}` where ifnull(naming_series, '') not in ('', %s)\n\t\tand creation=(select max(creation) from `tab{doctype}`\n\t\t\twhere ifnull(naming_series, '') not in ('', %s)) order by creation desc limit 1\"\"\".format(doctype=doctype),\n\t\t(new_series, new_series))\n\n\tif not (default_series and default_series[0][0]):\n\t\tprint(\"[Skipping] Cannot guess which naming series to use for\", doctype)\n\t\treturn\n\n\treturn default_series[0][0]\n","repo_name":"shrikant9867/Dairy_Project_erpnext","sub_path":"erpnext/patches/v4_0/set_naming_series_property_setter.py","file_name":"set_naming_series_property_setter.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"19360582400","text":"import json\nimport hashlib\nimport boto3\nimport base64\n\nSQS_QUEUE_NAME = 'NewUserEncodingQueue'\nUSER_DATA_TABLE_NAME = 'user-data'\nsalt = 'sb4539,ya2467,sg4021'\n\ndef lambda_handler(event, context):\n print(\"Event: {}\".format(event))\n \n \"\"\"\n Initialize the resources that will be needed in this function\n \"\"\"\n client = boto3.resource('dynamodb')\n table = client.Table(USER_DATA_TABLE_NAME)\n sqs = boto3.client('sqs')\n \n # Decode the event body since it is base64 encoded\n event = 
json.loads(base64.b64decode(event[\"body\"]))\n \n \"\"\"\n Check whether the user already exist in our database\n \"\"\"\n checkIfUserAlreadyRegistered = table.get_item(Key={'userId':event[\"personalInformation\"][\"email\"]})\n if \"Item\" in checkIfUserAlreadyRegistered:\n return {\n 'isBase64Encoded': False,\n 'statusCode': 409,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,Accept',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,PUT,GET'\n },\n 'body':'User already registered'\n }\n \n \"\"\"\n Prepare the raw data\n \"\"\"\n userPassword = str(hashlib.md5((event[\"personalInformation\"]['password']+salt).encode()).hexdigest())\n event['password'] = userPassword\n event['encoding'] = ''\n event['userId'] = event[\"personalInformation\"][\"email\"]\n del event[\"personalInformation\"][\"password\"]\n \n \"\"\"\n Insert into DynamoDB table\n \"\"\"\n try:\n response = table.put_item(Item=event)\n print(\"Table Response: {}\".format(response))\n except Exception as e:\n print(\"Table Error: {}\".format(e))\n return {\n 'isBase64Encoded': False,\n 'statusCode': 500,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,Accept',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,PUT,GET'\n },\n 'body':'Encountered an error while adding user to the table. Error: {}'.format(str(e))\n }\n \n \"\"\"\n Add the new registered user to the SQS encoding queue to create encodings\n \"\"\"\n try:\n url = sqs.get_queue_url(QueueName=SQS_QUEUE_NAME)['QueueUrl']\n response = sqs.send_message(QueueUrl=url, MessageBody=json.dumps(event))\n print(\"SQS Response: {}\".format(response))\n except Exception as e:\n table.delete_item(Key={'userId': event['userId']})\n print(\"SQS Error: {}\".format(e))\n return {\n 'isBase64Encoded': False,\n 'statusCode': 500,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,Accept',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,PUT,GET'\n },\n 'body':'Encountered an error while adding user to encoding queue. 
Error: {}'.format(str(e))\n }\n \n return {\n 'isBase64Encoded': False,\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,Accept',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,PUT,GET'\n },\n 'body':'User registered'\n }","repo_name":"reficul31/referral-connect-backend","sub_path":"register-lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32809371898","text":"\"\"\"\nAllows subs to charge and fire (stunning) weapons.\n\"\"\"\n\nfrom ALTANTIS.subs.state import get_sub_objects\nfrom ALTANTIS.npcs.npc import get_npc_objects\nfrom ALTANTIS.utils.direction import diagonal_distance\nfrom ALTANTIS.utils.text import list_to_and_separated\nfrom ALTANTIS.utils.entity import Entity\nfrom ALTANTIS.world.world import in_world\nfrom ..sub import Submarine\n\nimport math\nfrom random import shuffle\nfrom typing import Tuple, Dict, List\n\nclass Weaponry():\n def __init__(self, sub : Submarine):\n self.sub = sub\n self.weapons_charge = 1\n self.range = 4\n self.planned_shots : List[Tuple[bool, int, int]] = []\n \n def prepare_shot(self, damaging : bool, x : int, y : int) -> str:\n if not in_world(x, y):\n return \"Coordinate outside of world.\"\n if diagonal_distance(self.sub.movement.get_position(), (x,y)) > self.range:\n return \"Coordinate outside of range.\"\n if damaging and self.weapons_charge >= 2:\n self.planned_shots.append((True, x, y))\n self.weapons_charge -= 2\n return f\"Damaging shot fired at ({x}, {y})!\"\n if (not damaging) and self.weapons_charge >= 1:\n self.planned_shots.append((False, x, y))\n self.weapons_charge -= 1\n return f\"Non-damaging shot fired at ({x}, {y})!\"\n return \"Not enough charge to use that.\"\n \n def weaponry_tick(self) -> str:\n # Do the hits for the current turn:\n results = \"\"\n for shot in self.planned_shots:\n (damaging, x, y) = shot\n hits = {}\n if damaging:\n hits = self.damaging(x, y)\n else:\n hits = self.nondamaging(x, y)\n direct_hits = list_to_and_separated(list(map(lambda entity: entity.name(), hits[\"direct\"])))\n if direct_hits == \"\":\n direct_hits = \"nobody\"\n indirect_hits = list_to_and_separated(list(map(lambda entity: entity.name(), hits[\"indirect\"])))\n if indirect_hits == \"\":\n indirect_hits = \"nobody\"\n damaging_str = \"damaging\" if damaging else \"non-damaging\"\n results += f\"Shot {damaging_str} shot at ({x}, {y}) - directly hit {direct_hits}; indirectly hit {indirect_hits}.\\n\"\n self.planned_shots = []\n\n # Then recharge.\n weapons_power = self.sub.power.get_power(\"weapons\")\n recharge = math.ceil(weapons_power / 2)\n old_charge = self.weapons_charge\n self.weapons_charge = min(weapons_power, old_charge + recharge)\n if old_charge != self.weapons_charge:\n return f\"{results}Recharged weapons up to {self.weapons_charge} charge!\"\n return results\n \n def hits(self, x : int, y : int) -> Dict[str, List[Entity]]:\n # Returns a list of indirect and direct hits.\n indirect : List[Entity] = []\n direct : List[Entity] = []\n for sub in get_sub_objects():\n pos = sub.movement.get_position()\n distance = diagonal_distance(pos, (x, y))\n if distance == 0:\n direct.append(sub)\n elif distance == 1:\n indirect.append(sub)\n \n for npc in get_npc_objects():\n pos = npc.get_position()\n distance = diagonal_distance(pos, (x, y))\n if distance == 
0:\n direct.append(npc)\n elif distance == 1:\n indirect.append(npc)\n\n shuffle(indirect)\n shuffle(direct)\n return {\"indirect\": indirect, \"direct\": direct}\n \n def nondamaging(self, x : int, y : int) -> Dict[str, List[Entity]]:\n results = self.hits(x, y)\n for target in results[\"direct\"]:\n if target.is_weak():\n target.damage(1)\n for target in results[\"indirect\"]:\n if target.is_weak():\n target.damage(1)\n return results\n \n def damage_mod(self, entity : Entity) -> int:\n mod = 0\n if entity.is_carbon():\n if \"anticarbon\" in self.sub.upgrades.keywords:\n mod += 1\n else:\n if \"antiplastic\" in self.sub.upgrades.keywords:\n mod += 1\n return mod\n\n def damaging(self, x : int, y : int) -> Dict[str, List[Entity]]:\n results = self.hits(x, y)\n for target in results[\"indirect\"]:\n target.damage(2 + self.damage_mod(target))\n for target in results[\"direct\"]:\n target.damage(1 + self.damage_mod(target))\n return results\n \n def status(self) -> str:\n weapons_power = self.sub.power.get_power(\"weapons\")\n if weapons_power == 0:\n return \"\"\n return f\"\\nWeapons are powered with {self.weapons_charge} weapons charge(s) available (maximum {weapons_power}).\\n\"","repo_name":"finnbar/ALTANTIS","sub_path":"ALTANTIS/subs/subsystems/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36166166415","text":"# function to figure out if the list is valid\ndef listValid(currentList):\n\n # split the list up\n splitList = currentList.split()\n \n # go through each entry\n for i in splitList:\n \n # numeric?\n if not i.isnumeric():\n \n # invalid list!\n return False\n \n # default\n return True\n\n# sum calculation function\ndef sumList(providedList):\n \n # split the list into separate numbers\n splitList = providedList.split()\n \n # validation\n if len(splitList) > 1:\n \n # calculate sum based on first and last\n s = int(splitList[0]) + int(splitList[-1])\n \n # return the sum\n return s\n \n elif len(splitList) == 1:\n \n # sum is integer itself\n return int(splitList[0])\n \n else:\n \n # invalid!\n return None\n \n# grab first list\nfirstList = input(\"List 1: \")\nsecondList = input(\"List 2: \")\n\n# valid lists?\nif listValid(firstList) and listValid(secondList):\n\n # calculate sums\n firstSum = sumList(firstList)\n secondSum = sumList(secondList)\n\n # validation\n if firstSum is not None and secondSum is not None:\n\n # comparison\n largest = max(firstSum, secondSum)\n \n # print result\n print(\"Output: \" + str(largest))\n \n else:\n\n # print error\n print(\"Please enter valid lists\")\n \nelse:\n\n # print error\n print(\"Please enter valid lists\")","repo_name":"Brhsoftco/UniversityWork","sub_path":"BInfoTech/2807ICT - Programming Principles/Workshops/Week6/week6_problem2.py","file_name":"week6_problem2.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34177589229","text":"import os.path\nimport sys\nfrom Chap05_040 import Morph\nfrom Chap05_041 import Chunk, cabocha_to_chunk_list\n\nwith open(os.path.normcase(\"output/Chapter5/neko.txt.cabocha\"), \"r\", encoding='utf-8') as f:\n cabocha = [l.rstrip(\"\\n\") for l in f.readlines()]\n\nsentences = cabocha_to_chunk_list(cabocha)\n\nnode_list = []\nchunk_list = sentences[int(sys.argv[1])]\nfor chunk in chunk_list:\n dst = chunk.dst\n if dst != -1:\n str_srcs = 
\"\".join([m.surface for m in chunk.morphs if m.pos != \"記号\"])\n str_dst = \"\".join([m.surface for m in chunk_list[dst].morphs if m.pos != \"記号\"])\n node_list.append([str_srcs, str_dst])\n\nwith open(os.path.normcase(\"output/Chapter5/_044.dot\"), \"w\", encoding='utf-8') as f:\n f.write('digraph graphname {\\nnode [fontname=\"IPAexGothic\"];\\n')\n for nodes in node_list:\n f.write('\"{0}\" -> \"{1}\";\\n'.format(nodes[0], nodes[1]))\n f.write(\"}\\n\")\n","repo_name":"whatalnk/nlp100","sub_path":"Chap05_044.py","file_name":"Chap05_044.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"25486483","text":"import requests\nfrom googlesearch import search\n\ndef get_url(name):\n for j in search(name, tld=\"co.in\", num=1):\n res = j\n break\n return res\n\nprint(get_url(\"باما 206\"))","repo_name":"alizahedzadeh/Bama_Scrapper","sub_path":"scripts/find_by_google.py","file_name":"find_by_google.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"17847330969","text":"def Zodiac(month,day):\n n=('摩羯座','水瓶座','双鱼座','白羊座','金牛座','双子座','巨蟹座','狮子座','处女座','天秤座','天蝎座','射手座','摩羯座')\n d=(20,19,21,20,21,22,23,23,23,24,23,22)\n if day 0:\n imis_premium = Premium.objects.get(policy=imis_policy, validity_to__isnull=True)\n fhir_premium = cls.build_contract_asset_premium_extension(asset_extensions, imis_premium)\n if type(contract_term_asset.extension) is not list:\n contract_term_asset.extension = [fhir_premium]\n else:\n contract_term_asset.extension.append(fhir_premium)\n\n @classmethod\n def build_contract_asset_premium_extension(cls, asset_extensions, imis_premium):\n cls.build_premium_payer_ext(asset_extensions)\n cls.build_premium_category_ext(asset_extensions)\n cls.build_premium_amount_ext(asset_extensions, imis_premium)\n cls.build_premium_receipt_ext(asset_extensions, imis_premium)\n cls.build_premium_date_ext(asset_extensions, imis_premium)\n cls.build_premium_type_ext(asset_extensions, imis_premium)\n return asset_extensions\n\n @classmethod\n def build_premium_payer_ext(cls, asset_extensions):\n extension = Extension.construct()\n extension.url = \"payer\"\n system = f\"{GeneralConfiguration.get_system_base_url()}CodeSystem/contract-premium-payer\"\n extension.valueCodeableConcept = cls.build_codeable_concept(code=\"beneficiary\", system=system)\n if len(extension.valueCodeableConcept.coding) == 1:\n extension.valueCodeableConcept.coding[0].display = _(\"Beneficiary\")\n asset_extensions.extension = [extension]\n\n @classmethod\n def build_premium_category_ext(cls, asset_extensions):\n extension = Extension.construct()\n extension.url = \"category\"\n system = f\"{GeneralConfiguration.get_system_base_url()}CodeSystem/contract-premium-category\"\n extension.valueCodeableConcept = cls.build_codeable_concept(code=\"C\", system=system)\n if len(extension.valueCodeableConcept.coding) == 1:\n extension.valueCodeableConcept.coding[0].display = _(\"Contribution and Others\")\n asset_extensions.extension.append(extension)\n\n @classmethod\n def build_premium_amount_ext(cls, asset_extensions, imis_premium):\n # get the currency defined in configs from core module\n if hasattr(core, 'currency'):\n currency = core.currency\n else:\n currency = \"EUR\"\n\n extension = Extension.construct()\n extension.url = \"amount\"\n money = Money(**{\n \"value\": imis_premium.amount,\n \"currency\": currency\n })\n 
extension.valueMoney = money\n asset_extensions.extension.append(extension)\n\n @classmethod\n def build_premium_receipt_ext(cls, asset_extensions, imis_premium):\n extension = Extension.construct()\n extension.url = \"receipt\"\n extension.valueString = imis_premium.receipt\n asset_extensions.extension.append(extension)\n\n @classmethod\n def build_premium_date_ext(cls, asset_extensions, imis_premium):\n extension = Extension.construct()\n extension.url = \"date\"\n extension.valueDate = imis_premium.pay_date\n asset_extensions.extension.append(extension)\n\n @classmethod\n def build_premium_type_ext(cls, asset_extensions, imis_premium):\n extension = Extension.construct()\n extension.url = \"type\"\n system = f\"{GeneralConfiguration.get_system_base_url()}CodeSystem/contract-premium-type\"\n extension.valueCodeableConcept = cls.build_codeable_concept(code=imis_premium.pay_type, system=system)\n if len(extension.valueCodeableConcept.coding) == 1:\n extension.valueCodeableConcept.coding[0].display = PayTypeMapping.pay_type[imis_premium.pay_type]\n asset_extensions.extension.append(extension)\n\n @classmethod\n def build_contract_asset_use_period(cls, contract_asset, imis_policy):\n period_use = Period.construct()\n period = Period.construct()\n if imis_policy.start_date is not None:\n period.start = imis_policy.start_date.strftime(\"%Y-%m-%d\")\n period_use.start = period.start\n if imis_policy.effective_date is not None:\n period_use.start = imis_policy.effective_date.strftime(\"%Y-%m-%d\")\n if period_use.start is None:\n period.start = period_use.start\n if imis_policy.expiry_date is not None:\n period_use.end = imis_policy.expiry_date.strftime(\"%Y-%m-%d\")\n period.end = period_use.end\n\n if type(contract_asset.usePeriod) is not list:\n contract_asset.usePeriod = [period_use]\n else:\n contract_asset.usePeriod.append(period_use)\n if type(contract_asset.period) is not list:\n contract_asset.period = [period]\n else:\n contract_asset.period.append(period)\n return contract_asset\n\n @classmethod\n def build_contract_term_offer(cls, contract_term_offer, imis_policy, reference_type):\n offer = ContractTermOffer.construct()\n\n offer_party = ContractTermOfferParty.construct()\n offer_party.reference = [PatientConverter.build_fhir_resource_reference(imis_policy.family.head_insuree, 'Patient')]\n system = f\"{GeneralConfiguration.get_system_base_url()}CodeSystem/contract-resource-party-role\"\n offer_party.role = cls.build_codeable_concept(code=\"beneficiary\", system=system)\n if len(offer_party.role.coding) == 1:\n offer_party.role.coding[0].display = _(\"Beneficiary\")\n\n offer.party = [offer_party]\n contract_term_offer.offer = offer\n\n @classmethod\n def build_contract_status(cls, contract, imis_policy):\n if f\"{imis_policy.status}\" in ContractStatus.contract_status:\n contract.status = ContractStatus.contract_status[f\"{imis_policy.status}\"]\n else:\n contract.status = imis_policy.status\n return contract\n\n @classmethod\n def build_contract_state(cls, contract, imis_policy):\n if f\"{imis_policy.stage}\" in ContractState.contract_state:\n contract.legalState = cls.build_simple_codeable_concept(ContractState.contract_state[f\"{imis_policy.stage}\"])\n else:\n contract.legalState = cls.build_simple_codeable_concept(imis_policy.stage)\n return contract\n\n @classmethod\n def build_contract_valued_item_entity(cls, contract_asset, imis_policy):\n valued_item = ContractTermAssetValuedItem.construct()\n typeReference = cls.build_fhir_resource_reference(imis_policy.product, 
\"InsurancePlan\", imis_policy.product.code)\n valued_item.entityReference = typeReference\n policy_value = Money.construct()\n policy_value.value = imis_policy.value\n valued_item.net = policy_value\n if type(contract_asset.valuedItem) is not list:\n contract_asset.valuedItem = [valued_item]\n else:\n contract_asset.valuedItem.append(valued_item)\n return contract_asset\n\n @classmethod\n def build_contract_asset_type_reference(cls, contract_asset, imis_policy, reference_type):\n # type reference - take insurees covered as a policy patient\n from core import datetime\n now = datetime.datetime.now()\n\n list_insuree_policy = InsureePolicy.objects.filter(\n Q(policy=imis_policy),\n Q(validity_from__lte=now),\n Q(validity_to__isnull=True) | Q(validity_to__gte=now),\n ).only('insuree')\n\n for insuree_policy in list_insuree_policy:\n insuree = insuree_policy.insuree\n type_reference = cls.build_fhir_resource_reference(\n insuree, \"Patient\", insuree.chf_id, reference_type=reference_type\n )\n if type(contract_asset.typeReference) is not list:\n contract_asset.typeReference = [type_reference]\n else:\n contract_asset.typeReference.append(type_reference)\n\n return contract_asset\n\n @classmethod\n def build_imis_period(cls, imis_policy, fhir_contract, errors):\n for term in fhir_contract:\n if term.asset:\n for asset in term.asset:\n if asset.period:\n for period in asset.period:\n if not cls.valid_condition(period.start is None, _('Missing `period start` attribute'),errors):\n imis_policy.start_date = TimeUtils.str_to_date(period.start)\n imis_policy.enroll_date = TimeUtils.str_to_date(period.start)\n if not cls.valid_condition(period.end is None, _('Missing `period end` attribute'),errors):\n imis_policy.expiry_date = TimeUtils.str_to_date(period.end)\n else:\n cls.valid_condition(not asset.period, _('Missing `period` attribute'),errors)\n\n @classmethod\n def build_imis_useperiod(cls, imis_policy,fhir_contract,errors):\n for term in fhir_contract:\n if term.asset:\n for asset in term.asset:\n if asset.usePeriod:\n for period in asset.usePeriod:\n if not cls.valid_condition(period.start is None, _('Missing `usePeriod start` attribute'),errors):\n imis_policy.effective_date = TimeUtils.str_to_date(period.start)\n if not cls.valid_condition(period.end is None, _('Missing `usePeriod end` attribute'),errors):\n imis_policy.expiry_date = TimeUtils.str_to_date(period.end)\n else:\n cls.valid_condition(not asset.usePeriod, _('Missing `usePeriod` attribute'),errors)\n\n @classmethod\n def build_imis_status(cls, fhir_contract, imis_policy,errors):\n if fhir_contract.status:\n if fhir_contract.status == R4CoverageConfig.get_status_idle_code():\n imis_policy.status = ContractStatus.imis_map_status(R4CoverageConfig.get_status_idle_code(), imis_policy)\n elif fhir_contract.status == R4CoverageConfig.get_status_active_code():\n imis_policy.status = ContractStatus.imis_map_status(R4CoverageConfig.get_status_active_code(), imis_policy)\n elif fhir_contract.status == R4CoverageConfig.get_status_suspended_code():\n imis_policy.status = ContractStatus.imis_map_status(R4CoverageConfig.get_status_suspended_code(), imis_policy)\n elif fhir_contract.status == R4CoverageConfig.get_status_expired_code():\n imis_policy.status = ContractStatus.imis_map_status(R4CoverageConfig.get_status_expired_code(), imis_policy)\n else:\n imis_policy.status = ContractStatus.imis_map_status(R4CoverageConfig.get_status_idle_code(), imis_policy)\n else:\n cls.valid_condition(fhir_contract.status is None, _('Missing `status` 
attribute'),errors)\n\n @classmethod\n def build_imis_author(cls, fhir_contract, imis_policy, errors):\n if fhir_contract.author:\n reference = fhir_contract.author.reference.split(\"Practitioner/\", 2)\n imis_policy.officer = Officer.objects.get(uuid=reference[1])\n else:\n cls.valid_condition(not fhir_contract.author, _('Missing `author` attribute'), errors)\n\n @classmethod\n def build_imis_subject(cls, fhir_contract, imis_policy, errors):\n from api_fhir_r4.converters.groupConverter import GroupConverter\n if cls.valid_condition(not bool(fhir_contract.subject), _('Missing `subject` attribute'), errors):\n return\n\n ref = fhir_contract.subject[0]\n reference_type = cls.get_resource_type_from_reference(ref)\n if reference_type == 'Group':\n family = GroupConverter.get_imis_obj_by_fhir_reference(ref)\n if family is None:\n raise FHIRException(\n F\"Invalid group reference `{ref}`, no family matching \"\n F\"provided resource_id.\"\n )\n elif reference_type == 'Patient':\n patient = PatientConverter.get_imis_obj_by_fhir_reference(ref)\n family = cls._get_or_build_insuree_family(patient)\n else:\n raise FHIRException(\"Contract subject reference is neither `Group` nor `Patient`\")\n imis_policy.family = family\n\n @classmethod\n def build_imis_signer(cls, fhir_contract, imis_policy, errors):\n if fhir_contract.signer:\n for signer in fhir_contract.signer:\n if signer.type:\n if signer.type.text and signer.party.reference is not None:\n if signer.type.text == 'HeadOfFamily':\n reference = signer.party.reference.split(\"/\", 2)\n try:\n insuree = Insuree.objects.get(uuid=reference[1])\n if insuree.head:\n imis_policy.family= Family.objects.filter(head_insuree=insuree).first()\n else:\n cls.valid_condition(True, _('Missing `Member details provided belong to a depedant` attribute'),errors)\n except:\n cls.valid_condition(True, _('Missing `Family head provided does not exist` attribute'),errors)\n elif signer.type.text == 'EnrolmentOfficer':\n reference = signer.party.reference.split(\"/\", 2)\n imis_policy.officer = Officer.objects.get(uuid=reference[1])\n else:\n pass\n else:\n cls.valid_condition(signer.type is None, _('Missing `type` attribute'),errors)\n else:\n cls.valid_condition(not fhir_contract.signer, _('Missing `signer` attribute'),errors)\n\n @classmethod\n def build_imis_insurees(cls, fhir_contract, imis_policy, errors):\n if fhir_contract.term:\n insurees =[]\n for term in fhir_contract.term:\n if term.asset:\n for asset in term.asset:\n if asset.typeReference:\n for item in asset.typeReference:\n if item.reference is not None:\n reference = item.reference.split(\"Patient/\", 2)\n obj = Insuree.objects.get(uuid=reference[1])\n if imis_policy.family_id is not None:\n if obj.family == imis_policy.family:\n if type(insurees) is not list:\n insurees = [obj.uuid]\n else:\n insurees.append(obj.uuid)\n else:\n if 'Missing `Invalid Context reference` attribute' not in errors:\n cls.valid_condition(True, _('Missing `Invalid Context reference` attribute'),errors)\n imis_policy.insurees = insurees\n else:\n cls.valid_condition(not asset.context, _('Missing `context` attribute'),errors)\n else:\n cls.valid_condition(not term.asset, _('Missing `asset` attribute'),errors)\n\n else:\n cls.valid_condition(not fhir_contract, _('Missing `term` attribute'),errors)\n\n @classmethod\n def build_imis_product(cls,fhir_contract, imis_policy, errors):\n if fhir_contract.term:\n for term in fhir_contract.term:\n if term.asset:\n for asset in term.asset:\n if asset.valuedItem:\n for item in 
asset.valuedItem:\n if item.entityReference is not None:\n if item.entityReference.reference is not None:\n reference = item.entityReference.reference.split(\"InsurancePlan/\", 2)\n imis_policy.product = Product.objects.get(uuid=reference[1])\n if item.net is not None:\n if item.net.value is not None:\n imis_policy.value = item.net.value\n else:\n cls.valid_condition(not asset.valuedItem, _('Missing `valuedItem` attribute'), errors)\n else:\n cls.valid_condition(not term.asset, _('Missing `asset` attribute'), errors)\n\n else:\n cls.valid_condition(not fhir_contract, _('Missing `term` attribute'), errors)\n\n @classmethod\n def build_imis_state(cls,fhir_contract, imis_policy, errors):\n if fhir_contract.legalState:\n if fhir_contract.legalState.text:\n if fhir_contract.legalState.text == R4CoverageConfig.get_status_offered_code():\n imis_policy.stage = ContractState.imis_map_stage(R4CoverageConfig.get_status_offered_code(), imis_policy)\n elif fhir_contract.legalState.text == R4CoverageConfig.get_status_renewed_code():\n imis_policy.stage = ContractState.imis_map_stage(R4CoverageConfig.get_status_renewed_code(), imis_policy)\n else:\n pass\n else:\n cls.valid_condition(fhir_contract.legalState is None, _('Missing `legalState` attribute'), errors)\n\n @classmethod\n def build_imis_contributions(cls, fhir_contract, imis_policy, errors):\n premiums = []\n if fhir_contract.term:\n for term in fhir_contract.term:\n if term.asset:\n for asset in term.asset:\n if asset.extension and len(asset.extension) > 0:\n if len(asset.extension[0].extension) > 0:\n imis_contribution = Premium()\n imis_contribution.uuid = None\n contribution_extensions = asset.extension[0].extension\n for fhir_contribution in contribution_extensions:\n cls.build_imis_contribution(fhir_contribution, imis_contribution)\n premiums.append(imis_contribution)\n imis_policy.contributions = premiums\n\n @classmethod\n def build_imis_contribution(cls, fhir_contribution, imis_contribution):\n if fhir_contribution.url == \"payer\":\n cls.build_imis_contribution_payer(fhir_contribution, imis_contribution)\n if fhir_contribution.url == \"amount\":\n cls.build_imis_contribution_amount(fhir_contribution, imis_contribution)\n if fhir_contribution.url == \"receipt\":\n cls.build_imis_contribution_receipt(fhir_contribution, imis_contribution)\n if fhir_contribution.url == \"date\":\n cls.build_imis_contribution_pay_date(fhir_contribution, imis_contribution)\n if fhir_contribution.url == \"type\":\n cls.build_imis_contribution_pay_type(fhir_contribution, imis_contribution)\n\n @classmethod\n def build_imis_contribution_payer(cls, fhir_contribution, imis_contribution):\n # TODO add payer to contribution\n pass\n\n @classmethod\n def build_imis_contribution_amount(cls, fhir_contribution, imis_contribution):\n imis_contribution.amount = fhir_contribution.valueMoney.value\n\n @classmethod\n def build_imis_contribution_receipt(cls, fhir_contribution, imis_contribution):\n imis_contribution.receipt = fhir_contribution.valueString\n\n @classmethod\n def build_imis_contribution_pay_date(cls, fhir_contribution, imis_contribution):\n imis_contribution.pay_date = fhir_contribution.valueDate\n\n @classmethod\n def build_imis_contribution_pay_type(cls, fhir_contribution, imis_contribution):\n coding = fhir_contribution.valueCodeableConcept.coding\n if len(coding) > 0:\n code = coding[0]\n imis_contribution.pay_type = code.code\n\n @classmethod\n def _get_or_build_insuree_family(cls, insuree: Insuree):\n if insuree.family:\n if insuree.family.head_insuree != 
insuree:\n raise FHIRException(\n \"Patient subject reference is not head of the existing family.\")\n return insuree.family\n else:\n insuree.head = True\n return Family(\n location=insuree.current_village,\n head_insuree=insuree,\n address=insuree.current_address\n )\n\n","repo_name":"openimis/openimis-be-api_fhir_r4_py","sub_path":"api_fhir_r4/converters/contractConverter.py","file_name":"contractConverter.py","file_ext":"py","file_size_in_byte":26609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71328550257","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Conv1D, GlobalMaxPooling1D, SpatialDropout1D\n\nimport tensorflow as tf\nsession_config = tf.ConfigProto(\n log_device_placement=True,\n inter_op_parallelism_threads=0,\n intra_op_parallelism_threads=0,\n allow_soft_placement=True)\nsess = tf.Session(config=session_config)\n\n\nprint(os.listdir('../input/'))\ntrain = pd.read_csv('../input/train.tsv', sep='\\t')\n\nprint('train set: {0}'.format(train.shape))\ntrain.head(10)\ntest = pd.read_csv('../input/test.tsv', sep='\\t')\n\nprint('test set: {0}'.format(train.shape))\ntest.head(10)\nplt.figure(figsize=(10, 8))\n\nplt.hist(train['Sentiment'], 5, alpha=0.2, density=True)\n\nplt.hist(\n [train.loc[train['Phrase'].apply(lambda p: ',' in p), 'Sentiment'],\n train.loc[train['Phrase'].apply(lambda p: '.' in p), 'Sentiment'],\n train.loc[train['Phrase'].apply(lambda p: '!' in p), 'Sentiment'],\n train.loc[train['Phrase'].apply(lambda p: '?' 
in p), 'Sentiment']],\n 5, alpha=0.5, density=True,\n label=[',', '.', '!', '?'])\n\nplt.xlabel('sentiment')\nplt.ylabel('probability')\nplt.grid(alpha=0.25)\nplt.legend(loc='upper right')\nreplace_list = {r\"i'm\": 'i am',\n r\"'re\": ' are',\n r\"let’s\": 'let us',\n r\"'s\": ' is',\n r\"'ve\": ' have',\n r\"can't\": 'can not',\n r\"cannot\": 'can not',\n r\"shan’t\": 'shall not',\n r\"n't\": ' not',\n r\"'d\": ' would',\n r\"'ll\": ' will',\n r\"'scuse\": 'excuse',\n ',': ' ,',\n '.': ' .',\n '!': ' !',\n '?': ' ?',\n '\\s+': ' '}\n\ndef clean_text(text):\n text = text.lower()\n for s in replace_list:\n text = text.replace(s, replace_list[s])\n text = ' '.join(text.split())\n return text\n\nX_train = train['Phrase'].apply(lambda p: clean_text(p))\nphrase_len = X_train.apply(lambda p: len(p.split(' ')))\nmax_phrase_len = phrase_len.max()+10\nprint('max phrase len: {0}'.format(max_phrase_len-10)+\n '\\nuse maxlen: {0}'.format(max_phrase_len))\n\nplt.figure(figsize=(10, 8))\nplt.hist(phrase_len, alpha=0.2, density=True)\nplt.xlabel('phrase len')\nplt.ylabel('probability')\nplt.grid(alpha=0.25)\ny_train = train['Sentiment']\n\ntokenizer = Tokenizer(num_words=8192,\n filters='\"#$%&()*+-/:;<=>@[\\]^_`{|}~')\ntokenizer.fit_on_texts(X_train)\n\nX_train = tokenizer.texts_to_sequences(X_train)\nX_train = pad_sequences(X_train, maxlen=max_phrase_len)\ny_train = to_categorical(y_train.values)\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1)\nprint('X_train size: {0}, '.format(X_train.shape)+\n 'y_train size: {0}\\n'.format(y_train.shape)+\n 'X_val size: {0}, '.format(X_val.shape)+\n 'y_val size: {0}'.format(y_val.shape))\n\nmodel_cnn = Sequential()\nmodel_cnn.add(Embedding(8192, 256))\nmodel_cnn.add(SpatialDropout1D(0.5))\nmodel_cnn.add(Conv1D(128, 3, padding='same', activation='relu', strides=1))\nmodel_cnn.add(GlobalMaxPooling1D())\nmodel_cnn.add(Dense(256, activation='relu'))\nmodel_cnn.add(Dropout(0.5))\nmodel_cnn.add(Dense(5, activation='softmax'))\nmodel_cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel_cnn.summary()\nmodel_cnn.fit(X_train, y_train,\n validation_data=(X_val, y_val),\n epochs=8, batch_size=1024, verbose=1)\nmodel_lstm = Sequential()\nmodel_lstm.add(Embedding(8192, 256))\nmodel_lstm.add(SpatialDropout1D(0.3))\nmodel_lstm.add(LSTM(256, dropout=0.3, recurrent_dropout=0.3))\nmodel_lstm.add(Dense(256, activation='relu'))\nmodel_lstm.add(Dropout(0.3))\nmodel_lstm.add(Dense(5, activation='softmax'))\nmodel_lstm.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\nmodel_lstm.summary()\n# for lstm batch_size should not be too large\nmodel_lstm.fit(X_train, y_train,\n validation_data=(X_val, y_val),\n epochs=8, batch_size=512, verbose=1)\nX_test = test['Phrase'].apply(lambda p: clean_text(p))\n\nprint('X_train size: {0}'.format(X_test.apply(lambda p: len(p.split(' '))).max()))\n\nX_test = tokenizer.texts_to_sequences(X_test)\nX_test = pad_sequences(X_test, maxlen=max_phrase_len)\n\nsub = pd.read_csv('../input/sampleSubmission.csv')\nsub.head()\ny_cnn = model_cnn.predict(X_test)\ny_lstm = model_lstm.predict(X_test)\n\ny = y_cnn+y_lstm\ny = np.argmax(y, axis=1)\ny[:10]\nsub = pd.read_csv('../input/sampleSubmission.csv')\nsub['Sentiment'] = y\nsub.head()\nsub.to_csv('test_Submission_3.csv', 
index=False)\n","repo_name":"aorursy/new-nb-8","sub_path":"zhangzhixiang_cnn-and-lstm.py","file_name":"zhangzhixiang_cnn-and-lstm.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"19805032345","text":"import sys\nsys.stdin = open(\"../input.txt\", 'r')\n\n\ndef safe(y, x): # index bounds check\n if 0 <= y < 8 and 0 <= x < 8:\n return True\n return False\n\n\nking_position, stone_position, move_num = map(\n lambda x: int(x) if x.isdigit() else (ord(x[0]) - 65, ord(x[1]) - 49),\n input().split())\nky, kx = king_position\nsy, sx = stone_position\nmoves = [input() for _ in range(move_num)]\narr = [list(0 for _ in range(8)) for _ in range(8)]\narr[ky][kx], arr[sy][sx] = 'K', 'S'\nd = {'R': (1, 0), 'L': (-1, 0), 'B': (0, -1), 'T': (0, 1), 'RT': (1, 1), 'LT': (-1, 1), 'RB': (1, -1), 'LB': (-1, -1)}\nfor move in moves:\n dy, dx = d[move]\n nky, nkx = ky + dy, kx + dx\n nsy, nsx = sy + dy, sx + dx\n if safe(nky, nkx):\n if not arr[nky][nkx]:\n arr[nky][nkx], arr[ky][kx] = arr[ky][kx], arr[nky][nkx]\n ky, kx = nky, nkx\n continue\n else:\n if safe(nsy, nsx):\n \"\"\"\n swapping everything in one simultaneous assignment like this does not work\n arr[nsy][nsx], arr[sy][sx], arr[nky][nkx], arr[ky][kx] = \n arr[sy][sx], arr[nsy][nsx], arr[ky][kx], arr[nky][nkx]\n \"\"\"\n arr[nsy][nsx], arr[sy][sx] = arr[sy][sx], arr[nsy][nsx]\n arr[nky][nkx], arr[ky][kx] = arr[ky][kx], arr[nky][nkx]\n ky, kx, sy, sx = nky, nkx, nsy, nsx\nprint(chr(ky+65), chr(kx+49), sep='')\nprint(chr(sy+65), chr(sx+49), sep='')","repo_name":"onewns/TIL","sub_path":"algorithm/Simulation/BOJ1063.py","file_name":"BOJ1063.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"72328429298","text":"from restuarant.entity.config_entity import DataIngestionConfig\nimport sys,os\nfrom restuarant.exception import RestuarantException\nfrom restuarant.logger import logging\nfrom restuarant.entity.artifact_entity import DataIngestionArtifact\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom six.moves import urllib\n\nclass DataIngestion:\n\n def __init__(self,data_ingestion_config:DataIngestionConfig ):\n try:\n logging.info(f\"{'>>'*20}Data Ingestion log started.{'<<'*20} \")\n self.data_ingestion_config = data_ingestion_config\n\n except Exception as e:\n raise RestuarantException(e,sys)\n \n\n def download_data(self,) -> str:\n try:\n # extract the remote url to download the dataset from\n download_url = self.data_ingestion_config.dataset_download_url\n\n # folder location to download the file into\n csv_download_dir = self.data_ingestion_config.csv_download_dir\n \n os.makedirs(csv_download_dir,exist_ok=True)\n\n restuarant_file_name = os.path.basename(download_url)\n\n csv_file_path = os.path.join(csv_download_dir, restuarant_file_name)\n\n logging.info(f\"Downloading file from :[{download_url}] into :[{csv_file_path}]\")\n urllib.request.urlretrieve(download_url, csv_file_path)\n logging.info(f\"File :[{csv_file_path}] has been downloaded successfully.\")\n return csv_file_path\n\n except Exception as e:\n raise RestuarantException(e,sys) from e\n\n \n def split_data_as_train_test(self) -> DataIngestionArtifact:\n try:\n csv_data_dir = self.data_ingestion_config.csv_download_dir\n\n file_name = os.listdir(csv_data_dir)[0]\n\n zomato_file_path = os.path.join(csv_data_dir,file_name)\n\n\n logging.info(f\"Reading csv file: [{zomato_file_path}]\")\n zomato_dataframe = pd.read_csv(zomato_file_path)\n\n 
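# [illustrative sketch] A tiny self-check of the coordinate trick in the BOJ 1063
# record above: chess squares "A1".."H8" map to 0-based (row, col) via ord()/chr().
# The helper names are hypothetical, added only for illustration.
def to_rc(square):
    return ord(square[0]) - 65, ord(square[1]) - 49

def to_square(r, c):
    return chr(r + 65) + chr(c + 49)

assert to_rc("A1") == (0, 0)
assert to_square(7, 7) == "H8"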
logging.info(f\"Splitting data into train and test\")\n\n # Train test split\n train_set, test_set = train_test_split(zomato_dataframe, test_size=0.2, random_state=1)\n\n train_file_path = os.path.join(self.data_ingestion_config.ingested_train_dir,\n file_name)\n\n test_file_path = os.path.join(self.data_ingestion_config.ingested_test_dir,\n file_name)\n\n if train_set is not None:\n os.makedirs(self.data_ingestion_config.ingested_train_dir,exist_ok=True)\n logging.info(f\"Exporting training datset to file: [{train_file_path}]\")\n train_set.to_csv(train_file_path,index=False)\n\n if test_set is not None:\n os.makedirs(self.data_ingestion_config.ingested_test_dir, exist_ok= True)\n logging.info(f\"Exporting test dataset to file: [{test_file_path}]\")\n test_set.to_csv(test_file_path,index=False) \n\n\n data_ingestion_artifact = DataIngestionArtifact(train_file_path=train_file_path,\n test_file_path=test_file_path,\n is_ingested=True,\n message=f\"Data ingestion completed successfully.\"\n )\n logging.info(f\"Data Ingestion artifact:[{data_ingestion_artifact}]\")\n return data_ingestion_artifact\n\n except Exception as e:\n raise RestuarantException(e,sys) from e\n\n\n def initiate_data_ingestion(self)-> DataIngestionArtifact:\n try:\n csv_file_path = self.download_data()\n return self.split_data_as_train_test()\n except Exception as e:\n raise RestuarantException(e,sys) from e\n \n\n\n def __del__(self):\n logging.info(f\"{'>>'*20}Data Ingestion log completed.{'<<'*20} \\n\\n\")\n","repo_name":"Adityashinde1/restuarant_ratings","sub_path":"restuarant/component/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72098010098","text":"# coding: utf-8\n\n\"\"\"\n App Center Client\n\n Microsoft Visual Studio App Center API # noqa: E501\n\n OpenAPI spec version: preview\n Contact: benedetto.abbenanti@gmail.com\n Project Repository: https://github.com/b3nab/appcenter-sdks\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AlertBugTrackerRepo(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'name': 'string',\n 'url': 'string',\n 'id': 'string',\n 'description': 'string',\n 'private': 'boolean',\n 'owner': 'object'\n }\n\n attribute_map = {\n 'name': 'name',\n 'url': 'url',\n 'id': 'id',\n 'description': 'description',\n 'private': 'private',\n 'owner': 'owner'\n }\n\n def __init__(self, name=None, url=None, id=None, description=None, private=None, owner=None): # noqa: E501\n \"\"\"AlertBugTrackerRepo - a model defined in Swagger\"\"\" # noqa: E501\n self._name = None\n self._url = None\n self._id = None\n self._description = None\n self._private = None\n self._owner = None\n self.discriminator = None\n self.name = name\n self.url = url\n self.id = id\n if description is not None:\n self.description = description\n if private is not None:\n self.private = private\n if owner is not None:\n self.owner = owner\n\n @property\n def name(self):\n \"\"\"Gets the name of this AlertBugTrackerRepo. # noqa: E501\n\n\n :return: The name of this AlertBugTrackerRepo. 
# noqa: E501\n :rtype: string\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this AlertBugTrackerRepo.\n\n\n :param name: The name of this AlertBugTrackerRepo. # noqa: E501\n :type: string\n \"\"\"\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def url(self):\n \"\"\"Gets the url of this AlertBugTrackerRepo. # noqa: E501\n\n\n :return: The url of this AlertBugTrackerRepo. # noqa: E501\n :rtype: string\n \"\"\"\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\"Sets the url of this AlertBugTrackerRepo.\n\n\n :param url: The url of this AlertBugTrackerRepo. # noqa: E501\n :type: string\n \"\"\"\n if url is None:\n raise ValueError(\"Invalid value for `url`, must not be `None`\") # noqa: E501\n\n self._url = url\n\n @property\n def id(self):\n \"\"\"Gets the id of this AlertBugTrackerRepo. # noqa: E501\n\n\n :return: The id of this AlertBugTrackerRepo. # noqa: E501\n :rtype: string\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this AlertBugTrackerRepo.\n\n\n :param id: The id of this AlertBugTrackerRepo. # noqa: E501\n :type: string\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def description(self):\n \"\"\"Gets the description of this AlertBugTrackerRepo. # noqa: E501\n\n\n :return: The description of this AlertBugTrackerRepo. # noqa: E501\n :rtype: string\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this AlertBugTrackerRepo.\n\n\n :param description: The description of this AlertBugTrackerRepo. # noqa: E501\n :type: string\n \"\"\"\n\n self._description = description\n\n @property\n def private(self):\n \"\"\"Gets the private of this AlertBugTrackerRepo. # noqa: E501\n\n\n :return: The private of this AlertBugTrackerRepo. # noqa: E501\n :rtype: boolean\n \"\"\"\n return self._private\n\n @private.setter\n def private(self, private):\n \"\"\"Sets the private of this AlertBugTrackerRepo.\n\n\n :param private: The private of this AlertBugTrackerRepo. # noqa: E501\n :type: boolean\n \"\"\"\n\n self._private = private\n\n @property\n def owner(self):\n \"\"\"Gets the owner of this AlertBugTrackerRepo. # noqa: E501\n\n Repository owner object # noqa: E501\n\n :return: The owner of this AlertBugTrackerRepo. # noqa: E501\n :rtype: object\n \"\"\"\n return self._owner\n\n @owner.setter\n def owner(self, owner):\n \"\"\"Sets the owner of this AlertBugTrackerRepo.\n\n Repository owner object # noqa: E501\n\n :param owner: The owner of this AlertBugTrackerRepo. 
# noqa: E501\n :type: object\n \"\"\"\n\n self._owner = owner\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AlertBugTrackerRepo):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"b3nab/appcenter-sdks","sub_path":"sdks/python/appcenter_sdk/models/AlertBugTrackerRepo.py","file_name":"AlertBugTrackerRepo.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"24823272935","text":"#-*- coding:utf-8 -*-\n\nfrom base import BaseHandler\n\n\nclass EditHandler(BaseHandler):\n '''\n Edit Handler\n '''\n\n def get(self, id):\n todos = self.db.query(\"select * from todo where id=%s\", int(id))\n if not todos:\n return None\n todo = todos[0]\n return self.render(\"edit.html\", todo=todo)\n\n def post(self, id):\n todos = self.db.query(\"select * from todo where id=%s\", int(id))\n if not todos:\n return None\n todo_text = self.get_argument(\"todo_text\")\n self.db.execute('''update todo set \n todo_text=%s, \n post_date=now() \n where \n id=%s''', \n todo_text, \n int(id))\n self.redirect(\"/\")\n","repo_name":"JakeyChen/tornado_todo","sub_path":"handlers/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"769395289","text":"from graphics import *\r\nimport numpy as np\r\n\r\nwin = GraphWin(\"Rectangular Grid\", 700, 700)\r\nImg = Image(Point(0,0),\"doc.gif\")\r\nw=Img.getWidth()\r\nh=Img.getHeight()\r\n \r\n\r\ndef write_image():\r\n f=open('image_data.txt','w')\r\n \r\n for i in range(0,w):\r\n for j in range (0,h):\r\n color=Img.getPixel(i,j)\r\n r=str(color[0])\r\n g=str(color[1])\r\n b=str(color[2])\r\n string=\"\"\r\n string=string+r+\",\"+g+\",\"+b+\"\\n\"\r\n f.write(string)\r\n\r\n\r\ndef read_image():\r\n f=open('image_data.txt','r')\r\n line=[]\r\n line=f.readlines()\r\n l=0\r\n for i in range(0,w):\r\n for j in range(0,h):\r\n col = line[l].split(\",\")\r\n r=int(col[0])\r\n g=int(col[1])\r\n b= int(col[2].rstrip())\r\n l=l+1\r\n obj = Rectangle(Point(i,j),Point(i+1,j+1))\r\n obj.setFill(color_rgb(r,g,b))\r\n obj.setOutline(color_rgb(r,g,b))\r\n obj.draw(win)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\ndef main():\r\n \r\n win.setCoords(0.0,0.0,w,h)\r\n win.setBackground(\"white\")\r\n #draw x axes\r\n for i in range (0,w+1):\r\n lx=Line(Point(0,i),Point(w,i))\r\n lx.draw(win)\r\n lx.setFill(\"green\")\r\n lx.setWidth(0.1)\r\n #draw y axes\r\n for j in range 
(0,h+1):\r\n ly=Line(Point(j,0),Point(j,h))\r\n ly.draw(win)\r\n ly.setFill(\"green\")\r\n ly.setWidth(0.1)\r\n\r\n ox =Line(Point(w/2,0),Point(w/2,h))\r\n oy =Line(Point(0,h/2),Point(w,h/2))\r\n ox.draw(win)\r\n oy.draw(win)\r\n ox.setFill(\"red\")\r\n oy.setFill(\"red\")\r\n write_image()\r\n read_image()\r\n # win.close() \r\n\r\nmain()\r\n#read_image()\r\n","repo_name":"agni-c2103/GRID-IMPLEMENTATION-","sub_path":"read_image_pix_coords.py","file_name":"read_image_pix_coords.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"24498533514","text":"\"\"\"\nFunction(s) reads in monthly data from CESM2-LE for different variables \nusing # of ensemble members for all time periods\n\nNotes\n-----\n Author : Zachary Labe\n Date : 25 June 2021\n\nUsage\n-----\n [1] read_CESM2LE(directory,vari,sliceperiod,sliceshape,slicenan,numOfEns,timeper)\n\"\"\"\n\ndef read_CESM2LE(directory,vari,sliceperiod,sliceshape,slicenan,numOfEns,timeper):\n \"\"\"\n Function reads monthly data from CESM2-LE\n\n Parameters\n ----------\n directory : string\n path for data\n vari : string\n variable for analysis\n sliceperiod : string\n how to average time component of data\n sliceshape : string\n shape of output array\n slicenan : string or float\n Set missing values\n numOfEns : number of ensembles\n integer\n timeper : time period of analysis\n string\n\n Returns\n -------\n lat : 1d numpy array\n latitudes\n lon : 1d numpy array\n longitudes\n var : numpy array\n processed variable\n\n Usage\n -----\n read_CESM2LE(directory,vari,sliceperiod,sliceshape,\n slicenan,numOfEns,timeper)\n \"\"\"\n print('\\n>>>>>>>>>> STARTING read_CESM2LE function!')\n\n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n import calc_Utilities as UT\n\n ###########################################################################\n ### Parameters\n time = np.arange(1850,2100+1,1)\n mon = 12\n ens1 = np.arange(1,10+1,1)\n ens2 = np.arange(21,50+1,1)\n ens = np.append(ens1,ens2)\n\n ###########################################################################\n ### Read in data\n membersvar = []\n for i,ensmember in enumerate(ens):\n filename = directory + '%s/%s_%s_1850-2100.nc' % (vari,vari,\n ensmember)\n data = Dataset(filename,'r')\n lat1 = data.variables['latitude'][:]\n lon1 = data.variables['longitude'][:]\n var = data.variables['%s' % vari][:,:,:]\n data.close()\n\n print('Completed: read *CESM2-LE* Ensemble Member --%s--' % ensmember)\n membersvar.append(var)\n del var\n membersvar = np.asarray(membersvar)\n ensvalue = np.reshape(membersvar,(len(ens),time.shape[0],mon,\n lat1.shape[0],lon1.shape[0]))\n del membersvar\n print('Completed: read all CESM2-LE Members!\\n')\n\n ###########################################################################\n ### Slice over months (currently = [ens,yr,mn,lat,lon])\n ### Shape of output array\n if sliceperiod == 'annual':\n ensvalue = np.nanmean(ensvalue,axis=2)\n if sliceshape == 1:\n ensshape = ensvalue.ravel()\n elif sliceshape == 4:\n ensshape = ensvalue\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: ANNUAL MEAN!')\n elif sliceperiod == 'DJF':\n ensshape = np.empty((ensvalue.shape[0],ensvalue.shape[1]-1,\n lat1.shape[0],lon1.shape[0]))\n for i in range(ensvalue.shape[0]):\n ensshape[i,:,:,:] = UT.calcDecJanFeb(ensvalue[i,:,:,:,:],\n lat1,lon1,'surface',1)\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n 
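# [illustrative sketch] The seasonal branches in the CESM2-LE reader above all
# reduce a [ens, yr, mon, lat, lon] array over the month axis; a minimal,
# numpy-only check of that pattern on toy data (shapes here are demo assumptions):
import numpy as np

x = np.random.rand(2, 3, 12, 4, 5)            # toy [ens, yr, mon, lat, lon]
jja = np.nanmean(x[:, :, 5:8, :, :], axis=2)  # June-July-August mean
assert jja.shape == (2, 3, 4, 5)              # month axis collapsed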
print('Completed: DJF MEAN!')\n elif sliceperiod == 'MAM':\n enstime = np.nanmean(ensvalue[:,:,2:5,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: MAM MEAN!')\n elif sliceperiod == 'JJA':\n enstime = np.nanmean(ensvalue[:,:,5:8,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: JJA MEAN!')\n elif sliceperiod == 'SON':\n enstime = np.nanmean(ensvalue[:,:,8:11,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: SON MEAN!')\n elif sliceperiod == 'JFM':\n enstime = np.nanmean(ensvalue[:,:,0:3,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: JFM MEAN!')\n elif sliceperiod == 'AMJ':\n enstime = np.nanmean(ensvalue[:,:,3:6,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: AMJ MEAN!')\n elif sliceperiod == 'JAS':\n enstime = np.nanmean(ensvalue[:,:,6:9,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: JAS MEAN!')\n elif sliceperiod == 'OND':\n enstime = np.nanmean(ensvalue[:,:,9:,:,:],axis=2)\n if sliceshape == 1:\n ensshape = enstime.ravel()\n elif sliceshape == 4:\n ensshape = enstime\n print('Shape of output = ', ensshape.shape,[[ensshape.ndim]])\n print('Completed: OND MEAN!')\n elif sliceperiod == 'none':\n if sliceshape == 1:\n ensshape = ensvalue.ravel()\n elif sliceshape == 4:\n ensshape= np.reshape(ensvalue,(ensvalue.shape[0],ensvalue.shape[1]*ensvalue.shape[2],\n ensvalue.shape[3],ensvalue.shape[4]))\n elif sliceshape == 5:\n ensshape = ensvalue\n print('Shape of output =', ensshape.shape, [[ensshape.ndim]])\n print('Completed: ALL RAVELED MONTHS!')\n\n ###########################################################################\n ### Change missing values\n if slicenan == 'nan':\n ensshape[np.where(np.isnan(ensshape))] = np.nan\n ensshape[np.where(ensshape < -999)] = np.nan \n print('Completed: missing values are =',slicenan)\n else:\n ensshape[np.where(np.isnan(ensshape))] = slicenan\n ensshape[np.where(ensshape < -999)] =slicenan\n\n ###########################################################################\n ### Change units\n if any([vari=='SLP',vari=='PS']):\n ensshape = ensshape/100 # Pa to hPa\n print('Completed: Changed units (Pa to hPa)!')\n elif any([vari=='T2M',vari=='SST']):\n ensshape = ensshape - 273.15 # K to C\n print('Completed: Changed units (K to C)!')\n elif any([vari=='PRECL',vari=='PRECC',vari=='PRECT']):\n ensshape = ensshape * 8.64e7 # m/s to mm/day\n ### \"Average Monthly Rate of Precipitation\"\n print('*** CURRENT UNITS ---> [[ mm/day ]]! 
***')\n \n ###########################################################################\n ### Select years of analysis (1850-2100)\n if timeper == 'all':\n print('ALL SIMULATION YEARS')\n print(time)\n histmodel = ensshape\n elif timeper == 'historical':\n yearhistq = np.where((time >= 1950) & (time <= 2019))[0]\n print('HISTORICAL YEARS')\n print(time[yearhistq])\n histmodel = ensshape[:,yearhistq,:,:]\n elif timeper == 'future':\n yearhistq = np.where((time >= 2020) & (time <= 2099))[0]\n print('FUTURE YEARS')\n print(time[yearhistq])\n histmodel = ensshape[:,yearhistq,:,:]\n\n print('Shape of output FINAL = ', histmodel.shape,[[histmodel.ndim]])\n print('>>>>>>>>>> ENDING read_CESM2LE function!') \n return lat1,lon1,histmodel \n\n# ### Test functions - do not use!\n# import numpy as np\n# import matplotlib.pyplot as plt\n# import calc_Utilities as UT\n# directory = '/Users/zlabe/Data/CESM2-LE/monthly/'\n# vari = 'SST'\n# sliceperiod = 'annual'\n# sliceshape = 4\n# slicenan = 'nan'\n# numOfEns = 40\n# timeper = 'all'\n# lat,lon,var = read_CESM2LE(directory,vari,sliceperiod,sliceshape,slicenan,numOfEns,timeper)\n\n# lon2,lat2 = np.meshgrid(lon,lat)\n# ave = UT.calc_weightedAve(var,lat2)\n","repo_name":"zmlabe/ModelBiasesANN","sub_path":"Scripts/read_CESM2LE.py","file_name":"read_CESM2LE.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"} +{"seq_id":"6359648311","text":"#-------------------------------------------------------------------------------\n# Author: Craig Kimball \n# Assignment 4\n# Date: 10/7/2021\n#-------------------------------------------------------------------------------\n# Honor Code Statement: I received no assistance on this assignment that\n# violates the ethical guidelines set forth by professor and class syllabus.\n#-------------------------------------------------------------------------------\ndef swap_text(text):\n rtn = ''\n #for loop that goes through each pair, only goes to even index, so I must check if its a single character\n for i in range(0,len(text),2):\n #checks if there is a character after the current\n if i < len(text)-1:\n rtn += text[i+1]+text[i]\n #if no character (last character) then just add current\n else:\n rtn += text[i]\n \n return rtn\n\ndef which_day(numbers):\n rtn = ''\n sum1 = 0\n sum2 = 0\n #loops through every number by index\n for i in range(len(numbers)):\n #if the index is even add to sum1\n if i%2 == 0:\n sum1+=numbers[i]\n #if the index is not even (odd) add to sum2\n else:\n sum2+=numbers[i]\n rtn = 'Monday' if abs(sum1-sum2)%7 == 1 else 'Tuesday' if abs(sum1-sum2)%7 == 2 else 'Wednesday' if abs(sum1-sum2)%7 == 3 else 'Thursday' if abs(sum1-sum2)%7 == 4 else 'Friday' if abs(sum1-sum2)%7 == 5 else 'Saturday' if abs(sum1-sum2)%7 == 6 else 'Sunday'\n \n return rtn\n\n\ndef delete_duplicates(items):\n #since i cannot remove or delete items from the list I must start with a new one\n rtn = []\n \n #when in doubt use a flag\n #I used a flag here because it allows me to make an action after checking every element of the new list \n is_duplicate = False\n \n #loops through every element in original list\n for i in range(len(items)):\n is_duplicate = False\n \n #since I cant use in I am checking if the current item is in the new list already by checking against every item already in it.\n #essential this is my own 'in' function\n for j in range(len(rtn)):\n if items[i] == rtn[j]:\n #updates flag state\n is_duplicate = True\n \n #logic statement 
says if its not in the new list add it \n if(not(is_duplicate)):\n rtn.append(items[i]) \n\n return rtn\n\ndef final_guests(draft_guests, new_guest):\n rtn = []\n \n alphabet = ['A','B','C','D','E','F','G','H','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n current_name_index = 0\n new_name_index = 99\n #runs this section of code if and only if the list and new guest are populated\n if len(draft_guests) > 0 and len(new_guest) >0 and draft_guests[0] != '':\n \n #this function checks the alphabetical idex of the new guest name\n for i in range(len(alphabet)):\n if new_guest[0] == alphabet[i]:\n new_name_index = i\n #this loop go through ever guest of the draft list\n for guest in draft_guests:\n \n #gets the alphabetical index of the current guest\n for i in range(len(alphabet)):\n \n if guest[0] == alphabet[i]:\n current_name_index = i\n #compares indexes to confirm order and adds the current guest and new guest in their respective places \n if new_name_index < current_name_index and new_name_index >= 0:\n rtn.append(new_guest)\n #this statement makes sure the new guest cant be added multiple times\n new_name_index = -1\n rtn.append(guest)\n else:\n rtn.append(guest)\n \n #confirms that the name was added somewhere, if not adds it to the end\n \n if(26 > new_name_index > 0):\n rtn.append(new_guest)\n \n #checks for the possibility that the new guest is an empty string and adds it to the front\n if(len(draft_guests) > 0 and len(new_guest) <= 0):\n rtn = [new_guest]\n for guest in draft_guests:\n rtn.append(guest) \n \n #checks for the possibility that the guest list is '' and the new guest hasnt been added\n #has to check to see if list is populated to avoid index errors\n if(len(draft_guests) >0):\n if(draft_guests[0] == ''):\n for guest in draft_guests:\n rtn.append(guest) \n rtn.append(new_guest)\n #checks to see if the list remains empty, and adds the new guest to the list \n if(len(rtn) == 0):\n rtn.append(new_guest)\n return rtn\n\n","repo_name":"Cimball1334/OriamPortfolio","sub_path":"GMUCS/CS112/pa4/ckimbal4_233_P4.py","file_name":"ckimbal4_233_P4.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43712262080","text":"from config import c1, c2\nfrom itertools import product\nfrom string import ascii_letters\nchset = ascii_letters + \" \"\n\n\ndef observe(chset, m, n):\n for c in product(list(chset), repeat=n):\n cnt = 0\n temp = bxor(m[:n], bytes(\"\".join(c).encode()))\n for p in temp:\n if chr(p) in chset:\n cnt += 1\n if cnt == n:\n print(temp)\n # print(cnt)\n\n\ndef bxor(b1, b2):\n parts = []\n for b1, b2 in zip(b1, b2):\n parts.append(bytes([b1 ^ b2]))\n return b\"\".join(parts)\n\n\n[c1, c2] = [bytes.fromhex(\"{:x}\".format(x)) for x in [c1, c2]]\n\nmxor = bxor(c1, c2)\n# observe(chset, m, 5)\n\nm1 = b\"this is the text\"\nm2 = bxor(mxor, m1)\nm1 = m1.decode()\nm2 = m2.decode()\n\nassert(len(m1) == len(c1))\nassert(len(m2) == len(c2))\n\nprint(\"m1 = {:s}\".format(m1))\nprint(\"m2 = {:s}\".format(m2))\n\nprint(\"\".join(sorted([m1, m2])))\n\n\n","repo_name":"pcw109550/id0-rsa.pub","sub_path":"04_AES-CTR_with_Nonce_Reuse/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"57"} +{"seq_id":"29148588631","text":"with open(\"input.txt\", \"r\") as fp:\n lines = fp.readlines()\n lines=[line.rstrip() for line in lines]\n\n# to make a dictionary 
of bags\nbag_types = []\nall_bags = {}\nfor line in lines:\n mbag = \" \".join(line.split(\" \")[:2])\n contains = line[line.index(\"contain \")+8:-1]\n each_contain = contains.split(\",\")\n each_contain = [cnt.lstrip() for cnt in each_contain]\n each_contain = [\" \".join(cont.split(\" \")[:-1]) for cont in each_contain]\n #print(each_contain)\n each_contain = {\" \".join(cont.split(\" \")[1:]):cont.split(\" \")[0] for cont in each_contain}\n #print(each_contain)\n if mbag not in bag_types:\n bag_types.append(mbag)\n if all_bags.get(mbag):\n each_contain.update(all_bags[mbag]) \n all_bags[mbag] = each_contain\n\nmy_bag = \"shiny gold\"\nbags_contains = {}\ntest_bags=all_bags\nfor k, v in test_bags.items():\n bags_contains[k] = []\n try:\n for kk, vv in v.items():\n\n bags_contains[k]+=[kk]*int(vv)\n except:\n pass\nc=0\n\ndef count_bags(current_bag):\n if current_bag==\" \" or bags_contains.get(current_bag) is None:\n return 0\n\n #print(\"key:\", current_bag)\n cnt = len(bags_contains[current_bag])\n cnts = []\n for k in bags_contains[current_bag]:\n cnts.append(count_bags(k)) \n return sum(cnts)+cnt\n\nprint(f\"{my_bag} bag can hold {count_bags('shiny gold')} bags\")","repo_name":"pikk7/adventOfCode2020","sub_path":"7/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"12503293519","text":"import playsound\nimport sys\nprint('\\033[32m--DESCUBRA SEU SIGNO--\\033[m')\nprint('\\033[36m|\\033[m')\nnome = str(input('\\033[30mQual seu nome ? \\033[m')).upper()\nif nome == ' '.strip():\n print('\\033[31m=' * 30, '\\033[m')\n print('\\033[31mNOME INVÁLIDO\\033[m')\n playsound.playsound('signos.mp3') # error sound\n sys.exit();\nfor letter in nome:\n if letter.isdigit():\n print('\\033[31m=' * 30, '\\033[m')\n print('\\033[31mNOME INVÁLIDO\\033[m')\n playsound.playsound('signos.mp3') # error sound\n sys.exit();\nwhile True:\n try:\n dia = int(input('\\033[30mDigite o DIA do seu nascimento: \\033[m'))\n if dia > 31:\n print('\\033[31m=' * 30, '\\033[m')\n print('\\033[31mDIA DE NASCIMENTO INVÁLIDO\\033[m')\n playsound.playsound('signos.mp3') # error sound\n sys.exit();\n mes = int(input('\\033[30mDigite o MÊS do seu nascimento: \\033[m'))\n if mes > 12:\n print('\\033[31m=' * 30, '\\033[m')\n print('\\033[31mMÊS DE NASCIMENTO INVÁLIDO\\033[m')\n playsound.playsound('signos.mp3') # error sound\n sys.exit();\n if mes == 1:\n mes = 'JANEIRO'\n if mes == 2:\n mes = 'FEVEREIRO'\n if mes == 3:\n mes = 'MARÇO'\n if mes == 4:\n mes = 'ABRIL'\n if mes == 5:\n mes = 'MAIO'\n if mes == 6:\n mes = 'JUNHO'\n if mes == 7:\n mes = 'JULHO'\n if mes == 8:\n mes = 'AGOSTO'\n if mes == 9:\n mes = 'SETEMBRO'\n if mes == 10:\n mes = 'OUTUBRO'\n if mes == 11:\n mes = 'NOVEMBRO'\n if mes == 12:\n mes = 'DEZEMBRO'\n ano = int(input('\\033[30mDigite o ANO do seu nascimento: \\033[m'))\n print('\\033[36m|\\033[m')\n if ano < 1900: # the user can be at most 118 years old\n print('\\033[31m=' * 30, '\\033[m')\n playsound.playsound('signos.mp3') # error sound\n print('\\033[31mANO DE NASCIMENTO INVÁLIDO\\033[m')\n sys.exit();\n break\n except ValueError:\n print('\\033[31m=' * 30, '\\033[m')\n print('\\033[31mFORMATO INVÁLIDO DD/MM/AAA\\033[m')\n playsound.playsound('signos.mp3') # error sound\n sys.exit();\nresult = 2018 - ano # calculate the user's age\nprint('\\033[35m{} você nasceu no Dia {} De {} De {},\\033[35m'.format(nome, dia, mes, ano))\nprint('\\033[36m|\\033[m')\nprint('\\033[32m=' * 30, '\\033[m')\nif result <= 1:\n print('\\033[34mVocê tem {} Ano\\033[m'.format(result))\nelse:\n print('\\033[34mVocê tem {} Anos\\033[m'.format(result))\nif result: # print once the result is known\n if mes == 'JANEIRO' and dia > 20 or mes == 'FEVEREIRO' and dia < 21:\n print('Seu Signo é AQUÁRIO: ')\n if mes == 'FEVEREIRO' and dia > 20 or mes == 'MARÇO' and dia < 21:\n print('Seu Signo é PEIXES: ')\n if mes == 'MARÇO' and dia > 20 or mes == 'ABRIL' and dia < 21:\n print('Seu Signo é ÁRIES: ')\n if mes == 'ABRIL' and dia > 20 or mes == 'MAIO' and dia < 21:\n print('Seu Signo é TOURO: ')\n if mes == 'MAIO' and dia > 20 or mes == 'JUNHO' and dia < 21:\n print('Seu Signo é GÊMEOS: ')\n if mes == 'JUNHO' and dia > 20 or mes == 'JULHO' and dia < 21:\n print('Seu Signo é CÂNCER: ')\n if mes == 'JULHO' and dia > 20 or mes == 'AGOSTO' and dia < 21:\n print('Seu Signo é LEÃO: ')\n if mes == 'AGOSTO' and dia > 20 or mes == 'SETEMBRO' and dia < 21:\n print('\\033[32m=' * 30, '\\033[m')\n print('\\033[30mSeu Signo é VIRGEM: \\n\\nVirgem é um signo do elemento Terra, são marcados pela lentidão e por \\nprecisarem de segurança.'\n 'Todos os signos do zodíaco ligado ao elemento possuem uma ótima relação com a comida. \\n'\n 'Assim como a terra, são firmes, estáveis e sólidos. Até mesmo nos momentos em que as pessoas mais se deixam \\nlevar pelas emoções, '\n 'Esse signo prefere apostar na sua estabilidade. Não somente em virgem mas também os outros signos do horóscopo \\nque são regidos pelo elemento terra,'\n 'São muito cuidadosos e excessivamente críticos em suas relações interpessoais, ainda mais quando se trata de amor e sexo.\\033[m')\n if mes == 'SETEMBRO' and dia > 20 or mes == 'OUTUBRO' and dia < 21:\n print('Seu Signo é LIBRA: ')\n if mes == 'OUTUBRO' and dia > 20 or mes == 'NOVEMBRO' and dia < 21:\n print('Seu Signo é ESCORPIÃO: ')\n if mes == 'NOVEMBRO' and dia > 20 or mes == 'DEZEMBRO' and dia < 21:\n print('Seu Signo é SAGITÁRIO: ')\n if mes == 'DEZEMBRO' and dia > 20 or mes == 'JANEIRO' and dia < 21:\n print('Seu Signo é CAPRICÓRNIO: ')","repo_name":"LuuanOliveira/Python","sub_path":"Signos.py","file_name":"Signos.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"814471548","text":"import os\nimport logging\nimport logging.config\n\nfrom external_version_control.external_config import getting_external_config\nfrom external_version_control.storage_factory import get_external_storage\nfrom .upload import UploadConfiguration\nfrom ..clusterObjectModel.service_config_update import ServiceConfigUpdate\n\n\nclass Synchronization:\n\n def __init__(self, **kwargs):\n\n self.logger = logging.getLogger(__name__)\n\n # Configuration for local conf\n self.local_conf_path = None\n # Configuration for configmap [Access to k8s through an existing kube_config.]\n self.kube_config_path = None\n # Cluster Configuration of pai.\n self.pai_cluster_configuration_path = None\n\n # External storage configuration data\n self.external_storage_configuration = None\n\n # The config list which should be pushed into cluster.\n self.config_push_list = None\n\n if \"local_conf_path\" in kwargs and kwargs[\"local_conf_path\"] != None:\n self.local_conf_path = kwargs[\"local_conf_path\"]\n\n if \"kube_config_path\" in kwargs and kwargs[\"kube_config_path\"] != None:\n self.kube_config_path = kwargs[\"kube_config_path\"]\n\n if \"pai_cluster_configuration_path\" in kwargs and kwargs[\"pai_cluster_configuration_path\"] != 
None:\n self.pai_cluster_configuration_path = kwargs[\"pai_cluster_configuration_path\"]\n\n if \"config_push_list\" in kwargs and kwargs[\"config_push_list\"] != None:\n self.config_push_list = kwargs[\"config_push_list\"]\n else:\n self.config_push_list = [\n \"config.yaml\",\n \"k8s-role-definition.yaml\",\n \"kubernetes-configuration.yaml\",\n \"layout.yaml\",\n \"services-configuration.yaml\"\n ]\n \n # Check whether the config files to be uploaded exist\n self._check_if_file_exists()\n\n def _check_if_file_exists(self):\n file_list = set()\n for folder_path_type in [\"local_conf_path\", \"pai_cluster_configuration_path\"]:\n if hasattr(self, folder_path_type):\n folder_path = getattr(self, folder_path_type)\n if folder_path != None and os.path.isdir(folder_path):\n file_list |= set(os.listdir(folder_path))\n missing_files = set(self.config_push_list) - set(file_list)\n for missing_file in missing_files:\n self.logger.error(\"Cannot find {} in your config folder.\".format(missing_file))\n if missing_file == \"config.yaml\":\n self.logger.error(\"Before v1.7.0, this file is stored in ~/pai-deploy/cluster-cfg/config.yaml on the dev box machine.\")\n self.logger.error(\"If you have upgraded to v1.7.0, please copy this file to the config folder.\")\n self.logger.warning(\"If `config.yaml` is lost, you need to create a new one. Here is an example for reference:\")\n self.logger.warning(\"https://openpai.readthedocs.io/en/latest/manual/cluster-admin/installation-guide.html#configyaml-example\")\n if len(missing_files) > 0:\n raise Exception(\"Some configuration files not found.\")\n\n def get_external_storage_conf(self):\n external_config = getting_external_config(\n external_storage_conf_path = self.local_conf_path,\n local_cluster_configuration = self.pai_cluster_configuration_path,\n kube_config_path = self.kube_config_path\n )\n return external_config.get_latest_external_configuration()\n\n def sync_data_from_source(self):\n\n self.external_storage_configuration = self.get_external_storage_conf()\n with get_external_storage(self.external_storage_configuration) as configuration_path:\n self.logger.info(\"The temporary cluster configuration path is : {0}\".format(configuration_path))\n\n config_format_check = ServiceConfigUpdate(configuration_path)\n config_format_check.run()\n\n conf_uploader = UploadConfiguration(configuration_path, self.kube_config_path, self.config_push_list)\n conf_uploader.run()\n self.logger.info(\"Cluster Configuration synchronization from external storage is successful.\")\n","repo_name":"microsoft/pai","sub_path":"deployment/confStorage/synchronization.py","file_name":"synchronization.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":2559,"dataset":"github-code","pt":"57"}
+{"seq_id":"9463881484","text":"numEntero = int(input(\"Escriba aquí el número entero positivo: \"))\r\n\r\n# WHILE loop: while numEntero is below 1 (zero or negative), the user has to type the number again.\r\nwhile numEntero < 1:\r\n print(\"Has escrito un valor inferior a 1, 0 o negativo... Vuelva a intentarlo por favor.\")\r\n numEntero = int(input(\"Escriba de nuevo el número entero positivo: \"))\r\n\r\n# Initialize the TRAFFIC-LIGHT counter variables at 0; they count how many candidates land on Red (not a divisor of N) or Green (a divisor of N)\r\nsemaforoVerde = 0\r\n\r\nsemaforoAmbar = 0\r\n\r\nsemaforoRojo = 0\r\n\r\n# FOR loop from 1 up to numEntero + 1: starting at 1, it reports which values are divisors.\r\nfor d in range(1,numEntero + 1):\r\n\r\n # Set the traffic-light CONTROL variables. Using a total of 3 conditionals, check whether the number divides N.\r\n # If it does, add it to semaforoVerde; if not, add it to semaforoRojo; and if no divisor is found between N and d, add it to the amber counter.\r\n if numEntero % d == 0:\r\n print(f\"{d} SÍ es divisor de {numEntero}, se añadirá al contador de Semáforo en VERDE\")\r\n semaforoVerde += 1\r\n elif numEntero % d != 0:\r\n print(f\"{d} NO es divisible entre {numEntero} se añadirá al semáforo ROJO\")\r\n semaforoRojo += 1\r\n else:\r\n print(f\"No se ha encontrado ningún divisor. Se añadirá a la variable de semáforo ÁMBAR\\n\")\r\n semaforoAmbar += 1\r\nprint(f\"\\nHay un total de {semaforoVerde} divisores entre {numEntero}\\nHay un total de {semaforoRojo} que no han sido divisores de {numEntero}\\nHay un total de {semaforoAmbar} que no se han encotrado divisores entre {numEntero}\")\r\n\r\nprint(\"\")\r\nautor = __author__ = \"Alfonso Domínguez García\"\r\ncopy = __copyright__=\"Copyright © 2022 Alfonso Domínguez García\"\r\n\r\nprint(autor)\r\nprint(copy)","repo_name":"AlfonDG/PYTHON","sub_path":"UF1/Practica FOR SIS- M3-Estructures repetitives for/Ej4_BuscarDivisor_Alfonso_Dominguez_Garcia.py","file_name":"Ej4_BuscarDivisor_Alfonso_Dominguez_Garcia.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"20541339005","text":"import dc_lib as dc\nimport json \nimport datetime\nimport coins\n\ndef login(ver_code):\n dc.load_config()\n data = {\n 'mobile':'+86%s'%dc.get_config_value('mobile'),\n 'pwd':dc.get_config_value('login_password'),\n 'vercode':ver_code\n }\n print(data)\n content = dc.post('/user/login/',data)\n json_msg = json.loads(content)\n print(json_msg)\n ok = json_msg['ok']\n json_data = json_msg['data']\n if ok:\n token = json_data['token']\n uid = json_data['uid']\n dc.update_config({'token':token,'uid':uid,'login_time':datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')})\n\n return json_msg\n\ndef get_coin_stock(coin_id,stocks):\n coin_stock_list = list(filter(lambda x:x['coin_id']==coin_id,stocks))\n return coin_stock_list[0]['appraisement'] if coin_stock_list else 0\n \ndef get_stocks():\n url = '/my/coin/list/?ori=1'\n content = dc.get(url,unicode_escape=False)\n # print(content)\n json_resp = json.loads(content)\n if json_resp['ok']:\n stocks = []\n json_data = json_resp['data']\n for v in json_data:\n stock = {\n 'coin_id':str(v['coin_id']),\n 'coin_code':v['code'],\n 'amount':float(v['amount']),\n 'usable':float(v['usable']),\n 'cost':float(v['cost']),\n 'price':float(v['price']),\n 'appraisement':float(v['appraisement'])\n }\n if stock['amount']:\n stocks.append(stock)\n \n # print(stocks)\n return stocks\n return None\n\ndef get_orders(coin_id):\n uri = \"/order/history/?symbol_id=%s&status=waiting&start=0&count=10&direction=2\"%coin_id\n resp = 
json.loads(dc.get(uri).replace('\\r\\n', ''))\n # print(resp)\n if resp['ok'] and resp['data'] and resp['data']['orders']:\n orders = []\n for o in resp['data']['orders']:\n orders.append({\n 'order_id':o['order_id'],\n 'volume':o['volume']\n })\n return orders\n return None\n\ndef cancel_order(order_id,coin_id):\n uri = '/order/cancel/'\n data = {\n 'symbol_id':coin_id,\n 'order_id':order_id\n }\n resp = json.loads(dc.post(uri,data))\n ok = resp['ok']\n if not ok:\n print('cancel %s %s %s'%(order_id,ok,resp['msg']))\n return resp['ok']\n\ndef cancel_all_orders_by_coin_id(coin_id):\n orders = get_orders(coin_id)\n # print(orders)\n if not orders:\n return\n \n for order in orders:\n cancel_order(order['order_id'],coin_id)\n\ndef buy(coin_id,price,volume):\n return trade('buy',coin_id,price,volume)\n\ndef sell(coin_id,price,volume):\n return trade('sell',coin_id,price,volume)\n\ndef trade(action,coin_id,price,volume):\n trade_interval = dc.get_config_value('trade_interval')\n trades_his = get_trades(coin_id)\n\n if trades_his:\n last_trade = trades_his[0]\n min_diff = (datetime.datetime.now()-last_trade['time']).total_seconds()/60\n if min_diff abrupt disruption, 다른 진단이 필요하다, LM\n# shot 31243 -> abrupt disruption, \n# shot 31676 -> \n# warning region : ~ 400ms? 정도로 확대해서 disruption prediction 해보자\n# flag2 : no train, no valid (20%, 240)\n\n# Heaing factor, EC, NBI\n# shot list 확장 + prediction region(warning time ~ 400ms)\n# abrupt disruption shot \n\n# input_shape : [Batch, sequence lenth, ]\n\n'''\nTe = [x1,x2,x3,x4,x5, .... , x50]\n = [0,0,0,0,...,100,120,...,100,0,0,0,]\n\nIdea : core value or specific position (r = 1.8대비 r = 1.5 등), shot마다 다른지 확인\n* edge : error가 높아서 좋지 않음(accuracy bad)\n* lock mode : ikstar 참조, work/disruption/machinelearning/database/data, LM\n* lock mode에 대한 numbering도 포함\n* DB : 32, lock mode error\n'''\n\nconfig = Config()\n\n# argument parser\ndef parsing():\n parser = argparse.ArgumentParser(description=\"training disruption prediction model with 0D data\")\n \n # random seed\n parser.add_argument(\"--random_seed\", type = int, default = 42)\n \n # tag and result directory\n parser.add_argument(\"--model\", type = str, default = 'Transformer', choices=['Transformer', 'CnnLSTM', 'MLSTM_FCN'])\n parser.add_argument(\"--tag\", type = str, default = \"Transformer\")\n parser.add_argument(\"--save_dir\", type = str, default = \"./results\")\n \n # test shot for disruption probability curve\n parser.add_argument(\"--test_shot_num\", type = int, default = 21310)\n\n # gpu allocation\n parser.add_argument(\"--gpu_num\", type = int, default = 0)\n\n # batch size / sequence length / epochs / distance / num workers / pin memory use\n parser.add_argument(\"--batch_size\", type = int, default = 256)\n parser.add_argument(\"--num_epoch\", type = int, default = 128)\n parser.add_argument(\"--seq_len\", type = int, default = 21)\n parser.add_argument(\"--dist\", type = int, default = 3)\n parser.add_argument(\"--num_workers\", type = int, default = 4)\n parser.add_argument(\"--pin_memory\", type = bool, default = True)\n \n # optimizer : SGD, RMSProps, Adam, AdamW\n parser.add_argument(\"--optimizer\", type = str, default = \"AdamW\", choices=[\"SGD\",\"RMSProps\",\"Adam\",\"AdamW\"])\n \n # learning rate, step size and decay constant\n parser.add_argument(\"--lr\", type = float, default = 2e-4)\n parser.add_argument(\"--use_scheduler\", type = bool, default = True)\n parser.add_argument(\"--step_size\", type = int, default = 4)\n parser.add_argument(\"--gamma\", type = float, 
default = 0.995)\n \n # early stopping\n parser.add_argument('--early_stopping', type = bool, default = True)\n parser.add_argument(\"--early_stopping_patience\", type = int, default = 32)\n parser.add_argument(\"--early_stopping_verbose\", type = bool, default = True)\n parser.add_argument(\"--early_stopping_delta\", type = float, default = 1e-3)\n\n # imbalanced dataset processing\n # Re-sampling\n parser.add_argument(\"--use_sampling\", type = bool, default = False)\n \n # Re-weighting\n parser.add_argument(\"--use_weighting\", type = bool, default = False)\n \n # Deffered Re-weighting\n parser.add_argument(\"--use_DRW\", type = bool, default = False)\n parser.add_argument(\"--beta\", type = float, default = 0.25)\n\n # loss type : CE, Focal, LDAM\n parser.add_argument(\"--loss_type\", type = str, default = \"Focal\", choices = ['CE','Focal', 'LDAM'])\n \n # LDAM Loss parameter\n parser.add_argument(\"--max_m\", type = float, default = 0.5)\n parser.add_argument(\"--s\", type = float, default = 1.0)\n \n # Focal Loss parameter\n parser.add_argument(\"--focal_gamma\", type = float, default = 2.0)\n \n # monitoring the training process\n parser.add_argument(\"--verbose\", type = int, default = 4)\n \n # model setup : transformer\n parser.add_argument(\"--alpha\", type = float, default = 0.01)\n parser.add_argument(\"--dropout\", type = float, default = 0.1)\n parser.add_argument(\"--feature_dims\", type = int, default = 128)\n parser.add_argument(\"--n_layers\", type = int, default = 4)\n parser.add_argument(\"--n_heads\", type = int, default = 8)\n parser.add_argument(\"--dim_feedforward\", type = int, default = 1024)\n parser.add_argument(\"--cls_dims\", type = int, default = 128)\n \n # model setup : cnn lstm\n parser.add_argument(\"--conv_dim\", type = int, default = 64)\n parser.add_argument(\"--conv_kernel\", type = int, default = 3)\n parser.add_argument(\"--conv_stride\", type = int, default = 1)\n parser.add_argument(\"--conv_padding\", type = int, default = 1)\n parser.add_argument(\"--lstm_dim\", type = int, default = 128)\n parser.add_argument(\"--lstm_layers\", type = int, default = 4)\n parser.add_argument(\"--bidirectional\", type = bool, default = True)\n \n # model setup : MLSTM_FCN\n parser.add_argument(\"--fcn_dim\", type = int, default = 128)\n parser.add_argument(\"--reduction\", type = int, default = 16)\n \n args = vars(parser.parse_args())\n\n return args\n\n# torch device state\nprint(\"================= device setup =================\")\nprint(\"torch device avaliable : \", torch.cuda.is_available())\nprint(\"torch current device : \", torch.cuda.current_device())\nprint(\"torch device num : \", torch.cuda.device_count())\n\n# torch cuda initialize and clear cache\ntorch.cuda.init()\ntorch.cuda.empty_cache()\n\n\nif __name__ == \"__main__\":\n\n args = parsing()\n \n # seed initialize\n seed_everything(args['random_seed'], False)\n \n # save directory\n save_dir = args['save_dir']\n \n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n \n # tag : {model_name}_clip_{seq_len}_dist_{pred_len}_{Loss-type}_{Boosting-type}\n loss_type = args['loss_type']\n \n if args['use_sampling'] and not args['use_weighting'] and not args['use_DRW']:\n boost_type = \"RS\"\n elif args['use_sampling'] and args['use_weighting'] and not args['use_DRW']:\n boost_type = \"RS_RW\"\n elif args['use_sampling'] and not args['use_weighting'] and args['use_DRW']:\n boost_type = \"RS_DRW\"\n elif args['use_sampling'] and args['use_weighting'] and args['use_DRW']:\n boost_type = \"RS_DRW\"\n 
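# note: when use_DRW is set, use_weighting no longer changes the tag, so the RS+RW+DRW branch above collapses to RS_DRW and the RW+DRW branch below collapses to DRW\n    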
elif not args['use_sampling'] and args['use_weighting'] and not args['use_DRW']:\n boost_type = \"RW\"\n elif not args['use_sampling'] and not args['use_weighting'] and args['use_DRW']:\n boost_type = \"DRW\"\n elif not args['use_sampling'] and args['use_weighting'] and args['use_DRW']:\n boost_type = \"DRW\"\n elif not args['use_sampling'] and not args['use_weighting'] and not args['use_DRW']:\n boost_type = \"Normal\"\n \n tag = \"{}_clip_{}_dist_{}_{}_{}_seed_{}\".format(args[\"tag\"], args[\"seq_len\"], args[\"dist\"], loss_type, boost_type, args['random_seed'])\n \n print(\"================= Running code =================\")\n print(\"Setting : {}\".format(tag))\n \n save_best_dir = \"./weights/{}_best.pt\".format(tag)\n save_last_dir = \"./weights/{}_last.pt\".format(tag)\n exp_dir = os.path.join(\"./runs/\", \"tensorboard_{}\".format(tag))\n \n # input features\n ts_cols = config.input_features\n \n # device allocation\n if(torch.cuda.device_count() >= 1):\n device = \"cuda:\" + str(args[\"gpu_num\"])\n else:\n device = 'cpu'\n \n # dataset setup\n ts_train, ts_valid, ts_test, ts_scaler = preparing_0D_dataset(\"./dataset/KSTAR_Disruption_ts_data_extend.csv\", ts_cols = ts_cols, scaler = 'Robust', test_shot = args['test_shot_num'])\n kstar_shot_list = pd.read_csv('./dataset/KSTAR_Disruption_Shot_List.csv', encoding = \"euc-kr\")\n\n train_data = DatasetFor0D(ts_train, kstar_shot_list, seq_len = args['seq_len'], cols = ts_cols, dist = args['dist'], dt = 4 * 1 / 210, scaler = ts_scaler)\n valid_data = DatasetFor0D(ts_valid, kstar_shot_list, seq_len = args['seq_len'], cols = ts_cols, dist = args['dist'], dt = 4 * 1 / 210, scaler = ts_scaler)\n test_data = DatasetFor0D(ts_test, kstar_shot_list, seq_len = args['seq_len'], cols = ts_cols, dist = args['dist'], dt = 4 * 1 / 210, scaler = ts_scaler)\n \n print(\"================= Dataset information =================\")\n print(\"train data : {}, disrupt : {}, non-disrupt : {}\".format(train_data.__len__(), train_data.n_disrupt, train_data.n_normal))\n print(\"valid data : {}, disrupt : {}, non-disrupt : {}\".format(valid_data.__len__(), valid_data.n_disrupt, valid_data.n_normal))\n print(\"test data : {}, disrupt : {}, non-disrupt : {}\".format(test_data.__len__(), test_data.n_disrupt, test_data.n_normal))\n \n # label distribution for LDAM / Focal Loss\n train_data.get_num_per_cls()\n cls_num_list = train_data.get_cls_num_list()\n\n # define model\n if args['model'] == 'Transformer':\n \n model = Transformer(\n n_features=len(ts_cols),\n feature_dims = args['feature_dims'],\n max_len = args['seq_len'],\n n_layers = args['n_layers'],\n n_heads = args['n_heads'],\n dim_feedforward=args['dim_feedforward'],\n dropout = args['dropout'],\n cls_dims = args['cls_dims'],\n n_classes = 2\n )\n \n elif args['model'] == 'CnnLSTM':\n \n model = CnnLSTM(\n seq_len = args['seq_len'],\n n_features=len(ts_cols),\n conv_dim = args['conv_dim'],\n conv_kernel = args['conv_kernel'],\n conv_stride=args['conv_stride'],\n conv_padding=args['conv_padding'],\n lstm_dim=args['lstm_dim'],\n n_layers=args['lstm_layers'],\n bidirectional=args['bidirectional'],\n n_classes=2\n )\n \n elif args['model'] == 'MLSTM_FCN':\n model = MLSTM_FCN(\n n_features = len(ts_cols),\n fcn_dim = args['fcn_dim'],\n kernel_size = args['conv_kernel'],\n stride = args['conv_stride'],\n seq_len = args['seq_len'],\n lstm_dim = args['lstm_dim'],\n lstm_n_layers=args['lstm_layers'],\n lstm_bidirectional=args['bidirectional'],\n lstm_dropout=0.1,\n reduction = args['reduction'],\n alpha = 
args['alpha'],\n n_classes = 2\n )\n \n print(\"\\n==================== model summary ====================\\n\")\n model.summary()\n model.to(device)\n\n # optimizer\n if args[\"optimizer\"] == \"SGD\":\n optimizer = torch.optim.SGD(model.parameters(), lr = args['lr'])\n elif args[\"optimizer\"] == \"RMSProps\":\n optimizer = torch.optim.RMSprop(model.parameters(), lr = args['lr'])\n elif args[\"optimizer\"] == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(), lr = args['lr'])\n elif args[\"optimizer\"] == \"AdamW\":\n optimizer = torch.optim.AdamW(model.parameters(), lr = args['lr'])\n else:\n optimizer = torch.optim.AdamW(model.parameters(), lr = args['lr'])\n \n # scheduler\n if args[\"use_scheduler\"] and not args[\"use_DRW\"]: \n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = args['step_size'], gamma=args['gamma'])\n \n elif args[\"use_DRW\"]:\n scheduler = \"DRW\"\n \n else:\n scheduler = None\n \n # Re-sampling\n if args[\"use_sampling\"]:\n train_sampler = ImbalancedDatasetSampler(train_data)\n valid_sampler = RandomSampler(valid_data)\n test_sampler = RandomSampler(test_data)\n\n else:\n train_sampler = RandomSampler(train_data)\n valid_sampler = RandomSampler(valid_data)\n test_sampler = RandomSampler(test_data)\n \n # Samplers for visualization of embedding space\n train_sampler_vis = ImbalancedDatasetSampler(train_data)\n test_sampler_vis = ImbalancedDatasetSampler(test_data)\n \n train_loader = DataLoader(train_data, batch_size = args['batch_size'], sampler=train_sampler, num_workers = args[\"num_workers\"], pin_memory=args[\"pin_memory\"], drop_last = True)\n valid_loader = DataLoader(valid_data, batch_size = args['batch_size'], sampler=valid_sampler, num_workers = args[\"num_workers\"], pin_memory=args[\"pin_memory\"], drop_last = True)\n test_loader = DataLoader(test_data, batch_size = args['batch_size'], sampler=test_sampler, num_workers = args[\"num_workers\"], pin_memory=args[\"pin_memory\"], drop_last = True)\n\n # Re-weighting\n if args['use_weighting']:\n per_cls_weights = 1.0 / np.array(cls_num_list)\n per_cls_weights = per_cls_weights / np.sum(per_cls_weights)\n per_cls_weights = torch.FloatTensor(per_cls_weights).to(device)\n else:\n per_cls_weights = np.array([1,1])\n per_cls_weights = torch.FloatTensor(per_cls_weights).to(device)\n \n # loss\n if args['loss_type'] == \"CE\":\n betas = [0, args['beta'], args['beta'] * 2, args['beta']*3]\n loss_fn = CELoss(weight = per_cls_weights)\n elif args['loss_type'] == 'LDAM':\n max_m = args['max_m']\n s = args['s']\n betas = [0, args['beta'], args['beta'] * 2, args['beta']*3]\n loss_fn = LDAMLoss(cls_num_list, max_m = max_m, s = s, weight = per_cls_weights)\n elif args['loss_type'] == 'Focal':\n betas = [0, args['beta'], args['beta'] * 2, args['beta']*3]\n focal_gamma = args['focal_gamma']\n loss_fn = FocalLoss(weight = per_cls_weights, gamma = focal_gamma)\n else:\n betas = [0, args['beta'], args['beta'] * 2, args['beta']*3]\n loss_fn = CELoss(weight = per_cls_weights)\n \n # training process\n print(\"\\n======================= training process =======================\\n\")\n if args['use_DRW']:\n train_loss, train_acc, train_f1, valid_loss, valid_acc, valid_f1 = train_DRW(\n train_loader,\n valid_loader,\n model,\n optimizer,\n loss_fn,\n device,\n args['num_epoch'],\n args['verbose'],\n save_best_dir = save_best_dir,\n save_last_dir = save_last_dir,\n exp_dir = exp_dir,\n max_norm_grad = 1.0,\n betas = betas,\n cls_num_list = cls_num_list,\n model_type = \"single\",\n 
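# judging by the keyword name, passing the test loader here makes training run an extra test-set evaluation each epoch\n        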
test_for_check_per_epoch=test_loader,\n is_early_stopping = args['early_stopping'],\n early_stopping_verbose = args['early_stopping_verbose'],\n early_stopping_patience = args['early_stopping_patience'],\n early_stopping_delta = args['early_stopping_delta']\n )\n \n else:\n train_loss, train_acc, train_f1, valid_loss, valid_acc, valid_f1 = train(\n train_loader,\n valid_loader,\n model,\n optimizer,\n scheduler,\n loss_fn,\n device,\n args['num_epoch'],\n args['verbose'],\n save_best_dir = save_best_dir,\n save_last_dir = save_last_dir,\n exp_dir = exp_dir,\n max_norm_grad = 1.0,\n model_type = \"single\",\n test_for_check_per_epoch=test_loader,\n is_early_stopping = args['early_stopping'],\n early_stopping_verbose = args['early_stopping_verbose'],\n early_stopping_patience = args['early_stopping_patience'],\n early_stopping_delta = args['early_stopping_delta']\n )\n \n # plot the learning curve\n save_learning_curve = os.path.join(save_dir, \"{}_lr_curve.png\".format(tag))\n plot_learning_curve(train_loss, valid_loss, train_f1, valid_f1, figsize = (12,6), save_dir = save_learning_curve)\n \n # evaluation process\n print(\"\\n====================== evaluation process ======================\\n\")\n model.load_state_dict(torch.load(save_best_dir))\n \n save_conf = os.path.join(save_dir, \"{}_test_confusion.png\".format(tag))\n save_txt = os.path.join(save_dir, \"{}_test_eval.txt\".format(tag))\n \n test_loss, test_acc, test_f1 = evaluate(\n test_loader,\n model,\n optimizer,\n loss_fn,\n device,\n save_conf = save_conf,\n save_txt = save_txt\n )\n \n # compute the feature importance of the variables\n print(\"\\n====================== Feature Importance ======================\\n\")\n compute_permute_feature_importance(\n model,\n test_loader,\n ts_cols,\n loss_fn,\n device,\n 'single',\n 'loss',\n os.path.join(save_dir, \"{}_feature_importance.png\".format(tag))\n )\n \n # Additional analyzation\n print(\"\\n====================== Visualization process ======================\\n\")\n \n # reset the sampler\n train_loader = DataLoader(train_data, batch_size = 128, sampler=train_sampler_vis, num_workers = args[\"num_workers\"], pin_memory=args[\"pin_memory\"], drop_last=True)\n test_loader = DataLoader(test_data, batch_size = 128, sampler=test_sampler_vis, num_workers = args[\"num_workers\"], pin_memory=args[\"pin_memory\"], drop_last=True)\n \n try:\n visualize_2D_latent_space(\n model, \n train_loader,\n device,\n os.path.join(save_dir, \"{}_2D_latent_train.png\".format(tag)),\n 3,\n 'tSNE'\n )\n \n visualize_2D_latent_space(\n model, \n test_loader,\n device,\n os.path.join(save_dir, \"{}_2D_latent_test.png\".format(tag)),\n 3,\n 'tSNE'\n )\n \n except:\n print(\"{} : visualize 2D latent space doesn't work due to stability error\".format(tag))\n \n try:\n visualize_3D_latent_space(\n model, \n train_loader,\n device,\n os.path.join(save_dir, \"{}_3D_latent_train.png\".format(tag)),\n 3,\n 'tSNE'\n )\n \n visualize_3D_latent_space(\n model, \n test_loader,\n device,\n os.path.join(save_dir, \"{}_3D_latent_test.png\".format(tag)),\n 3,\n 'tSNE'\n )\n except:\n print(\"{} : visualize 3D latent space doesn't work due to stability error\".format(tag))\n \n # plot probability curve\n test_shot_num = args['test_shot_num']\n \n print(\"\\n================== Probability curve generation process ==================\\n\")\n generate_prob_curve_from_0D(\n model, \n device = device, \n save_dir = os.path.join(save_dir, \"{}_probs_curve_{}.png\".format(tag, test_shot_num)),\n ts_data_dir = 
\"./dataset/KSTAR_Disruption_ts_data_extend.csv\",\n ts_cols = ts_cols,\n shot_list_dir = './dataset/KSTAR_Disruption_Shot_List_extend.csv',\n shot_num = test_shot_num,\n seq_len = args['seq_len'],\n dist = args['dist'],\n dt = 4 / 210,\n scaler = ts_scaler\n )","repo_name":"ZINZINBIN/Disruption-Prediciton-based-on-Multimodal-Deep-Learning","sub_path":"train_0D_network.py","file_name":"train_0D_network.py","file_ext":"py","file_size_in_byte":19689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13053557288","text":"from dash import Dash, html, dcc, dash_table, callback\nimport plotly.graph_objs as go\nfrom datetime import datetime, timedelta\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport requests\nimport pandas as pd\nimport dash\nimport plotly.figure_factory as ff\n\ndash.register_page(__name__)\n# Aquí podrías definir tus figuras de Plotly para el análisis de componentes\n# Este es solo un ejemplo vacío\ndef download_ree(indicador,fecha_inicio,fecha_fin,time_trunc='day'):\n \n headers = {'Accept': 'application/json',\n 'Content-Type': 'applic= n - 1:\n return\n self.parent().moveStackWindow(idx, idx + 1)\n","repo_name":"Revitcivil/pyDynamo","sub_path":"pydynamo_brain/pydynamo_brain/ui/stackListWindow.py","file_name":"stackListWindow.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"22563960898","text":"# EXERCÍCIO 1\n\nwhile True:\n sexo = input(\"Digite seu sexo: \")\n if(sexo == \"M\" or sexo == \"F\"):\n break\n\n# EXERCÍCIO 2\n\nfrom random import randrange\nnumero = randrange(11)\ntentativa = 0\nwhile tentativa != numero:\n tentativa = int(input(\"Advinhe o número: \"))\nprint(\"Pimba! O número era {}!\".format(numero))\n\n# EXERCÍCIO 3\n\nsoma = 0\nquantidade = 0\nmaior = None\nmenor = None\nwhile True:\n digitado = input(\"Número: \")\n soma = soma + int(digitado)\n quantidade += 1\n \n if int(digitado) == None:\n maior = digitado\n elif int(digitado) > maior:\n maior = digitado\n if int(digitado) == None:\n menor = digitado\n if int(digitado) < menor:\n menor = digitado\n\n if digitado == \"stop\":\n break\n\nmedia = soma / quantidade\nprint(\"A média dos {} valores é {}.\".format(quantidade, media))\nprint(\"O maior valor digitado foi {}. O menor valor digitado foi {}\".format(maior, menor))\n\n# EXERCÍCIO 4\n\nwhile True:\n comando = input(\"Digite o salário (ou 'stop' para parar): R$ \")\n \n if(comando == \"stop\"):\n break\n elif (int(comando)*0.11) > 320: \n salario_desconto = int(comando) - 320\n desconto = 1-(salario_desconto / int(comando))\n print(\"O desconto foi de R$ 320.00, {:.2%} do valor do salário. 
O salário com desconto ficou R$ {:.2f}\".format(desconto, salario_desconto))\n else:\n salario_desconto = int(comando) * 0.89\n desconto = 1-(salario_desconto / int(comando))\n print(\"O salário com desconto ficou R$ {:.2f}\".format(salario_desconto, desconto))\n\n# EXERCÍCIO 5\n\nnumero = int(input(\"Digite o número de praias que deseja cadastrar: \"))\ncontador = 0\npraias_15_20 = 0\nsoma = 0\nmais_dist = ''\nmaior_dist = 0\nwhile contador < numero:\n nome = str(input(\"Digite o nome da praia: \"))\n dist = float(input(\"Digite a distância da praia do centro da cidade: \"))\n \n if (dist > maior_dist):\n mais_dist = nome\n maior_dist = dist\n \n if (dist >= 15 and dist <= 20):\n praias_15_20 += 1\n \n soma = soma + dist\n contador += 1\n\nmedia = soma / numero\n\nprint(\"A praia mais distante do centro da cidade é a {} e fica a {}km\".format(mais_dist, maior_dist))\nprint(\"{} praias estão entre 15km e 20km do centro da cidade\".format(praias_15_20))\nprint(\"A distância média das praias do centro da cidade é {:.2f}km\".format(media))\n\n# EXERCÍCIO FIBONACCI\n\nquantidade = int(input(\"Digite a quantidade de termos da sua sequência de Fibonacci - \"))\ncontador = 0\na = 0\nb = 1\nc = 0\n\nwhile contador < quantidade:\n if contador == (quantidade-1):\n print(c) # Para que não imprima um \"-\" depois do último termo\n break\n print(c, end=\" - \")\n a = b + c\n b = c\n c = a\n contador += 1","repo_name":"andrerocco/programacao-orientada-a-objetos","sub_path":"Exercicios/While (Aula 5).py","file_name":"While (Aula 5).py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35953621439","text":"\"\"\"Collection of utilities used by many parsers.\n\n\"\"\"\nimport os\nimport bz2\nimport gzip\nimport zipfile\n\n# need to do some backflips to support Python 2 bz2 behavior\n# bz2 in Python 2 doesn't have an open function, and in Python 3\n# the BZ2File class only does binary mode\ntry:\n bz2.open\nexcept AttributeError:\n bz2_open = bz2.BZ2File\nelse:\n def bz2_open(filename, mode):\n mode += 't'\n return bz2.open(filename, mode)\n\n# similar changes need to be made for gzip\n# gzip in Python 2 assumes text mode when 'r' is\n# specified and in Python 3 gzip assumes binary mode when\n# 'r' is specified\ntry:\n gzip.compress\nexcept AttributeError:\n gzip_open = gzip.open\nelse:\n def gzip_open(filename, mode):\n mode += 't'\n return gzip.open(filename, mode)\n\n\ndef anyopen(filename, mode='r'):\n \"\"\"Return a file stream for filename, even if compressed.\n\n Supports files compressed with bzip2 (.bz2), gzip (.gz), and zip (.zip)\n compression schemes. 
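Uncompressed files fall back to the built-in open.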
The appropriate extension must be present for\n the function to properly handle the file.\n\n Parameters\n ----------\n filename : str\n Path to file to use.\n mode : str\n Mode for stream; usually 'r' or 'w'.\n\n Returns\n -------\n stream : stream\n Open stream for reading.\n\n \"\"\"\n # opener for each type of file\n extensions = {'.bz2': bz2_open,\n '.gz': gzip_open,\n '.zip': zipfile.ZipFile}\n\n ext = os.path.splitext(filename)[1]\n\n if ext in extensions:\n opener= extensions[ext]\n\n else:\n opener = open\n\n return opener(filename, mode)\n","repo_name":"curioz/alchemlyb","sub_path":"src/alchemlyb/parsing/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"19995328255","text":"import graphene\nfrom room.models.location import Location\nfrom room.models.room import Room\nfrom room.models.order import Turn, Order, Unit, MoveType\nfrom graphqlAPI.query.table_type import OrderType\n\n\n# prepare a input arguments\nclass OrderInput(graphene.InputObjectType):\n # basic information, disable to be null\n room_id = graphene.ID(required=True)\n instruction = graphene.String(required=True)\n turn_id = graphene.ID(required=True)\n unit_id = graphene.ID(required=True)\n # move operation\n target_location_id = graphene.ID() \n # convoy operation only\n reference_unit_id = graphene.ID()\n reference_unit_current_location_id = graphene.ID()\n reference_unit_new_location_id = graphene.ID()\n\nclass UpdateOrder(graphene.Mutation):\n # reference from class OrderInput\n class Arguments:\n input = OrderInput(required=True)\n \n ok = graphene.Boolean() \n order = graphene.Field(OrderType)\n \n # Mutation to update a unit \n @classmethod\n def mutate(cls, root, info, input, id=None):\n\n order = Order.objects.get(room__id=input.room_id,\n turn__id=input.turn_id,\n unit__id=input.unit_id)\n\n order.instruction = input.instruction\n # order.turn = Turn.objects.get(pk=input.turn_id)\n # order.unit = Unit.objects.get(pk=input.unit_id)\n # while instruction is Convoy, allow further info to be stored.\n if order.instruction == MoveType.CONVOY or order.instruction == MoveType.SUPPORT:\n order.reference_unit = Unit.objects.get(pk=input.reference_unit_id)\n order.reference_unit_current_location = Location.objects.get(pk=input.reference_unit_current_location_id)\n order.reference_unit_new_location = Location.objects.get(pk=input.reference_unit_new_location_id)\n\n order.save()\n\n return UpdateOrder(ok=True, order=order)\n \n\nclass CreateOrder(graphene.Mutation):\n # reference from class OrderInput\n class Arguments:\n input = OrderInput(required=True)\n \n order = graphene.Field(OrderType)\n \n # Mutation to update a unit \n @classmethod\n def mutate(cls, root, info, input, id=None):\n turn = Turn.objects.get(pk=input.turn_id)\n room = Room.objects.get(pk=input.room_id)\n if turn == None:\n turn = Turn.objects.create(year=room.current_turn)\n\n order = Order.objects.create(\n instruction = input.instruction,\n turn = Turn.objects.create(pk=input.turn_id),\n target_unit = Unit.objects.get(pk=input.unit_id))\n \n if order.instruction == MoveType.MOVE:\n order.target_location = Location.objects.get(pk=input.target_location_id)\n \n # while instruction is Convoy, allow further info to be stored.\n if order.instruction == MoveType.CONVOY or order.instruction == MoveType.SUPPORT:\n order.reference_unit = Unit.objects.get(pk=input.reference_unit_id)\n order.reference_unit_current_location = 
Location.objects.get(pk=input.reference_unit_current_location_id)\n order.reference_unit_new_location = Location.objects.get(pk=input.reference_unit_new_location_id)\n \n order.save()\n\n return UpdateOrder(ok=True, order=order)","repo_name":"h0nter/ESRS-diplomacy","sub_path":"backend/roomproject/graphqlAPI/mutation/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"8535503169","text":"import sys, os\nsys.path.append(os.path.abspath('../../src/StreamCipher'))\nfrom vernam import *\n\nsecretkey = input('Enter the Secret Key: ')\nmsg = input('Enter the message to be protected: ')\nenc = encrypt(msg, secretkey)\nprint('Encrypted message:')\nprint(enc)\ndec = decrypt(enc, secretkey)\nprint('Decrypted message:')\nprint(dec)\n","repo_name":"pankajpatro703/digitalCom-lib","sub_path":"tests/StreamCipher/ex_vernam.py","file_name":"ex_vernam.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"12714071291","text":"import requests \nfrom bs4 import BeautifulSoup\nimport csv\n\nURL = 'https://www.syllabuskrk.agh.edu.pl/2016-2017/pl/magnesite/study_plans/stacjonarne-fizyka-medyczna'\npage = requests.get(URL)\n\nsoup = BeautifulSoup(page.content, 'html.parser')\n\ntables = soup.find_all('table')\n\noutfile = open(\"przedmioty3.csv\", \"w\")\ncsvwriter = csv.writer(outfile)\n\nfor table in tables:\n semnumid = table.get('id')\n semnum = str(semnumid)\n splitted = semnum.split(\"-\")\n print(splitted[2])\n\n tds = table.find_all('td', class_=\"subject_name\")\n for td in tds:\n ahref = td.find('a')\n if \"kurs obowiązkowy\" in ahref.text:\n continue\n csvwriter.writerow([ahref.text])\n\n\n","repo_name":"lewelyn7/Database_systems_project","sub_path":"src/przedmioty.py","file_name":"przedmioty.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"13293037233","text":"# JSON -> JavaScript Object Notation\n# the json module stores Python objects as text and reads them back\nimport json\n\nj = '{\"a\":2,\"b\":4,\"c\":5}'\nb = json.loads(j) # parse the JSON string into a Python dict\nprint(b)\n\n# write a json file\nwith open('1.json','w') as f:\n json.dump(b, f) # dump the parsed object; dumping the raw string j would double-encode it\n\n# for more, see the json module in the Python documentation\n","repo_name":"harsh96902/All-programs","sub_path":"Data Science programs/09_json_file_working.py","file_name":"09_json_file_working.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"42133993348","text":"# https://leetcode.com/problems/n-ary-tree-level-order-traversal/\n\nfrom typing import List\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n # BFS\n if root is None:\n return []\n \n levels = []\n \n queue = [root]\n \n while queue:\n layer = queue\n queue = []\n nodes = []\n for node in layer:\n queue.extend(node.children)\n nodes.append(node.val)\n levels.append(nodes)\n \n return levels\n ","repo_name":"CzJLee/LeetCode","sub_path":"429.py","file_name":"429.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
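The json snippet in the record above shows only json.loads and json.dump; the module's four entry points pair up as string vs. file operations. A minimal self-contained sketch (the file name here is made up for illustration):

import json

data = {"a": 2, "b": 4, "c": 5}

text = json.dumps(data)            # object -> JSON string
assert json.loads(text) == data    # JSON string -> object

with open("example.json", "w") as f:
    json.dump(data, f)             # object -> file

with open("example.json") as f:
    assert json.load(f) == data    # file -> object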
+{"seq_id":"9841683915","text":"\nfrom openpyxl import load_workbook\nfrom openpyxl.drawing.image import Image\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import WebDriverException,NoSuchElementException \nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom urllib.error import URLError, HTTPError\nimport datetime,time, os\n\nclass BasePage(object):\n def __init__(self, driver):\n self.driver = driver\n\nclass MainFunction(BasePage):\n def kvCTATest(self,excelName) :\n \n wb = load_workbook(excelName)\n ws = wb.active\n start = time.time() # 시작\n\n for i in range(2, ws.max_row+1) :\n country = ws['B'+str(i)].value\n country_url = ws['C'+str(i)].value\n print(i-1,country,country_url)\n try: \n self.driver.maximize_window()\n self.driver.get(country_url)\n time.sleep(2)\n\n #쿠키 바 닫기\n self.driver.execute_script(\"$('.cookie-bar__close').click()\")\n self.driver.execute_script(\"$('.truste-button1').click()\")\n \n # 타겟 경로 구함\n kv_div = self.driver.find_element(By.CSS_SELECTOR,'.highlights-kv__text')\n kv_div_a = self.driver.find_element(By.CSS_SELECTOR,'.highlights-kv__text a')\n \n banner_div = self.driver.find_element(By.CSS_SELECTOR,'#contents > div.common-banner > div > div.common-banner__item.common-banner__buynow > div > div.common-banner__text > div > div')\n banner_div_a = self.driver.find_element(By.CSS_SELECTOR,'#contents > div.common-banner > div > div.common-banner__item.common-banner__buynow > div > div.common-banner__text > div > div > a')\n \n folder_name = \"KVCTA_Screenshot\"\n current_directory = os.getcwd()\n new_folder_path = os.path.join(current_directory,folder_name)\n if not os.path.exists(new_folder_path):\n os.makedirs(new_folder_path)\n else:\n pass\n\n kv_shot = kv_div.screenshot(folder_name+'\\\\'+'KV_'+country+\"_D3.png\")\n \n kv_cta = kv_div_a.text\n print('KV CTA 이름'+kv_cta)\n banner_cta = banner_div.text\n print('banner_cta 이름 ',banner_cta)\n kv_url = kv_div_a.get_attribute('href')\n banner_url = banner_div_a.get_attribute('href')\n \n #스샷 저장\n time.sleep(3) \n self.driver.execute_script(\"window.scrollTo(0,22145)\")\n \n\n banner_shot = banner_div.screenshot(folder_name+'\\\\'+'banner_'+country+\"_D3.png\")\n \n if kv_cta is not None: \n print('kv_ctaurl',kv_url)\n ws['D'+str(i)] = kv_cta \n ws['E'+str(i)] = kv_url \n kv_img = Image(folder_name+'\\\\'+'KV_'+country+\"_D3.png\")\n ws.add_image(kv_img, 'L'+str(i))\n time.sleep(1)\n else :\n print('CTA 없음')\n ws['D'+str(i)] = \"비노출\"\n time.sleep(1)\n\n if banner_cta is not None: \n print('banner_ctaurl',banner_url)\n ws['H'+str(i)] = banner_cta \n ws['I'+str(i)] = banner_url \n banner_img = Image(folder_name+'\\\\'+'banner_'+country+\"_D3.png\")\n banner_img.height =90\n banner_img.width =240\n ws.add_image(banner_img, 'M'+str(i))\n time.sleep(1)\n else :\n print('banner CTA 없음')\n ws['H'+str(i)] = \"비노출\"\n time.sleep(1)\n except NoSuchElementException :\n print('찾은 엘레먼트가 없음')\n except WebDriverException:\n print('Driver ERROR')\n except HTTPError as e : \n print(\"ERROR\"+str(e.code))\n except URLError as e :\n print(\"reason\"+str(e.reason)) \n \n wb.save('Result_KV_CTA(D3)'+'.xlsx')\n sec = time.time()-start # 종료\n times = str(datetime.timedelta(seconds=sec))\n short = times.split(\".\")[0] #초단위 \n\n print('완료시간',f\"{short} 
sec\")","repo_name":"yunsk/pyselenium","sub_path":"functions/kvCTA.py","file_name":"kvCTA.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43355675683","text":"# Embedded file name: scripts/client/clientEconomics/DamageHandler.py\nimport BigWorld\nfrom clientEconomics.EconomicsModelView import EconomicsModelView\nfrom economics.EconomicsConfigParser import PARAMS_FIELD, REWARD_FIELD, TEXT_FIELD\n\nclass DamageHandler(object):\n \"\"\"Damage economic events handling logic\n \"\"\"\n ATTACK_TIMEOUT = 3.5\n\n def __init__(self, configuration):\n self._configuration = configuration\n self._currentTargetID = 0\n self._targetIDWithAccumulatedDamage = 0\n self._targetAccumulator = 0\n self._lastTargetDamageTime = 0\n\n def handleDamageEvent(self, eventData):\n \"\"\"Handle damage event received from server. Return event view data for HUD\n :param eventData: Event data received from server\n :return: (Gained battle points, Update view data)\n :rtype: (int, dict)\n \"\"\"\n currentTime = BigWorld.time()\n eventID, rewardsAmount, victimID = self._unpackEventData(eventData)\n reward = self._getEventReward(eventID)\n if victimID != self._currentTargetID:\n self._lastTargetDamageTime = 0\n isAccumulatorTimeout = currentTime - self._lastTargetDamageTime > self.ATTACK_TIMEOUT\n if victimID != self._currentTargetID or isAccumulatorTimeout:\n self._currentTargetID = victimID\n self._targetAccumulator = 0\n totalReward = int(rewardsAmount * reward)\n updateViewData = None\n if totalReward != 0:\n self._targetAccumulator += totalReward\n accumulatorResetted = self._currentTargetID != self._targetIDWithAccumulatedDamage or isAccumulatorTimeout\n self._targetIDWithAccumulatedDamage = self._currentTargetID\n updateViewData = self._buildUpdateViewData(eventID, self._targetAccumulator, accumulatorResetted)\n elif isAccumulatorTimeout:\n self._targetIDWithAccumulatedDamage = 0\n self._lastTargetDamageTime = currentTime\n return (totalReward, updateViewData)\n\n def destroy(self):\n self._configuration = None\n return\n\n @staticmethod\n def _unpackEventData(eventData):\n \"\"\"Unpack event data received from server\n :param eventData: Server event data\n :return: (eventID, rewards amount, victimID)\n :rtype: (str, int, int)\n \"\"\"\n eventID, payload = eventData\n rewardsAmount, victimID = payload\n return (eventID, rewardsAmount, victimID)\n\n def _getEventReward(self, eventID):\n \"\"\"Return battle points reward for specified event\n :param eventID: Event identifier\n :return: Battle points reward\n :rtype: int\n \"\"\"\n eventConfiguration = self._configuration[eventID]\n return eventConfiguration[PARAMS_FIELD][REWARD_FIELD]\n\n def _getEventDescriptionText(self, eventID):\n \"\"\"Return event description text localization tag\n :param eventID: Event identifier\n :return: Description localization tag\n :rtype: str\n \"\"\"\n eventConfiguration = self._configuration[eventID]\n return eventConfiguration[PARAMS_FIELD][TEXT_FIELD]\n\n def _buildUpdateViewData(self, eventID, accumReward, accumResetted):\n \"\"\"Build update view data based on event data\n \"\"\"\n return {'type': EconomicsModelView.SIMPLE_EVENT_TYPE,\n 'isNew': accumResetted,\n 'description': self._getEventDescriptionText(eventID),\n 'points': 
accumReward}","repo_name":"SEA-group/wowp_scripts","sub_path":"client/clientEconomics/DamageHandler.py","file_name":"DamageHandler.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21303044856","text":"from __future__ import absolute_import, division, print_function\n\nfrom subprocess import check_call\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=str(k), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=str(dep), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n g.save(filename + '.dot')\n\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n","repo_name":"soaxelbrooke/dask","sub_path":"dask/dot.py","file_name":"dot.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"24065774315","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport time\n\n\ndef postResults(commitUuid, payload, projectToken, baseUrl):\n url = f'{baseUrl}/2.0/commit/{commitUuid}/issuesRemoteResults'\n headers = {\n 'content-type': 'application/json',\n 'project-token': projectToken\n }\n response = requests.post(url, headers=headers, data=json.dumps(payload))\n print(response)\n print(response.text)\n\n\n\ndef resultsfinal(commitUuid, projectToken, baseUrl):\n url = 
f'{baseUrl}/2.0/commit/{commitUuid}/resultsFinal'\n headers = {\n 'content-type': 'application/json',\n 'project-token': projectToken\n }\n response = requests.post(url, headers=headers)\n print(response)\n print(response.text)\n\n\ndef loadPatterns(baseUrl):\n shortName = 'spotbugs'\n url = f'{baseUrl}/api/v3/tools'\n headers = {\n 'content-type': 'application/json',\n }\n tools = json.loads(requests.get(url, headers=headers).text)['data']\n spotbugs = next(item for item in tools if item[\"shortName\"] == shortName)\n patternsUrl = f'{baseUrl}/api/v3/tools/{spotbugs[\"uuid\"]}/patterns?limit=1000'\n headers = {\n 'content-type': 'application/json',\n }\n patterns = json.loads(requests.get(patternsUrl, headers=headers).text)['data']\n return patterns\n\n\ndef checkLevelForPattern(patterns, patternId):\n for p in patterns:\n if p['id'] == patternId:\n return p['level']\n return ''\n\n\ndef checkMessageForPattern(patterns, patternId):\n for p in patterns:\n if p['id'] == patternId:\n return p['title']\n return ''\n\n\ndef checkCategoryForPattern(patterns, patternId):\n for p in patterns:\n if p['id'] == patternId:\n return p['category']\n return ''\n\n\ndef process(reportPath, commitUuid, projectToken, baseDir, baseUrl,):\n patterns = loadPatterns(baseUrl)\n with open(reportPath, 'r') as f:\n data = f.read()\n Bs_data = BeautifulSoup(data, \"xml\")\n srcDir = Bs_data.find('SrcDir').text\n if not baseDir.endswith('/'):\n baseDir = baseDir+'/'\n srcDir = srcDir.replace(baseDir, '')\n bug_instances = Bs_data.find_all('BugInstance')\n bugs = []\n for bi in bug_instances:\n type = bi.get('type')\n sourceLine = bi.find('SourceLine', recursive=False).get('start')\n sourcePath = bi.find(\n 'SourceLine', recursive=False).get('sourcepath')\n message = bi.find('LongMessage').text\n # print(bi)\n #print(type, sourceLine, sourcePath, message)\n bugs.append({\n 'source': srcDir+'/'+sourcePath,\n 'line': sourceLine,\n 'type': type,\n 'message': checkMessageForPattern(patterns, type),\n 'level': checkLevelForPattern(patterns, type),\n 'category': checkCategoryForPattern(patterns, type)\n })\n\n groups = {}\n for obj in bugs:\n if(not obj['source'] in groups):\n groups[obj['source']] = {\n 'filename': obj['source'],\n 'results': []\n }\n groups[obj['source']]['results'].append(\n {\n 'Issue': {\n 'patternId': {\n 'value': obj['type']\n },\n 'filename': obj['source'],\n 'message': {\n 'text': obj['message']\n },\n 'level': obj['level'],\n 'category': obj['category'],\n 'location': {\n \"LineLocation\": {\n \"line\": int(obj['line'])\n }\n }\n\n }\n }\n )\n\n payload = [{\n 'tool': 'spotbugs',\n 'issues': {\n 'Success': {\n 'results': list(groups.values())\n }\n }\n }]\n \n postResults(commitUuid, payload, projectToken, baseUrl)\n time.sleep(5)\n resultsfinal(commitUuid, projectToken, baseUrl)\n\n\ndef main():\n print('Welcome to Codacy Spotbugs Parser')\n parser = argparse.ArgumentParser(description='Codacy Spotbugs Parser')\n parser.add_argument('--report-path', dest='reportPath',\n default=None, help='path to the spotbugs report')\n parser.add_argument('--project-token', dest='projectToken', default=None,\n help='the project-token to be used on the REST API')\n parser.add_argument('--commit-uuid', dest='commitUuid', default=None,\n help='the commit uuid')\n parser.add_argument('--basedir', dest='baseDir',\n default=None, help='where code is clonned')\n parser.add_argument('--baseurl', dest='baseUrl', default='https://api.codacy.com',\n help='codacy server address (ignore if cloud)')\n args = 
parser.parse_args()\n process(args.reportPath,args.commitUuid, args.projectToken, args.baseDir, args.baseUrl)\n\n\nmain()\n","repo_name":"codacy-acme/spotbugs-to-codacy","sub_path":"spotbugs-parser.py","file_name":"spotbugs-parser.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22663235352","text":"# Echo client program\n# This is a simple test client\nimport socket\nimport sys\nPORT = 80 # The same port as used by the server\ndef connect(HOST):\n s = None\n for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n try:\n s = socket.socket(af, socktype, proto)\n except socket.error as msg:\n s = None\n continue\n try:\n s.connect(sa)\n except socket.error as msg:\n s.close()\n s = None\n continue\n break\n if s is None:\n print ('could not open socket')\n sys.exit(1)\n s.close()\n","repo_name":"GiuseppeVadrucci/Libra","sub_path":"libra/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"25283211959","text":"import json\nimport platform\nimport re\nfrom urllib.request import urlopen\n\nMACHINE_SUFFIX = {\n \"Darwin\": {\n \"arm64\": \"aarch64-apple-darwin-install_only.tar.gz\",\n \"x86_64\": \"x86_64-apple-darwin-install_only.tar.gz\",\n },\n \"Linux\": {\n \"aarch64\": {\n \"glibc\": \"aarch64-unknown-linux-gnu-install_only.tar.gz\",\n # musl doesn't exist\n },\n \"x86_64\": {\n \"glibc\": \"x86_64_v3-unknown-linux-gnu-install_only.tar.gz\",\n \"musl\": \"x86_64_v3-unknown-linux-musl-install_only.tar.gz\",\n },\n },\n \"Windows\": {\n \"AMD64\": \"x86_64-pc-windows-msvc-shared-install_only.tar.gz\"\n }\n}\n\nGITHUB_API_URL = f\"https://api.github.com/repos/indygreg/python-build-standalone/releases/latest\"\nPYTHON_VERSION_REGEX = re.compile(r\"cpython-(\\d+\\.\\d+\\.\\d+)\")\n\n\nclass NotAvailable(Exception):\n \"\"\"Raised when the asked Python version is not available.\"\"\"\n\n\ndef get_latest_python_releases() -> list[str]:\n \"\"\"Returns the list of python download links from the latest github release.\"\"\"\n with urlopen(GITHUB_API_URL) as response:\n release_data = json.load(response)\n\n return [asset[\"browser_download_url\"] for asset in release_data[\"assets\"]]\n\n\ndef list_pythons() -> dict[str, str]:\n \"\"\"Returns available python versions for your machine and their download links.\"\"\"\n system, machine = platform.system(), platform.machine()\n download_link_suffix = MACHINE_SUFFIX[system][machine]\n # linux suffixes are nested under glibc or musl builds\n if system == \"Linux\":\n # fallback to musl if libc version is not found\n libc_version = platform.libc_ver()[0] or \"musl\"\n download_link_suffix = download_link_suffix[libc_version]\n\n python_releases = get_latest_python_releases()\n\n available_python_links = [\n link for link in python_releases if link.endswith(download_link_suffix)\n ]\n\n python_versions: dict[str, str] = {}\n for link in available_python_links:\n match = PYTHON_VERSION_REGEX.search(link)\n assert match is not None\n python_version = match[1]\n python_versions[python_version] = link\n\n sorted_python_versions = {\n version: python_versions[version]\n for version in sorted(\n python_versions,\n # sort by semver\n key=lambda version: [int(k) for k in version.split(\".\")],\n reverse=True,\n )\n }\n return sorted_python_versions\n\n\ndef 
resolve_python_version(requested_version: str) -> tuple[str, str]:\n pythons = list_pythons()\n\n for version, version_download_link in pythons.items():\n if version.startswith(requested_version):\n python_version = version\n download_link = version_download_link\n break\n else:\n raise NotAvailable\n\n return python_version, download_link\n","repo_name":"tusharsadhwani/yen","sub_path":"src/yen/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"57"} +{"seq_id":"1353682125","text":"#!/usr/bin/env python3\n\"\"\"Program that implements a full training\"\"\"\n\nimport numpy as np\nfrom policy_gradient import policy_gradient\n\n\ndef train(env, nb_episodes, alpha=0.000045, gamma=0.98, show_result=False):\n \"\"\"\n implements a full training.\n\n Args:\n env: initial environment\n nb_episodes: number of episodes used for training\n alpha: the learning rate\n gamma: the discount factor\n\n Return:\n all values of the score (sum of all rewards during one episode loop)\n \"\"\"\n weight = np.random.rand(4, 2)\n sco_reward = []\n\n for esc in range(nb_episodes):\n state = env.reset()[None, :]\n grads = []\n rewards = []\n v_score = 0\n\n while True:\n if show_result and (esc % 1000 == 0):\n env.render()\n\n action, grad = policy_gradient(state, weight)\n new_state, reward, done, _ = env.step(action)\n grads.append(grad)\n rewards.append(reward)\n v_score += reward\n state = new_state[None, :]\n\n if done:\n break\n\n for i in range(len(grads)):\n enum_s = enumerate(rewards[i:])\n # discounted return: sum over k of gamma**k * r_(t+k)\n weight += (\n alpha * grads[i] *\n sum([gamma**k * res for k, res in enum_s])\n )\n sco_reward.append(v_score)\n print(\"{}: {}\".format(esc, v_score), end=\"\\r\", flush=False)\n\n return sco_reward\n","repo_name":"andresvanegas19/holbertonschool-machine_learning","sub_path":"reinforcement_learning/0x03-policy_gradients/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37082904966","text":"from myutils.config import globalconfig\nimport sqlite3\nimport winsharedutils \nimport os\nfrom traceback import print_exc\nclass linggesi():\n def __init__(self):\n self.sql=None\n try:\n if os.path.exists(os.path.join(globalconfig['cishu']['linggesi']['path'] ,'ja-zh.db'))==False or \\\n os.path.exists(os.path.join(globalconfig['cishu']['linggesi']['path'] ,'ja-zh-gbk.db'))==False:\n return \n self.sql=sqlite3.connect(os.path.join(globalconfig['cishu']['linggesi']['path'] ,'ja-zh.db'),check_same_thread=False)\n self.sql2=sqlite3.connect(os.path.join(globalconfig['cishu']['linggesi']['path'] ,'ja-zh-gbk.db'),check_same_thread=False)\n except:\n pass\n def end(self):\n self.sql.close()\n self.sql2.close()\n def search(self,word):\n \n mp={}\n for sql in [self.sql,self.sql2]:\n x=sql.execute(\"select word,content from entry where word like ?\",('%{}%'.format(word),))\n exp=x.fetchall()\n \n \n for w,xx in exp:\n \n d=winsharedutils.distance(w,word) \n mp[w]=[xx,d]\n \n\n \n \n x=sorted(list(mp.keys()),key=lambda x: mp[x][1])[:10]\n save=[w+'
'+mp[w][0] for w in x]\n return '
'.join(save)\n ","repo_name":"HIllya51/LunaTranslator","sub_path":"LunaTranslator/LunaTranslator/cishu/linggesi.py","file_name":"linggesi.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":2068,"dataset":"github-code","pt":"57"} +{"seq_id":"31179414895","text":"# -*- coding: utf-8 -*-\nimport turtle\n\n# 绘制彩色螺旋图\ncolors = ['pink', 'orange', 'gold', 'yellow', 'green', 'cyan']\nturtle.speed(0)\nturtle.begin_fill()\nfor x in range(360):\n turtle.pencolor(colors[x%6]) # 画笔颜色\n turtle.width(x / 100 + 1) # 画笔宽度,与turtle.pensize()一样\n # print(x//100 + 1) # x/100 是浮点数, x//100 是整数\n turtle.forward(x)\n turtle.left(59)\n\nturtle.end_fill()\nturtle.hideturtle()\nturtle.done()","repo_name":"RUIZRUI/python-advanced","sub_path":"turtle_spin2.py","file_name":"turtle_spin2.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73122474738","text":"#강사님 솔루션\nT = int(input())\n \nfor tc in range(1, T + 1): \n\n N = int(input())\n count = 1\n #리스트에 인덱스를 붙이기 위해 체크리스트, 숫자가 나오면 1로 바꾸자\n check = [0 for _ in range(10)] # =[0,0,0,0,0,0,0,0,0,0]\n # 1. 주어진 N을 증가시킨다.\n #1N 2N 3N....\n #2.각 회차에서 나오는 수를 분리한다.\n #EX) 1235-> 1 2 3 4로 나눠서 보기 1234 => '1234'\n #3. 위에서 나눈 숫자 체크하기\n result = None\n while True:\n number = str(N * count)\n for num in number: \n idx = int(num) #num을 인덱스로 활용해서 체크\n check[idx] = 1 #얘는지금 str이니까 int로 변환\n #다음으로 가기 전에 다 체크했는지 확인\n #체크 배열의 전체가 1로 표기되었는지 확인\n is_all_checked = True\n for i in check: #0,1,1,..(인덱스가 아니라 실제값)\n if i == 0:\n is_all_checked = False # 0이 나오는순간 거짓으로바꿔\n break #아직 완료아니니까\n if is_all_checked:\n result = number\n break #모든 리스트의 요소가 1로ㅗ 채워져있음 while종료\n count += 1 \n \n print(f'#{tc} {result}')\n #4. 0~9까지 다 체크했으면 종료하기\n #그리고 그때의 회차를 정답으로 프린트\n","repo_name":"hannahN12/Algorithm","sub_path":"SWEA/D2/1288_teacher.py","file_name":"1288_teacher.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27721215323","text":"\"\"\"\nEn este ejemplo, se define la clase NFN que hereda de la clase torch.autograd.Function. Esta clase define las funciones forward y backward que se utilizan para calcular los resultados de la red y los gradientes respectivamente. La función forward calcula la multiplicación matricial entre la entrada y los pesos, mientras que la función backward calcula los gradientes de la entrada y los pesos.\nPara utilizar esta clase, se crea un tensor de entrada x y un tensor de pesos w. Luego, se llama a la función NFN.apply() para calcular la salida de la red y. 
Finalmente, se calculan los gradientes de y con respecto a x y w utilizando la función y.sum().backward().\n\"\"\"\n\nimport torch\n\nclass NFN(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, input, weight):\n ctx.save_for_backward(input, weight)\n output = input.mm(weight.t())\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input, weight = ctx.saved_tensors\n grad_input = grad_weight = None\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.mm(weight)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().mm(input)\n return grad_input, grad_weight\n\nx = torch.randn(3, 5, requires_grad=True)\nw = torch.randn(2, 5, requires_grad=True)\ny = NFN.apply(x, w)\ny.sum().backward()\n\nprint(x.grad)\nprint(w.grad)\n","repo_name":"M48-3/SciLat_ciencia_tg","sub_path":"Red Neuronal Funcional (NFN) PyTorch.py","file_name":"Red Neuronal Funcional (NFN) PyTorch.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29570141151","text":"import rclpy\nfrom rclpy.node import Node\nfrom nav_msgs.msg import Odometry\nfrom tf2_ros.transform_broadcaster import TransformBroadcaster\nfrom geometry_msgs.msg import TransformStamped, Twist\nfrom neato_interfaces.srv import LidarOnOff\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import JointState\nfrom .neato_driver import *\nimport serial\nimport math\nimport numpy as np\n\nclass neato_node(rclpy.node.Node):\n\n def __init__(self):\n super().__init__('Neato_node')\n \n self.declare_parameter('base_frame','base_link')\n self.declare_parameter('odom_frame', 'odom')\n self.declare_parameter('neato_port','/dev/ttyACM0')\n self.declare_parameter('wheel_track', 0.240)\n self.declare_parameter('wheel_radius', 0.0381)\n self.declare_parameter('laser_frame','laser')\n self.declare_parameter('max_x_speed', 0.30)\n self.declare_parameter('max_z_speed', 0) # unlimited\n self.declare_parameter('enable_odom', True)\n self.declare_parameter('enable_scan', True)\n self.declare_parameter('enable_joint', True)\n \n self.get_logger().info('base_frame: '+ self.get_parameter('base_frame').get_parameter_value()._string_value)\n self.get_logger().info('odom_frame: '+ self.get_parameter('odom_frame').get_parameter_value()._string_value)\n self.get_logger().info('laser_frame: '+ self.get_parameter('laser_frame').get_parameter_value()._string_value)\n self.get_logger().info('neato_port: ' + self.get_parameter('neato_port').get_parameter_value()._string_value)\n self.get_logger().info('wheel_track: '+ str(self.get_parameter('wheel_track').get_parameter_value()._double_value))\n self.get_logger().info('wheel_radius: '+ str(self.get_parameter('wheel_radius').get_parameter_value()._double_value))\n self.get_logger().info('max_x_speed: '+ str(self.get_parameter('max_x_speed').get_parameter_value()._double_value))\n self.get_logger().info('max_z_speed: '+ str(self.get_parameter('max_z_speed').get_parameter_value()._double_value))\n self.get_logger().info('enable_odom: '+ str(self.get_parameter('enable_odom').get_parameter_value().bool_value))\n self.get_logger().info('enable_scan: '+ str(self.get_parameter('enable_scan').get_parameter_value().bool_value))\n self.get_logger().info('enable_joint: '+ str(self.get_parameter('enable_joint').get_parameter_value().bool_value))\n \n self.left_wheel_pos_prev = 0\n self.right_wheel_pos_prev = 0\n self.x = 0\n self.y = 0\n self.th = 0\n self.moving_prev = 0\n self.useOdom = 
self.get_parameter('enable_odom').get_parameter_value().bool_value\n self.useLaser = self.get_parameter('enable_scan').get_parameter_value().bool_value\n self.useJoint = self.get_parameter('enable_joint').get_parameter_value().bool_value\n self.delta_left_wheel = 0\n self.delta_right_wheel = 0\n self.cmd_vel = None\n self.max_x_speed = self.get_parameter('max_x_speed').get_parameter_value()._double_value\n self.max_z_speed = self.get_parameter('max_z_speed').get_parameter_value()._double_value\n \n self.start_neato()\n\n if self.useJoint:\n self.init_joint()\n \n if self.useOdom:\n self.init_odom()\n \n if self.useLaser:\n self.init_laser()\n\n self.init_lidar_service() \n \n self.cmd_sub = self.create_subscription(Twist, '/cmd_vel', self.cmd_callback, 1)\n \n def init_joint(self):\n self.wheel_pub = self.create_publisher(JointState, '/joint_states', 1)\n self.wheel_msg = JointState()\n self.wheel_msg.name.append(\"wheel_left_joint\")\n self.wheel_msg.name.append(\"wheel_right_joint\")\n self.wheel_msg.position.append(0.0)\n self.wheel_msg.position.append(0.0)\n\n def init_lidar_service(self):\n self.lidar_srv = self.create_service(LidarOnOff, 'neato_node/lidar_on_off', self.lidar_srv_callback, qos_profile=rclpy.qos.qos_profile_services_default) \n\n def init_odom(self):\n self.odom_pub = self.create_publisher(Odometry, '/odom', 1)\n self.odom_broadcaster = TransformBroadcaster(self)\n \n def init_laser(self):\n self.laser_pub = self.create_publisher(LaserScan, '/scan', rclpy.qos.qos_profile_sensor_data)\n SetLDSRotation(True)\n \n self.scan = LaserScan()\n \n # set fixed things\n self.scan.angle_min = 0.0\n self.scan.angle_max = 359.0 * math.pi / 180.0\n self.scan.angle_increment = math.pi / 180.0\n self.scan.range_min = 0.016\n self.scan.range_max = 6.0\n \n def lidar_srv_callback(self, request, response):\n self.get_logger().info('Request received, Lidar: %d' % (request.lidar_enable))\n\n try:\n response.success = True \n if request.lidar_enable:\n SetLDSRotation(True)\n self.useLaser = True\n elif request.lidar_enable is False:\n SetLDSRotation(False)\n self.useLaser = False\n else:\n response.success = False\n \n except:\n response.success = False\n \n return response \n\n def start_neato(self):\n try:\t\n port = self.get_parameter(\"neato_port\").get_parameter_value().string_value\n init(port,False)\n self.get_logger().info('successfully established connection with neato on ' + port)\n TestMode(True)\n SetLED(BacklightStatus.On, ButtonColors.Green)\n PlaySound(Sounds.WakingUp)\n except serial.serialutil.SerialException:\n self.get_logger().error('could not open ' + port)\n rclpy.shutdown()\n\n def odomPub(self):\n wheel_radius = self.get_parameter('wheel_radius').get_parameter_value()._double_value\n lastTime = self.get_clock().now()\n motors = GetMotors(leftWheel=True, rightWheel=True)\n left_wheel_pos = motors.get(\"LeftWheel_PositionInMM\") / 1000.0\n right_wheel_pos = motors.get(\"RightWheel_PositionInMM\") / 1000.0\n \n # if statement replaces having to set the (right/left)_wheel_pos_prev in the init_odom() in ROS1 neato package by brannonvann as to not have big jumps in position when there was none\n if self.left_wheel_pos_prev != 0 and self.right_wheel_pos_prev != 0 and right_wheel_pos != 0 and left_wheel_pos != 0:\n self.delta_left_wheel = (\n left_wheel_pos - self.left_wheel_pos_prev\n ) # left wheel delta in meters\n self.delta_right_wheel = (\n right_wheel_pos - self.right_wheel_pos_prev\n ) # right wheel delta in meters\n \n delta_time = self.get_clock().now() - lastTime 
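# A worked numeric example of the differential-drive odometry update below\n # (illustrative values, not taken from a real run): with\n # delta_left_wheel = 0.10 m, delta_right_wheel = 0.12 m and\n # wheel_track = 0.240 m, the arithmetic that follows gives\n # ds = (0.10 + 0.12) / 2 = 0.11 m and\n # dth = math.atan2(0.12 - 0.10, 0.240), approximately 0.0831 rad,\n # so the pose advances along the intermediate heading th + dth / 2.\n 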
# find delta time to be used in nav_msgs/Odometry message\n self.left_wheel_pos_prev = left_wheel_pos\n self.right_wheel_pos_prev = right_wheel_pos\n \n if self.useJoint:\t\n self.wheel_msg.header.stamp = self.get_clock().now().to_msg()\n if left_wheel_pos != 0:\n self.wheel_msg.position[0] = left_wheel_pos/wheel_radius\n if right_wheel_pos != 0:\n self.wheel_msg.position[1] = right_wheel_pos/wheel_radius\n\n self.wheel_pub.publish(self.wheel_msg)\n \n ds = (\n self.delta_left_wheel + self.delta_right_wheel\n ) / 2.0\n \n dth = math.atan2(self.delta_right_wheel - self.delta_left_wheel, self.get_parameter('wheel_track').get_parameter_value()._double_value) # turn angle\n \n self.x += ds * math.cos(self.th + dth / 2.0)\n self.y += ds * math.sin(self.th + dth / 2.0)\n self.th += dth\n\n # setup TransformStamped() that will broadcast (default: odom -> base_link)\n t = TransformStamped()\n\n # setup header\n t.header.stamp = self.get_clock().now().to_msg()\n t.header.frame_id = self.get_parameter('odom_frame').get_parameter_value().string_value\n t.child_frame_id = self.get_parameter('base_frame').get_parameter_value().string_value\n\n # initialize transform\n t.transform.translation.x = self.x\n t.transform.translation.y = self.y\n t.transform.translation.z = 0.0\n\n q = quaternion_from_euler(0, 0, self.th) # convert eular to quaternion rotation\n t.transform.rotation.x = q[0]\n t.transform.rotation.y = q[1]\n t.transform.rotation.z = q[2]\n t.transform.rotation.w = q[3]\n \n self.odom_broadcaster.sendTransform(t) # broadcast transform\n \n # setup Odometry message\n odom = Odometry()\n\n #setup header\n odom.header.stamp = self.get_clock().now().to_msg()\n odom.header.frame_id = self.get_parameter('odom_frame').get_parameter_value().string_value\n odom.child_frame_id = self.get_parameter('base_frame').get_parameter_value().string_value\n \n # initialize Odometry message\n odom.pose.pose.position.x = self.x\n odom.pose.pose.position.y = self.y\n odom.pose.pose.position.z = 0.0\n odom.pose.pose.orientation.x = q[0]\n odom.pose.pose.orientation.y = q[1]\n odom.pose.pose.orientation.z = q[2]\n odom.pose.pose.orientation.w = q[3]\n odom.twist.twist.linear.x = ds / (delta_time.nanoseconds/1000000000)\n odom.twist.twist.angular.z = dth / (delta_time.nanoseconds/1000000000)\n \n self.odom_pub.publish(odom) # publish message\n \n def cmd_callback(self, cmdvel):\n self.cmd_vel = cmdvel\n \n def handle_cmd_vel(self):\n wheel_track=self.get_parameter('wheel_track').get_parameter_value()._double_value\n self.max_x_speed = self.get_parameter('max_x_speed').get_parameter_value()._double_value\n self.max_z_speed = self.get_parameter('max_z_speed').get_parameter_value()._double_value\n \n if self.cmd_vel: \n req_theta = self.cmd_vel.angular.z * (wheel_track / 2.0)\n\n # limits requested z velocity to max z velocity\n if self.max_z_speed: # if max_z_speed is not 0\n req_theta = min(abs(req_theta), self.max_z_speed)\n\n if req_theta < 0:\n req_theta *= -1\n \n dist_left = self.cmd_vel.linear.x - req_theta\n dist_right = self.cmd_vel.linear.x + req_theta\n req_velocity = abs(max(dist_left, dist_right))\n drive_vel = min(req_velocity, 0.3) # .3 m/s is the max allowed by neato api\n\n # limits requested x velocity to max x velocity\n if self.max_x_speed: # if max_x_speed is not 0\n drive_vel = min(drive_vel,self.max_x_speed)\n \n if drive_vel == 0:\n if self.moving_prev:\n SetMotorWheels(\n 1, 1, 1\n ) # 0,0,0 does not stop Neato. 
Issue 1,1,1 to go forward 1mm and stop.\n else:\n SetMotorWheels(\n int(dist_left * 1000), int(dist_right * 1000), int(drive_vel * 1000)\n )\n\n self.moving_prev = drive_vel > 0\n \n def laserPub(self):\n # set header\n self.scan.header.stamp = self.get_clock().now().to_msg()\n self.scan.header.frame_id = self.get_parameter('laser_frame').get_parameter_value().string_value\n \n self.scan.ranges = []\n self.scan.intensities = []\n \n scan_reading = GetLDSScan()\n \n for i in range(360):\n if scan_reading[i][2] == 0:\n self.scan.ranges.append(scan_reading[i][0] / 1000.0)\n self.scan.intensities.append(scan_reading[i][1])\n else: # error condition, ignore\n self.scan.ranges.append(0)\n self.scan.intensities.append(0)\n \n self.laser_pub.publish(self.scan) \t\n \ndef quaternion_from_euler(ai, aj, ak):\n ai /= 2.0\n aj /= 2.0\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n q = np.empty((4, ))\n q[0] = cj*sc - sj*cs\n q[1] = cj*ss + sj*cc\n q[2] = cj*cs - sj*sc\n q[3] = cj*cc + sj*ss\n\n return q\n \ndef main(args=None):\n rclpy.init(args=args)\n\n node = neato_node()\n \n while rclpy.ok():\n try:\n rclpy.spin_once(node, timeout_sec=0.3)\n if node.useOdom:\n node.odomPub()\n if node.useLaser:\n node.laserPub()\n node.handle_cmd_vel()\n except:\n break\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n \n node.destroy_node()\n rclpy.shutdown()\n SetLED(BacklightStatus.Off, ButtonColors.Off)\n SetLDSRotation(False)\n TestMode(False)\n PlaySound(Sounds.UserTerminatedCleaning)\n \n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bribribriambriguy/neatonav2","sub_path":"neatonav2/neatonav2/neato_node.py","file_name":"neato_node.py","file_ext":"py","file_size_in_byte":12972,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"20011365970","text":"from collections import defaultdict\n\n\nclass Graph:\n def __init__(self, v):\n self.v = v\n self.graph = defaultdict(list)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n self.graph[v].append(u)\n\n def BFS(self, source, level):\n # BFS from source, recording each node's depth; return how many nodes sit at the given level\n visited = [False for i in range(self.v)]\n q = []\n Level = [0 for j in range(self.v)]\n visited[source] = True\n q.append(source)\n Level[source] = 0\n while len(q):\n s = q.pop(0)\n for i in self.graph[s]:\n if not visited[i]:\n visited[i] = True\n Level[i] = Level[s] + 1\n q.append(i)\n count = 0\n for i in Level:\n if i == level:\n count += 1\n return count\n\n\nif __name__ == '__main__':\n g = Graph(6)\n g.addEdge(0, 1)\n g.addEdge(0, 2)\n g.addEdge(1, 3)\n g.addEdge(2, 4)\n g.addEdge(2, 5)\n level = 2\n print(g.BFS(0, level))\n","repo_name":"CodeForContribute/Algos-DataStructures","sub_path":"GraphCodes/countNumberNodes.py","file_name":"countNumberNodes.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"71006779058","text":"a = []\nwhile True:\n n1 = int(input(\"Enter a value: \"))\n if n1 not in a:\n a.append(n1)\n print(\"number added successfully...\")\n else:\n print(\"Duplicate value!! I will not add it...\")\n b = str(input(\"Do you want to continue? 
[Y/N]\"))\n if b in 'nN':\n break\na.sort()\nprint(a)","repo_name":"GitGuii/PythonExs","sub_path":"PythonDownload/pythonexercicios/ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"7705907233","text":"\"\"\"Jumper_model.py: Create the agent model of the one-legged jumping tensegrity robot and provide an easy interface to be used with RL algorithms\"\"\"\n__author__ = \"Hany Hamed\"\n__credits__ = [\"Hany Hamed\", \"Vlad Kurenkov\", \"Sergie Savin\"]\n__version__ = \"1.0.0\"\n__email__ = \"h.hamed.elanwar@gmail.com / h.hamed@innopolis.university\"\n__status__ = \"Paper Result\"\n\n\nimport socket\nimport sys\nimport signal\nimport json\nfrom time import *\nimport os\nimport subprocess\nimport numpy as np\n\npath_to_model = os.path.join(os.environ[\"TENSEGRITY_HOME\"], \"build/dev/jumper/AppJumperModel\")\nsim_exec = \"gnome-terminal -e {}\".format(path_to_model)\n\nclass JumperModel():\n def __init__(self, host_name='localhost', port_num=10040, packet_size=5000,\n sim_exec=sim_exec, dl=0.1, controllers_num=8, control_type=\"rest_length\", \n starting_coordinates=[0,100,0], starting_angle=[0,0], starting_leg_angle=[0,0]):\n self.starting_coordinates = starting_coordinates\n self.starting_angle = starting_angle\n self.starting_leg_angle = starting_leg_angle\n self.host_name = host_name\n self.port_num = port_num\n self.packet_size = packet_size\n self.actions_json = {\n 'Controllers_val': [0,0,0,0,0,0,0,0],\n 'Reset': 0\n }\n self.sim_json = {\"Rest_cables_lengths\":\n [0,0,0,0,0,0,0,0],\n \"Current_cables_lengths\":\n [0,0,0,0,0,0,0,0],\n \"End_points\":\n [[0.,0.,0.], [0.,0.,0.], [0.,0.,0.], [0.,0.,0.], \n [0.,0.,0.],[0.,0.,0.]],\n \"End_points_velocities\":\n [[0.,0.,0.], [0.,0.,0.], [0.,0.,0.], [0.,0.,0.], \n [0.,0.,0.],[0.,0.,0.]],\n \"Leg_end_points_world\":\n [[0.,0.,0.], [0.,0.,0.]],\n \"Time\": 0.,\n \"ZFinished\": 1,\n \"Flags\":[1,0,0]}\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # print(self.port_num)\n if(self.port_num is None):\n self.port_num = 0\n self.server_address = [self.host_name, self.port_num] # Bind the socket to the port\n\n self.connection = None\n self.client_address = None\n self.child_process = None\n print('#########\nstarting up on {}\n#########'.format(self.server_address))\n try:\n self.sock.bind(tuple(self.server_address))\n\n except socket.error as exc:\n self.port_num += 1\n self.server_address[1] = self.port_num\n print('#########\nstarting up on {} after getting an error of busy port first\n#########'.format(self.server_address))\n self.sock.bind(tuple(self.server_address))\n print('#########\nConnected to port: {:}\n#########'.format(self.sock.getsockname()[1]))\n print('#########\nServer binding is finished\n#########')\n self.sock.listen(1) # Listen for incoming connections\n self.reset_flag = False\n self.close_flag = False\n self.dl = dl # Self modified parameter\n self.end_points_num = 6\n self.controllers_num = controllers_num # Self modified parameter\n self.leg_end_points = [4,5]\n self.leg_length = 20\n self.port_num = self.sock.getsockname()[1]\n self.control_type = control_type\n self.control_type_index = {\"rest_length\": 0, \"current_length\": 1, \"rest_length_mod\": 2, \"current_length_mod\": 3}\n self.orginal_sim_exec = sim_exec\n self.set_sim_exec(self.orginal_sim_exec)\n\n\n def set_sim_exec(self, 
sim_exec):\n self.sim_exec = sim_exec + ' {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}'.format(self.host_name, self.port_num, self.control_type_index[self.control_type], self.starting_coordinates[0], self.starting_coordinates[1], self.starting_coordinates[2] , self.starting_angle[0], self.starting_angle[1], self.starting_leg_angle[0], self.starting_leg_angle[1])\n print(\"EXEC: {:}\".format(self.sim_exec))\n\n def __del__(self):\n self.closeSimulator()\n # sys.exit(0)\n\n # function for writing data into TCP connection\n def write(self, data):\n try:\n self.connection.sendall(data.encode())\n except Exception as e:\n print(\"$$$$$$$$$$$$ ERROR in Writing $$$$$$$$$$$$\")\n print(\"Error: \" + str(e))\n\n # function for reading data from TCP connection\n def read(self):\n try:\n data = []\n counter = 1\n # Receive the data in small chunks and retransmit it\n while True:\n data.append(self.connection.recv(self.packet_size).decode(\"utf-8\")) #reading part\n if 'ZFinished' in str(data[-1][-14:-1]):\n break\n counter += 1\n return \"\".join(data)\n except ValueError:\n print(ValueError)\n print(\"$$$$$$$$$$$$ ERROR in Reading $$$$$$$$$$$$\")\n # sleep(2)\n return None\n\n def startSimulator(self):\n self.close_flag = False\n self.reset_flag = False\n if(self.sim_exec == sim_exec):\n print(\"#Warning: Starting an old version\")\n #Headless\n self.child_process = subprocess.Popen(self.sim_exec, shell=True) \n #print('#########\\nwaiting for a connection\\n#########')\n self.connection, self.clientAddress = self.sock.accept() #wait until it get a client\n #print('connection from', self.clientAddress)\n\n def closeSimulator(self):\n self.close_flag = True\n # kill the shell script of the simulator\n if self.connection is not None:\n self.connection.close()\n if self.child_process is not None:\n os.kill(self.child_process.pid, signal.SIGKILL)\n\n def render(self):\n pass\n \n def reset(self):\n self.reset_flag = True\n self.closeSimulator()\n self.startSimulator()\n\n def step(self):\n if (self.close_flag == False):\n if (self.reset_flag == True):\n self.reset()\n \n self.write(json.dumps(self.actions_json)) # Write to the simulator module the json object with the required info\n sim_raw_data = self.read()\n if(sim_raw_data is not None):\n self.sim_json = json.loads(sim_raw_data) # Parse the data from string to json\n else:\n self.closeSimulator()\n\n def getRestCablesLengths(self, i=None):\n if(i is None):\n return self.sim_json[\"Rest_cables_lengths\"]\n return self.sim_json[\"Rest_cables_lengths\"][i]\n \n \n def getCurrentCablesLengths(self, i=None):\n if(i is None):\n return self.sim_json[\"Current_cables_lengths\"]\n return self.sim_json[\"Current_cables_lengths\"][i]\n\n def getEndPoints(self):\n end_points = []\n # Notice that the end_points are in the form (y,z,x) as it is coming from the simulator like this\n for i in range(self.end_points_num):\n end_points.append(self.sim_json[\"End_points\"][i])\n return end_points\n\n def getEndPointsVelocities(self):\n end_points_velocities = []\n # Notice that the end_points are in the form (y,z,x) as it is coming from the simulator like this\n for i in range(self.end_points_num):\n end_points_velocities.append(self.sim_json[\"End_points_velocities\"][i])\n return end_points_velocities\n\n def getLegEndPoints(self):\n return [self.sim_json[\"Leg_end_points_world\"][0], self.sim_json[\"Leg_end_points_world\"][1]]\n\n # point_a: is the end point of the leg from down\n # point_b: is the end point of the virtual horizontal leg from up\n # point_c: is the end 
point of the actual leg from up\n def getLegAngle(self):\n point_a = np.array(self.sim_json[\"End_points\"][self.leg_end_points[0]])\n point_b = [0,0,0]\n point_b[:] = point_a[:]\n point_b[1] += self.leg_length\n point_c = np.array(self.sim_json[\"End_points\"][self.leg_end_points[1]])\n v1 = point_b - point_a\n v2 = point_c - point_a\n dot_product = np.dot(v1,v2)\n v1_mag = np.linalg.norm(v1)\n v2_mag = np.linalg.norm(v2)\n angle = np.arccos(dot_product/(v1_mag*v2_mag))\n return angle\n\n \"\"\"\n (b)|\n |\n (a)|_____(c)\n \"\"\"\n def getSquareSidesAngles(self):\n point_a = np.array(self.sim_json[\"End_points\"][1])\n point_b = np.array(self.sim_json[\"End_points\"][2])\n point_c = np.array(self.sim_json[\"End_points\"][0])\n\n point_d = [0,0,0]\n point_e = [0,0,0]\n\n point_d[:] = point_b[:]\n point_e[:] = point_c[:]\n\n point_d[1] = point_a[1]\n point_e[1] = point_a[1]\n\n v_ab = point_b - point_a\n v_ac = point_c - point_a\n v_ad = point_d - point_a\n v_ae = point_e - point_a\n\n dot_v_ad_v_ab = np.dot(v_ad, v_ab)\n dot_v_ae_v_ac = np.dot(v_ae, v_ac)\n\n mag_v_ab = np.linalg.norm(v_ab)\n mag_v_ac = np.linalg.norm(v_ac)\n mag_v_ad = np.linalg.norm(v_ad)\n mag_v_ae = np.linalg.norm(v_ae)\n\n angle_x = np.arccos(dot_v_ad_v_ab/(mag_v_ad*mag_v_ab))\n angle_y = np.arccos(dot_v_ae_v_ac/(mag_v_ae*mag_v_ac))\n \n return [angle_x, angle_y]\n \n def getTime(self):\n return self.sim_json[\"Time\"]\n \n def setStartingAngle(self, angle):\n self.starting_angle = angle\n self.set_sim_exec(self.orginal_sim_exec)\n \n def setStartingLegAngle(self, angle):\n self.starting_leg_angle = angle\n self.set_sim_exec(self.orginal_sim_exec)\n\n def setStartingHeight(self, height):\n self.starting_coordinates[1] = height\n self.set_sim_exec(self.orginal_sim_exec)","repo_name":"hany606/tensegrity-vertical-stability","sub_path":"src/dev/gym-tensegrity/gym_tensegrity/envs/jumper_model.py","file_name":"jumper_model.py","file_ext":"py","file_size_in_byte":9961,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"21911334487","text":"\"\"\"Auxiliary functions for the quadratic BNTR trust-region subsolver.\"\"\"\nfrom functools import reduce\nfrom typing import NamedTuple, Union\n\nimport numpy as np\nfrom estimagic.optimization.subsolvers._conjugate_gradient import (\n minimize_trust_cg,\n)\nfrom estimagic.optimization.subsolvers._steihaug_toint import (\n minimize_trust_stcg,\n)\nfrom estimagic.optimization.subsolvers._trsbox import minimize_trust_trsbox\n\nEPSILON = np.finfo(float).eps ** (2 / 3)\n\n\nclass ActiveBounds(NamedTuple):\n lower: Union[np.ndarray, None] = None\n upper: Union[np.ndarray, None] = None\n fixed: Union[np.ndarray, None] = None\n active: Union[np.ndarray, None] = None\n inactive: Union[np.ndarray, None] = None\n\n\ndef bntr(\n model,\n lower_bounds,\n upper_bounds,\n x_candidate,\n *,\n conjugate_gradient_method,\n maxiter,\n maxiter_gradient_descent,\n gtol_abs,\n gtol_rel,\n gtol_scaled,\n gtol_abs_conjugate_gradient,\n gtol_rel_conjugate_gradient,\n):\n \"\"\"Minimize a bounded trust-region subproblem via Newton Conjugate Gradient method.\n\n The BNTR (Bounded Newton Trust Rregion) algorithm uses an active-set approach\n to solve the symmetric system of equations:\n\n hessian @ x = - gradient\n\n only for the inactive parameters of x that lie within the bounds. 
The active-set\n estimation employed here is based on Bertsekas (:cite:`Bertsekas1982`).\n\n In the main loop, BNTR globalizes the Newton step using a trust-region method\n based on the predicted versus actual reduction in the criterion function.\n The trust-region radius is increased only if the accepted step is at the\n trust-region boundary.\n\n\n Args:\n model (NamedTuple): NamedTuple containing the parameters of the\n main model, i.e.:\n - ``linear_terms`` (np.ndarray): 1d array of shape (n,)\n - ``square_terms`` (np.ndarray): 2d array of shape (n,n).\n lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n for the parameter vector x.\n upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n for the parameter vector x.\n x_candidate (np.ndarray): Initial guess for the solution of the subproblem.\n conjugate_gradient_method (str): Method for computing the conjugate gradient\n step. Available conjugate gradient methods are:\n - \"cg\"\n - \"steihaug_toint\"\n - \"trsbox\" (default)\n maxiter (int): Maximum number of iterations. If reached, terminate.\n maxiter_gradient_descent (int): Maximum number of steepest descent iterations\n to perform when the trust-region subsolver BNTR is used.\n gtol_abs (float): Convergence tolerance for the absolute gradient norm.\n gtol_rel (float): Convergence tolerance for the relative gradient norm.\n gtol_scaled (float): Convergence tolerance for the scaled gradient norm.\n gtol_abs_conjugate_gradient (float): Convergence tolerance for the absolute\n gradient norm in the conjugate gradient step of the trust-region\n subproblem (\"BNTR\").\n gtol_rel_conjugate_gradient (float): Convergence tolerance for the relative\n gradient norm in the conjugate gradient step of the trust-region\n subproblem (\"BNTR\").\n\n Returns:\n (dict): Result dictionary containing the following keys:\n - ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)\n - ``criterion`` (float): Minimum function value associated with the\n solution.\n - ``n_iterations`` (int): Number of iterations the algorithm ran before\n termination.\n - ``success`` (bool): Boolean indicating whether a solution has been found\n before reaching maxiter.\n\n \"\"\"\n options_update_radius = {\n \"eta1\": 1.0e-4,\n \"eta2\": 0.25,\n \"eta3\": 0.50,\n \"eta4\": 0.90,\n \"alpha1\": 0.25,\n \"alpha2\": 0.50,\n \"alpha3\": 1.00,\n \"alpha4\": 2.00,\n \"alpha5\": 4.00,\n \"min_radius\": 1e-10,\n \"max_radius\": 1e10,\n \"default_radius\": 100.00,\n }\n\n (\n x_candidate,\n f_candidate,\n gradient_unprojected,\n hessian_bounds_inactive,\n trustregion_radius,\n active_bounds_info,\n converged,\n convergence_reason,\n ) = _take_preliminary_gradient_descent_step_and_check_for_solution(\n x_candidate,\n model,\n lower_bounds,\n upper_bounds,\n maxiter_gradient_descent,\n gtol_abs,\n gtol_rel,\n gtol_scaled,\n )\n\n for niter in range(maxiter + 1):\n if converged:\n break\n\n x_old = x_candidate\n f_old = f_candidate\n accept_step = False\n\n while not accept_step and not converged:\n gradient_bounds_inactive = gradient_unprojected[active_bounds_info.inactive]\n hessian_bounds_inactive = _find_hessian_submatrix_where_bounds_inactive(\n model, active_bounds_info\n )\n (\n conjugate_gradient_step,\n conjugate_gradient_step_inactive_bounds,\n cg_step_norm,\n ) = _compute_conjugate_gradient_step(\n x_candidate,\n gradient_bounds_inactive,\n hessian_bounds_inactive,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n trustregion_radius,\n 
conjugate_gradient_method=conjugate_gradient_method,\n gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient,\n gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient,\n options_update_radius=options_update_radius,\n )\n\n x_unbounded = x_candidate + conjugate_gradient_step\n x_candidate = _apply_bounds_to_x_candidate(\n x_unbounded, lower_bounds, upper_bounds\n )\n\n predicted_reduction = (\n _compute_predicted_reduction_from_conjugate_gradient_step(\n conjugate_gradient_step,\n conjugate_gradient_step_inactive_bounds,\n gradient_unprojected,\n gradient_bounds_inactive,\n hessian_bounds_inactive,\n active_bounds_info,\n )\n )\n\n f_candidate = _evaluate_model_criterion(\n x_candidate, model.linear_terms, model.square_terms\n )\n actual_reduction = f_old - f_candidate\n\n trustregion_radius_old = trustregion_radius\n (\n trustregion_radius,\n accept_step,\n ) = _update_trustregion_radius_conjugate_gradient(\n f_candidate,\n predicted_reduction,\n actual_reduction,\n cg_step_norm,\n trustregion_radius,\n options_update_radius,\n )\n\n if accept_step:\n gradient_unprojected = (\n model.linear_terms + model.square_terms @ x_candidate\n )\n\n active_bounds_info = _get_information_on_active_bounds(\n x_candidate,\n gradient_unprojected,\n lower_bounds,\n upper_bounds,\n )\n else:\n x_candidate = x_old\n f_candidate = f_old\n\n if trustregion_radius == trustregion_radius_old:\n converged = True\n break\n\n converged, convergence_reason = _check_for_convergence(\n x_candidate,\n f_candidate,\n gradient_unprojected,\n model,\n lower_bounds,\n upper_bounds,\n converged,\n convergence_reason,\n niter,\n maxiter=maxiter,\n gtol_abs=gtol_abs,\n gtol_rel=gtol_rel,\n gtol_scaled=gtol_scaled,\n )\n\n result = {\n \"x\": x_candidate,\n \"criterion\": f_candidate,\n \"n_iterations\": niter,\n \"success\": converged,\n \"message\": convergence_reason,\n }\n\n return result\n\n\ndef _take_preliminary_gradient_descent_step_and_check_for_solution(\n x_candidate,\n model,\n lower_bounds,\n upper_bounds,\n maxiter_gradient_descent,\n gtol_abs,\n gtol_rel,\n gtol_scaled,\n):\n \"\"\"Take a preliminary gradient descent step and check if we found a solution.\"\"\"\n options_update_radius = {\n \"mu1\": 0.35,\n \"mu2\": 0.50,\n \"gamma1\": 0.0625,\n \"gamma2\": 0.5,\n \"gamma3\": 2.0,\n \"gamma4\": 5.0,\n \"theta\": 0.25,\n \"min_radius\": 1e-10,\n \"max_radius\": 1e10,\n \"default_radius\": 100.0,\n }\n\n converged = False\n convergence_reason = \"Continue iterating.\"\n\n criterion_candidate = _evaluate_model_criterion(\n x_candidate, model.linear_terms, model.square_terms\n )\n\n active_bounds_info = _get_information_on_active_bounds(\n x_candidate,\n model.linear_terms,\n lower_bounds,\n upper_bounds,\n )\n\n gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate\n gradient_projected = _project_gradient_onto_feasible_set(\n gradient_unprojected, active_bounds_info\n )\n\n converged, convergence_reason = _check_for_convergence(\n x_candidate,\n criterion_candidate,\n gradient_unprojected,\n model,\n lower_bounds,\n upper_bounds,\n converged,\n convergence_reason,\n niter=None,\n maxiter=None,\n gtol_abs=gtol_abs,\n gtol_rel=gtol_rel,\n gtol_scaled=gtol_scaled,\n )\n\n if converged:\n hessian_inactive = model.square_terms\n trustregion_radius = options_update_radius[\"default_radius\"]\n else:\n hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(\n model, active_bounds_info\n )\n\n (\n x_candidate_gradient_descent,\n f_min_gradient_descent,\n step_size_gradient_descent,\n 
trustregion_radius,\n radius_lower_bound,\n ) = _perform_gradient_descent_step(\n x_candidate,\n criterion_candidate,\n gradient_projected,\n hessian_inactive,\n model,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n maxiter_gradient_descent,\n options_update_radius,\n )\n\n if f_min_gradient_descent < criterion_candidate:\n criterion_candidate = f_min_gradient_descent\n\n x_unbounded = (\n x_candidate_gradient_descent\n - step_size_gradient_descent * gradient_projected\n )\n x_candidate = _apply_bounds_to_x_candidate(\n x_unbounded, lower_bounds, upper_bounds\n )\n\n gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate\n active_bounds_info = _get_information_on_active_bounds(\n x_candidate,\n gradient_unprojected,\n lower_bounds,\n upper_bounds,\n )\n\n gradient_projected = _project_gradient_onto_feasible_set(\n gradient_unprojected, active_bounds_info\n )\n hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(\n model, active_bounds_info\n )\n\n converged, convergence_reason = _check_for_convergence(\n x_candidate,\n criterion_candidate,\n gradient_projected,\n model,\n lower_bounds,\n upper_bounds,\n converged,\n convergence_reason,\n niter=None,\n maxiter=None,\n gtol_abs=gtol_abs,\n gtol_rel=gtol_rel,\n gtol_scaled=gtol_scaled,\n )\n\n if not converged:\n trustregion_radius = np.clip(\n max(trustregion_radius, radius_lower_bound),\n options_update_radius[\"min_radius\"],\n options_update_radius[\"max_radius\"],\n )\n\n return (\n x_candidate,\n criterion_candidate,\n gradient_unprojected,\n hessian_inactive,\n trustregion_radius,\n active_bounds_info,\n converged,\n convergence_reason,\n )\n\n\ndef _compute_conjugate_gradient_step(\n x_candidate,\n gradient_inactive,\n hessian_inactive,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n trustregion_radius,\n *,\n conjugate_gradient_method,\n gtol_abs_conjugate_gradient,\n gtol_rel_conjugate_gradient,\n options_update_radius,\n):\n \"\"\"Compute the bounded Conjugate Gradient trust-region step.\"\"\"\n conjugate_gradient_step = np.zeros_like(x_candidate)\n\n if active_bounds_info.inactive.size == 0:\n # Save some computation and return an adjusted zero step\n step_inactive = _apply_bounds_to_x_candidate(\n x_candidate, lower_bounds, upper_bounds\n )\n step_norm = np.linalg.norm(step_inactive)\n\n conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step(\n step_inactive,\n x_candidate,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n )\n\n else:\n if conjugate_gradient_method == \"cg\":\n step_inactive = minimize_trust_cg(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n gtol_abs=gtol_abs_conjugate_gradient,\n gtol_rel=gtol_rel_conjugate_gradient,\n )\n step_norm = np.linalg.norm(step_inactive)\n elif conjugate_gradient_method == \"steihaug_toint\":\n step_inactive = minimize_trust_stcg(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n )\n step_norm = np.linalg.norm(step_inactive)\n elif conjugate_gradient_method == \"trsbox\":\n step_inactive = minimize_trust_trsbox(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n lower_bounds=lower_bounds[active_bounds_info.inactive],\n upper_bounds=upper_bounds[active_bounds_info.inactive],\n )\n step_norm = np.linalg.norm(step_inactive)\n else:\n raise ValueError(\n f\"Invalid method: {conjugate_gradient_method}. 
\"\n \"Must be one of cg, steihaug_toint, trsbox.\"\n )\n\n if trustregion_radius == 0:\n if step_norm > 0:\n # Accept\n trustregion_radius = np.clip(\n step_norm,\n options_update_radius[\"min_radius\"],\n options_update_radius[\"max_radius\"],\n )\n\n else:\n # Re-solve\n trustregion_radius = np.clip(\n options_update_radius[\"default_radius\"],\n options_update_radius[\"min_radius\"],\n options_update_radius[\"max_radius\"],\n )\n\n if conjugate_gradient_method == \"cg\":\n step_inactive = minimize_trust_cg(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n gtol_abs=gtol_abs_conjugate_gradient,\n gtol_rel=gtol_rel_conjugate_gradient,\n )\n step_norm = np.linalg.norm(step_inactive)\n elif conjugate_gradient_method == \"steihaug_toint\":\n step_inactive = minimize_trust_stcg(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n )\n step_norm = np.linalg.norm(step_inactive)\n elif conjugate_gradient_method == \"trsbox\":\n step_inactive = minimize_trust_trsbox(\n gradient_inactive,\n hessian_inactive,\n trustregion_radius,\n lower_bounds=lower_bounds[active_bounds_info.inactive],\n upper_bounds=upper_bounds[active_bounds_info.inactive],\n )\n step_norm = np.linalg.norm(step_inactive)\n\n if step_norm == 0:\n raise ValueError(\"Initial direction is zero.\")\n\n conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step(\n step_inactive,\n x_candidate,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n )\n\n return (\n conjugate_gradient_step,\n step_inactive,\n step_norm,\n )\n\n\ndef _compute_predicted_reduction_from_conjugate_gradient_step(\n conjugate_gradient_step,\n conjugate_gradient_step_inactive,\n gradient_unprojected,\n gradient_inactive,\n hessian_inactive,\n active_bounds_info,\n):\n \"\"\"Compute predicted reduction induced by the Conjugate Gradient step.\"\"\"\n if active_bounds_info.active.size > 0:\n # Projection changed the step, so we have to recompute the step\n # and the predicted reduction. 
Leave the trust radius unchanged.\n cg_step_recomp = conjugate_gradient_step[active_bounds_info.inactive]\n gradient_inactive_recomp = gradient_unprojected[active_bounds_info.inactive]\n\n predicted_reduction = _evaluate_model_criterion(\n cg_step_recomp, gradient_inactive_recomp, hessian_inactive\n )\n else:\n # Step did not change, so we can just recover the\n # pre-computed prediction\n predicted_reduction = _evaluate_model_criterion(\n conjugate_gradient_step_inactive,\n gradient_inactive,\n hessian_inactive,\n )\n\n return -predicted_reduction\n\n\ndef _perform_gradient_descent_step(\n x_candidate,\n f_candidate_initial,\n gradient_projected,\n hessian_inactive,\n model,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n maxiter_steepest_descent,\n options_update_radius,\n):\n \"\"\"Perform gradient descent step and update trust-region radius.\"\"\"\n f_min = f_candidate_initial\n gradient_norm = np.linalg.norm(gradient_projected)\n\n trustregion_radius = options_update_radius[\"default_radius\"]\n radius_lower_bound = 0\n step_size_accepted = 0\n\n for _ in range(maxiter_steepest_descent):\n x_old = x_candidate\n\n step_size_candidate = trustregion_radius / gradient_norm\n x_candidate = x_old - step_size_candidate * gradient_projected\n\n x_candidate = _apply_bounds_to_x_candidate(\n x_candidate, lower_bounds, upper_bounds\n )\n f_candidate = _evaluate_model_criterion(\n x_candidate, model.linear_terms, model.square_terms\n )\n\n x_diff = x_candidate - x_old\n\n if f_candidate < f_min:\n f_min = f_candidate\n step_size_accepted = step_size_candidate\n\n x_inactive = x_diff[active_bounds_info.inactive]\n square_terms = x_inactive.T @ hessian_inactive @ x_inactive\n\n predicted_reduction = trustregion_radius * (\n gradient_norm\n - 0.5 * trustregion_radius * square_terms / (gradient_norm**2)\n )\n actual_reduction = f_candidate_initial - f_candidate\n\n (\n trustregion_radius,\n radius_lower_bound,\n ) = _update_trustregion_radius_and_gradient_descent(\n trustregion_radius,\n radius_lower_bound,\n predicted_reduction,\n actual_reduction,\n gradient_norm,\n options_update_radius,\n )\n\n return (\n x_candidate,\n f_min,\n step_size_accepted,\n trustregion_radius,\n radius_lower_bound,\n )\n\n\ndef _update_trustregion_radius_conjugate_gradient(\n f_candidate,\n predicted_reduction,\n actual_reduction,\n x_norm_cg,\n trustregion_radius,\n options,\n):\n \"\"\"Update the trust-region radius based on predicted and actual reduction.\"\"\"\n accept_step = False\n\n if predicted_reduction < 0 or ~np.isfinite(predicted_reduction):\n # Reject and start over\n trustregion_radius = options[\"alpha1\"] * min(trustregion_radius, x_norm_cg)\n\n else:\n if ~np.isfinite(actual_reduction):\n trustregion_radius = options[\"alpha1\"] * min(trustregion_radius, x_norm_cg)\n else:\n if abs(actual_reduction) <= max(1, abs(f_candidate) * EPSILON) and abs(\n predicted_reduction\n ) <= max(1, abs(f_candidate) * EPSILON):\n kappa = 1\n else:\n kappa = actual_reduction / predicted_reduction\n\n if kappa < options[\"eta1\"]:\n # Reject the step\n trustregion_radius = options[\"alpha1\"] * min(\n trustregion_radius, x_norm_cg\n )\n else:\n accept_step = True\n\n # Update the trust-region radius only if the computed step is at the\n # trust-radius boundary\n if x_norm_cg == trustregion_radius:\n if kappa < options[\"eta2\"]:\n # Marginally bad step\n trustregion_radius = options[\"alpha2\"] * trustregion_radius\n elif kappa < options[\"eta3\"]:\n # Reasonable step\n trustregion_radius = options[\"alpha3\"] * 
trustregion_radius\n elif kappa < options[\"eta4\"]:\n trustregion_radius = options[\"alpha4\"] * trustregion_radius\n else:\n # Very good step\n trustregion_radius = options[\"alpha5\"] * trustregion_radius\n\n trustregion_radius = np.clip(\n trustregion_radius, options[\"min_radius\"], options[\"max_radius\"]\n )\n\n return trustregion_radius, accept_step\n\n\ndef _get_information_on_active_bounds(\n x,\n gradient_unprojected,\n lower_bounds,\n upper_bounds,\n):\n \"\"\"Return the index set of active bounds.\"\"\"\n active_lower = np.where((x <= lower_bounds) & (gradient_unprojected > 0))[0]\n active_upper = np.where((x >= upper_bounds) & (gradient_unprojected < 0))[0]\n active_fixed = np.where(lower_bounds == upper_bounds)[0]\n active_all = reduce(np.union1d, (active_fixed, active_lower, active_upper))\n inactive = np.setdiff1d(np.arange(len(x)), active_all)\n\n active_bounds_info = ActiveBounds(\n lower=active_lower,\n upper=active_upper,\n fixed=active_fixed,\n active=active_all,\n inactive=inactive,\n )\n\n return active_bounds_info\n\n\ndef _find_hessian_submatrix_where_bounds_inactive(model, active_bounds_info):\n \"\"\"Find the submatrix of the initial hessian where bounds are inactive.\"\"\"\n hessian_inactive = model.square_terms[\n active_bounds_info.inactive[:, np.newaxis], active_bounds_info.inactive\n ]\n\n return hessian_inactive\n\n\ndef _check_for_convergence(\n x_candidate,\n f_candidate,\n gradient_candidate,\n model,\n lower_bounds,\n upper_bounds,\n converged,\n reason,\n niter,\n *,\n maxiter,\n gtol_abs,\n gtol_rel,\n gtol_scaled,\n):\n \"\"\"Check if we have found a solution.\"\"\"\n direction_fischer_burmeister = _get_fischer_burmeister_direction_vector(\n x_candidate, gradient_candidate, lower_bounds, upper_bounds\n )\n gradient_norm = np.linalg.norm(direction_fischer_burmeister)\n gradient_norm_initial = np.linalg.norm(model.linear_terms)\n\n if gradient_norm < gtol_abs:\n converged = True\n reason = \"Norm of the gradient is less than absolute_gradient_tolerance.\"\n elif f_candidate != 0 and abs(gradient_norm / f_candidate) < gtol_rel:\n converged = True\n reason = (\n \"Norm of the gradient relative to the criterion value is less than \"\n \"relative_gradient_tolerance.\"\n )\n elif (\n gradient_norm_initial != 0\n and gradient_norm / gradient_norm_initial < gtol_scaled\n ):\n converged = True\n reason = (\n \"Norm of the gradient divided by norm of the gradient at the \"\n \"initial parameters is less than scaled_gradient_tolerance.\"\n )\n elif gradient_norm_initial != 0 and gradient_norm == 0 and gtol_scaled == 0:\n converged = True\n reason = (\n \"Norm of the gradient divided by norm of the gradient at the \"\n \"initial parameters is less than scaled_gradient_tolerance.\"\n )\n elif f_candidate <= -np.inf:\n converged = True\n reason = \"Criterion value is negative infinity.\"\n elif niter is not None and niter == maxiter:\n reason = \"Maximum number of iterations reached.\"\n\n return converged, reason\n\n\ndef _apply_bounds_to_x_candidate(x, lower_bounds, upper_bounds, bound_tol=0):\n \"\"\"Apply upper and lower bounds to the candidate vector.\"\"\"\n x = np.where(x <= lower_bounds + bound_tol, lower_bounds, x)\n x = np.where(x >= upper_bounds - bound_tol, upper_bounds, x)\n\n return x\n\n\ndef _project_gradient_onto_feasible_set(gradient_unprojected, active_bounds_info):\n \"\"\"Project gradient onto feasible set, where search directions unconstrained.\"\"\"\n gradient_projected = np.zeros_like(gradient_unprojected)\n 
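# Small illustrative example (hypothetical values): for\n # gradient_unprojected = np.array([2., -1., 3.]) with inactive indices\n # [0, 2], the projected gradient below becomes [2., 0., 3.]; entries at\n # active bounds are zeroed so the search direction never pushes against\n # a binding constraint.\n 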
gradient_projected[active_bounds_info.inactive] = gradient_unprojected[\n active_bounds_info.inactive\n ]\n\n return gradient_projected\n\n\ndef _apply_bounds_to_conjugate_gradient_step(\n step_inactive,\n x_candidate,\n lower_bounds,\n upper_bounds,\n active_bounds_info,\n):\n \"\"\"Apply lower and upper bounds to the Conjugate Gradient step.\"\"\"\n cg_step = np.zeros_like(x_candidate)\n cg_step[active_bounds_info.inactive] = step_inactive\n\n if active_bounds_info.lower.size > 0:\n x_active_lower = x_candidate[active_bounds_info.lower]\n lower_bound_active = lower_bounds[active_bounds_info.lower]\n\n cg_step[active_bounds_info.lower] = lower_bound_active - x_active_lower\n\n if active_bounds_info.upper.size > 0:\n x_active_upper = x_candidate[active_bounds_info.upper]\n upper_bound_active = upper_bounds[active_bounds_info.upper]\n\n cg_step[active_bounds_info.upper] = upper_bound_active - x_active_upper\n\n if active_bounds_info.fixed.size > 0:\n cg_step[active_bounds_info.fixed] = 0\n\n return cg_step\n\n\ndef _update_trustregion_radius_and_gradient_descent(\n trustregion_radius,\n radius_lower_bound,\n predicted_reduction,\n actual_reduction,\n gradient_norm,\n options,\n):\n \"\"\"Update the trust-region radius and its upper bound.\"\"\"\n if abs(actual_reduction) <= EPSILON and abs(predicted_reduction) <= EPSILON:\n kappa = 1\n else:\n kappa = actual_reduction / predicted_reduction\n\n tau_1 = (\n options[\"theta\"]\n * gradient_norm\n * trustregion_radius\n / (\n options[\"theta\"] * gradient_norm * trustregion_radius\n + (1 - options[\"theta\"]) * predicted_reduction\n - actual_reduction\n )\n )\n tau_2 = (\n options[\"theta\"]\n * gradient_norm\n * trustregion_radius\n / (\n options[\"theta\"] * gradient_norm * trustregion_radius\n - (1 + options[\"theta\"]) * predicted_reduction\n + actual_reduction\n )\n )\n\n tau_min = min(tau_1, tau_2)\n tau_max = max(tau_1, tau_2)\n\n if abs(kappa - 1) <= options[\"mu1\"]:\n # Great agreement\n radius_lower_bound = max(radius_lower_bound, trustregion_radius)\n\n if tau_max < 1:\n tau = options[\"gamma3\"]\n elif tau_max > options[\"gamma4\"]:\n tau = options[\"gamma4\"]\n else:\n tau = tau_max\n\n elif abs(kappa - 1) <= options[\"mu2\"]:\n # Good agreement\n radius_lower_bound = max(radius_lower_bound, trustregion_radius)\n\n if tau_max < options[\"gamma2\"]:\n tau = options[\"gamma2\"]\n elif tau_max > options[\"gamma3\"]:\n tau = options[\"gamma3\"]\n else:\n tau = tau_max\n\n else:\n # Not good agreement\n if tau_min > 1:\n tau = options[\"gamma2\"]\n elif tau_max < options[\"gamma1\"]:\n tau = options[\"gamma1\"]\n elif (tau_min < options[\"gamma1\"]) and (tau_max >= 1):\n tau = options[\"gamma1\"]\n elif (\n (tau_1 >= options[\"gamma1\"])\n and (tau_1 < 1.0)\n and ((tau_2 < options[\"gamma1\"]) or (tau_2 >= 1.0))\n ):\n tau = tau_1\n elif (\n (tau_2 >= options[\"gamma1\"])\n and (tau_2 < 1.0)\n and ((tau_1 < options[\"gamma1\"]) or (tau_2 >= 1.0))\n ):\n tau = tau_2\n else:\n tau = tau_max\n\n trustregion_radius = trustregion_radius * tau\n\n return trustregion_radius, radius_lower_bound\n\n\ndef _get_fischer_burmeister_direction_vector(x, gradient, lower_bounds, upper_bounds):\n \"\"\"Compute the constrained direction vector via the Fischer-Burmeister function.\"\"\"\n fischer_vec = np.vectorize(_get_fischer_burmeister_scalar)\n\n fischer_burmeister = reduce(\n fischer_vec, (upper_bounds - x, -gradient, x - lower_bounds)\n )\n direction = np.where(\n lower_bounds == upper_bounds, lower_bounds - x, fischer_burmeister\n )\n\n return 
direction\n\n\ndef _get_fischer_burmeister_scalar(a, b):\n \"\"\"Get the value of the Fischer-Burmeister function for two scalar inputs.\n\n This method was suggested by Bob Vanderbei. Since the Fischer-Burmeister\n is symmetric, the order of the scalar inputs does not matter.\n\n Args:\n a (float): First input.\n b (float): Second input.\n\n Returns:\n float: Value of the Fischer-Burmeister function for inputs a and b.\n\n \"\"\"\n if a + b <= 0:\n fischer_burmeister = np.sqrt(a**2 + b**2) - (a + b)\n else:\n fischer_burmeister = -2 * a * b / (np.sqrt(a**2 + b**2) + (a + b))\n\n return fischer_burmeister\n\n\ndef _evaluate_model_criterion(\n x,\n gradient,\n hessian,\n):\n \"\"\"Evaluate the criterion function value of the main model.\n\n Args:\n x (np.ndarray): Parameter vector of shape (n,).\n gradient (np.ndarray): Gradient of shape (n,) for which the main model\n shall be evaluated.\n hessian (np.ndarray): Hessian of shape (n, n) for which the main model\n shall be evaluated.\n\n Returns:\n float: Criterion value of the main model.\n\n \"\"\"\n return gradient.T @ x + 0.5 * x.T @ hessian @ x\n","repo_name":"OpenSourceEconomics/estimagic","sub_path":"src/estimagic/optimization/subsolvers/bntr.py","file_name":"bntr.py","file_ext":"py","file_size_in_byte":30929,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"57"} +{"seq_id":"38747651752","text":"# -*- coding: utf-8 -*-\nfrom dino_agent import DinoAgent, train_network, init_cache, build_model\nfrom game import Game, Game_sate\n\n\n# main function\ndef playGame(observe=False):\n game = Game()\n dino = DinoAgent(game)\n game_state = Game_sate(dino, game)\n model = build_model()\n try:\n train_network(model, game_state, observe=observe)\n except StopIteration:\n game.end()\n\n\nif __name__ == \"__main__\":\n playGame(observe=False)\n","repo_name":"chenywang/dino_reinforce_learning","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1883232831","text":"# this code was written by Ow ow.charlon@gmail.com \r\n# this code is released under the WTFPL license\r\n\r\nimport random\r\n\r\ndef Hasard(a):\r\n\tliste1 = \"\"\r\n\twhile len(liste1) < a :\r\n\t\tliste1 = liste1 + chr(random.randint(97, 122))\r\n\t\r\n\treturn str(liste1)\r\n\t\r\ncount = 1\r\nwhile count < 100000 :\r\n\tprint(Hasard(count))\r\n\tcount += 1","repo_name":"existentielisteprogrammer/python","sub_path":"lettreRandom.py","file_name":"lettreRandom.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18996289151","text":"\"\"\"\nErik Cohen\nStudent #000915169\n\"\"\"\nfrom datetime import datetime\nfrom delivery import get_deliveries_from_package_id_list, get_deliveries_with_unassigned_trucks\nfrom route import Route\nimport config as cfg\n\n\nclass Truck:\n def __init__(self, _id):\n self.id = _id\n self.route = Route()\n self.miles_traveled = 0\n self.max_speed = 18 # Truck can reach a max speed of 18 MPH.\n self.max_packages = 16 # Truck can contain a max of 16 packages per route\n self.completed_route = False\n self.departure_time = None\n self.started_delivering = False\n\n def at_base(self):\n \"\"\"\n Complexity: Big O(1)\n Returns whether the truck is at the starting location\n \"\"\"\n return self.route._current_location == self.route.starting_location\n\n def assign_delivery(self, delivery, add_index):\n 
\"\"\"\n Complexity: Big O(1)\n assign a single delivery at a specific index\n \"\"\"\n self.route.add_delivery(self.id, delivery, add_index)\n\n def assign_deliveries(self, deliveries_list):\n \"\"\"\n Complexity: Big O(n)\n Assign a List of deliveries to this Truck\n \"\"\"\n self.route.add_deliveries(self.id, deliveries_list)\n\n def will_fit(self, delivery):\n \"\"\"\n Complexity: Big O(n)\n Checks to see if this delivery will fit in the Truck\n \"\"\"\n return len(self.get_packages()) + len(delivery.packages) <= self.max_packages\n\n def will_fit_list(self, delivery_list):\n \"\"\"\n Complexity: Big O(n)\n Checks to see if this list of deliveries will fit in the Truck\n \"\"\"\n package_list_sum = sum([len(delivery.packages) for delivery in delivery_list])\n return len(self.get_packages()) + package_list_sum <= 16\n\n def get_deliveries(self):\n \"\"\"\n Complexity: Big O(n)\n Get all the deliveries assigned to this truck\n \"\"\"\n return [delivery for delivery in self.route.deliveries]\n\n def get_packages(self):\n \"\"\"\n Complexity: Big O(n)\n Get all the packages assigned to this truck\n \"\"\"\n package_list = []\n for delivery in self.route.deliveries:\n package_list += delivery.packages\n return package_list\n\n def start_delivering(self, time):\n \"\"\"\n Complexity: Big O(1)\n Start the route and timestamp the departure time of the truck\n \"\"\"\n self.started_delivering = True\n self.route.init(time)\n self.departure_time = time\n print(f\"Truck {self.id} has started its route at {time}\")\n\n def minutes_passed(self, minutes):\n \"\"\"\n Complexity: Big O(n)\n Notify this truck a specified amount of minutes have passed.\n Convert the time to miles and advance the route\n \"\"\"\n miles_left = self.route.get_miles_left\n miles_traveled = 0.3 * minutes\n if self.completed_route or miles_left() == 0:\n if self.route._current_location != self.route.starting_location:\n self.route.return_to_base(self.id)\n self.completed_route = True\n print(f\"Truck {self.id} has completed route\")\n return\n print(f\"Truck {self.id} has {miles_left()} miles left to go\")\n self.miles_traveled += miles_traveled\n self.route.advance_by_miles(miles_traveled)\n print(f\"Truck {self.id} has driven {round(self.miles_traveled, 2)} miles\")\n if miles_left() == 0:\n self.completed_route = True\n print(f\"Truck {self.id} has completed route\")\n return\n\n def get_ETA_back_at_depot(self):\n return self.route.get_ETA_back_at_depot()\n\n def populate_ETA(self, departure_time):\n \"\"\"\n Complexity: Big O(n^2)\n \"\"\"\n self.route.populate_ETA(departure_time)\n\n\ndef distribute_deliveries_to_trucks():\n \"\"\"\n Complexity: Big O(n^2)\n Calculate the added milage for each route assigned to each truck.\n Determine the truck which the delivery would increase the milage\n the least and its position within the route.\n Assign it to the best truck at the best position\n \"\"\"\n assign_deliveries_truck_2()\n deliveries_with_unassigned_trucks = get_deliveries_with_unassigned_trucks()\n now = datetime.today()\n start_of_day_time = datetime.strptime(\"08:00:00\", \"%H:%M:%S\").time()\n start_of_day = datetime.combine(now, start_of_day_time).time()\n for delivery in deliveries_with_unassigned_trucks:\n closest_truck = None\n closest_truck_distance = None\n route_add_index = None\n for truck in cfg.trucks:\n if truck.will_fit(delivery):\n delivery_departure = None\n if (truck.id == 1 or truck.id == 2):\n delivery_departure = start_of_day\n elif (truck.id == 3):\n estimated_time_back_at_depot = 
datetime.strptime(\"10:30:00\", \"%H:%M:%S\").time()\n delivery_departure = datetime.combine(now, estimated_time_back_at_depot).time()\n (added_distance, add_index) = truck.route.added_distance(delivery_departure, delivery)\n if (added_distance is None and add_index is None):\n continue\n if closest_truck is None or added_distance < closest_truck_distance:\n closest_truck = truck\n closest_truck_distance = added_distance\n route_add_index = add_index\n\n closest_truck.assign_delivery(delivery, route_add_index)\n\n\ndef assign_deliveries_truck_2():\n \"\"\"\n Complexity: Big O(n)\n \"\"\"\n deliveries_that_must_go_on_truck_2 = get_deliveries_from_package_id_list(cfg.packages_that_must_be_on_truck_2)\n cfg.truck2.assign_deliveries(deliveries_that_must_go_on_truck_2)\n","repo_name":"ErikCohenDev/traveling_salesman","sub_path":"truck.py","file_name":"truck.py","file_ext":"py","file_size_in_byte":5828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"662637192","text":"from lxml import etree, objectify\nfrom io import BytesIO\nfrom Timeframes import TimeFrames\nfrom Elements import Elements\n\n# Used sysdev cookbook\n\nclass TimeFrameToXML:\n def __init__(self, timeframes: TimeFrames):\n self.timeframes = timeframes\n\n def write_file(self):\n root = etree.Element(\"time\")\n for timeframe in self.timeframes:\n timeframe_element = Elements.create_timeframe(timeframe)\n root.append(timeframe_element)\n\n objectify.deannotate(root)\n etree.cleanup_namespaces(root)\n\n parser = etree.XMLParser(remove_blank_text=True)\n file_obj = BytesIO(etree.tostring(root))\n tree = etree.parse(file_obj, parser)\n\n try:\n with open(\"Timeframe.xml\", \"wb\") as xml_writer:\n tree.write(xml_writer, pretty_print=True, encoding=\"utf-8\", xml_declaration=True)\n except IOError:\n pass","repo_name":"frejabauch/Systemudvikling_FSIS","sub_path":"Classes/TimeFrameToXML.py","file_name":"TimeFrameToXML.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"70426622900","text":"from django.test import TestCase\nfrom posts.models import Author, Post\n\n\nclass BaseTest(TestCase):\n\n # The setUp method is for defining all the fields that should be used for testing purposes in the testing methods\n def setUp(self):\n self.user_1 = Author.objects.create(\n username='testuser1',\n email='testuser1@mail.com',\n first_name='test',\n last_name='user',\n )\n self.post_1 = Post.objects.create(\n user=self.user_1,\n title='some random title',\n content='some random content',\n social_network='Facebook',\n link='https://www.facebook.com/'\n )\n\n\nclass TestPostModel(BaseTest):\n\n # testing the Post's model getUserFullName() method by checking if it's returning the correct full name of the parent Author model\n def test_post_model_returning_users_full_name(self):\n self.assertEquals(self.post_1.getUserFullName(), 'test user')\n","repo_name":"aryanlilian/Social-Posts","sub_path":"posts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74362962418","text":"# -*- coding:utf-8 -*-\nfrom time import sleep\nfrom selenium.webdriver import Chrome\n\ndriver=Chrome()\ncook={'domain': '120.78.128.25',\n 'name': 'fengwoo',\n 'path': '/',\n 
'value':'2shlsj8b6qju618hgj9ggjs2e3'}\n\ndriver.get('http://120.78.128.25:8765/Index/login.html')\ndriver.add_cookie(cook)\ndriver.get('http://120.78.128.25:8765')\n# ele=driver.find_element_by_xpath(\"//a[text()='我的帐户[python10]']\")\nele=driver.find_element_by_xpath(\"//a[contains(text(),'我的帐户')]\")\nele2=driver.find_element_by_xpath(\"//a[contains(@href,'Member')]\")\nprint(ele.text)\nprint(ele2.text)","repo_name":"xueluowuhenyue/Scripts","sub_path":"WEB_LX/练习/前程贷跳过登录.py","file_name":"前程贷跳过登录.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11869295494","text":"import os\nimport webdev\nfrom generalHelperFunctions import *\nfrom crawlHelperFunctions import *\n\n\n# maps all the data found from a url using the url as its key in the dictionary\ncrawledData = {}\n# A list that stores the links that are to be crawled/ or is already crawled. The queue keeps on getting filled as new links come in and the links are simultaneously accessed as the loop happens.\n# The accessed items arent removed as we only need to access them once and their precence in the list means that it has been/or will be accessed once only.\nlinkQueue = []\nuniqueWords = [] #list of all the unique words that are present in all of the urls crawled\n\n## CHECK THE README.TXT file for file information\n\ndef crawl(seed):\n clearPrevCrawl()\n linkQueue.append(seed)\n pagesWordsCount = { # {pageURL : {'totalWordNum': totalWordsInURL,'uniqueWord1': count1, uniqueWord2: count2,.........,}} #this dictionary stores the tfs for every page. This dict is used such that we can compute the values as the crawl loop is active simultaneously such that another loop does not have to be used to compute these values.\n }\n for url in (linkQueue):\n data = webdev.read_url(url)\n pagesWordsCount[url] = {'totalWordNum': 0}\n title, words, links= parseHtml(data, linkQueue, url, pagesWordsCount,uniqueWords)\n add_titles_to_file(url, title)\n createDataFile(changeLinkToFileName(url), words+'\\n'+ links)\n if not os.path.isdir('computationData'): #all computed data is stored in the computationData directory\n os.mkdir('computationData')\n for uniqueItem in uniqueWords: #generate and save the idf value for all unique words in a file\n generateIdf(uniqueItem, saveFile=True)\n\n generate_tf_tfIdf(pagesWordsCount, uniqueWords)\n return len(linkQueue)\n\n# crawl('http://people.scs.carleton.ca/~davidmckenney/tinyfruits/N-0.html')","repo_name":"Titan0932/Crawler-and-search-Engine","sub_path":"backup/testingResourcesV2/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28170361930","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_bar_graph(plot_info, savePlotToFile, title = \"\", rot = 0, xlabel = \"\"):\n \"\"\"Plots bar graph for different analyses\n\n Args:\n plot info (df): containing the following columns\n Total (list): number of total instances for each category\n Total_Correct_Predictions (list): number of correctly predicted instances for each category\n accuracy (list): accuracy of total_correct_predictions/ total\n savePlotToFile (str): File name for saving plot, empty string will not save a plot\n title (str): title of graph\n rot (int): rotation for x-axis ticks\n xlabel (str): x label\n \"\"\"\n if plot_info.shape[0] > 2:\n plot_info= plot_info.sort_values(by = \"Total\",axis = 
0,ignore_index =True)\n labels = plot_info.iloc[:,0] \n totals = plot_info[\"Total\"]\n correct_predictions = plot_info[\"Total_Correct_Predictions\"]\n accuracy = plot_info[\"Accuracy\"]\n \n ha = \"center\"\n fsize=10\n if len(labels) >2:\n rot = 45\n ha = \"right\"\n if len(labels) >12:\n fsize= 8\n \n \n fig, ax = plt.subplots()\n ax.bar(labels, totals, color = \"red\", label = \"Total\", edgecolor='black')\n ax.bar(labels, correct_predictions, color = \"blue\", \n label = \"Correct Predicitons\", edgecolor = 'black')\n \n \n # Set an offset that is used to bump the label up a bit above the bar.\n y_offset = 4\n # Add labels to each bar.\n for i, total in enumerate(totals):\n ax.text(totals.index[i], total + y_offset, str(round(accuracy[i]*100,1)) + \"%\", ha='center')\n \n plt.legend(prop={'size': 8})\n plt.title(title)\n plt.xticks(ticks = np.arange(labels.shape[0]), labels = labels, rotation = rot,ha=ha, rotation_mode='anchor')\n plt.tick_params(axis='x', labelsize=fsize)\n plt.xlabel(xlabel)\n plt.ylabel(\"Number of Instances\")\n \n if savePlotToFile != \"\":\n plt.savefig(savePlotToFile)\n print(\"saving figure to \" + savePlotToFile)\n plt.show()\n\ndef plot_histogram(title = \"\",hist_bins = 10,legend_location = 'upper right', \n xlabel = \"\", ylabel = \"Num. of Instances\", \n list_of_values = [], \n correct_preds = [],\n accuracy = [],\n savePlotToFile= \"\"):\n \"\"\"Plots histogram specifically for str_len_analysis, but could potentially be used for more\n\n Args:\n list_of_values (list): number of total instances for each category\n correct_preds (list): number of correctly predicted instances for each category\n accuracy (list): accuracy of total_correct_predictions/ total\n savePlotToFile (str): File name for saving plot, empty string will not save a plot\n title (str): title of graph\n hist_bins (int): number of bins to use\n legend_location (str): legend location\n rot (int): rotation for x-axis ticks\n xlabel (str): x label\n y_label(str):y label\n \"\"\"\n \n fig, ax = plt.subplots()\n a, bins, _ = ax.hist(list_of_values,bins=hist_bins, color=\"red\", \n label = \"Total\",edgecolor='black')\n _, _, _ = ax.hist(correct_preds,bins = bins, \n color=\"blue\", \n label = \"Correct Prediciton\", \n edgecolor='black')\n \n # Set an offset that is used to bump the label up a bit above the bar.\n y_offset = 4\n # Add labels to each bar.\n for i, total in enumerate(a):\n ax.text((bins[i] + bins[i+1])/2, total + y_offset, str(round(accuracy[i]*100,1)) + \"%\", ha='center')\n \n \n plt.legend(loc = legend_location,prop={'size': 8})\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.xticks(bins)\n \n if savePlotToFile != \"\":\n plt.savefig(savePlotToFile)\n print(\"saving figure to \" + savePlotToFile)\n plt.show()\n \n \n\ndef histogram_values(list_of_values = [], \n correct_preds = []) -> list:\n bin_vals, bins, _ = plt.hist(list_of_values, color=\"red\", \n label = \"Total\",edgecolor='black')\n bin_vals_correct, _, _ = plt.hist(correct_preds,bins = bins, \n color=\"blue\", \n label = \"Correct Prediciton\", \n edgecolor='black')\n plt.close()\n return bins, bin_vals, bin_vals_correct","repo_name":"alexispalmer/olea","sub_path":"olea/viz/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"41834321969","text":"faqTitlesTemplate = 'https://dutch.miraheze.org/wiki/{}?action=raw'\nfaqTitlesParams = {\n \"format\": \"json\",\n \"list\": 
\"prefixsearch\",\n \"action\": \"query\",\n \"pssearch\": \"Taalbot/faq/\",\n \"pslimit\": 500\n}\nwikiApiUrl = 'https://dutch.miraheze.org/w/api.php'\nfaqUpdateParams = {\n \"format\": \"json\",\n \"list\": \"recentchanges\",\n \"action\": \"query\",\n \"rclimit\": \"1\",\n \"rctitle\": \"Taalbot/faq\"\n}\n\ndef getRecentChangesParams(title):\n return {\n \"format\": \"json\",\n \"list\": \"recentchanges\",\n \"action\": \"query\",\n \"rclimit\": \"1\",\n \"rctitle\": title\n }","repo_name":"jassummisko/NuTaalbot","sub_path":"utils/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73213133299","text":"import csv\nfrom data_analysis.models import Order\n\n\nwith open('daf.csv', 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n next(csvreader) # Skip the header row\n for row in csvreader:\n obj = Order()\n obj.order_id = row[0]\n obj.date = row[1]\n obj.user_id = row[2]\n obj.total_purchase = row[3]\n obj.latitude = row[4]\n obj.longitude = row[5]\n obj.save()\n","repo_name":"ahamidida/ecommerce_da","sub_path":"ecommerce_da/import_csv.py","file_name":"import_csv.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21927533670","text":"from pynput import keyboard\nfrom tello import Tello\nimport time\nimport numpy as np\nimport cv2\n\nvx = 0\nvy = 0\nvz = 0\nrot = 0\nvel = 30\ntello = Tello('', 9005)\n\ndef on_press(key):\n global vx,vy,vz,rot, vel, tello\n #print(key)\n try:\n if key.char == 'w':\n vz = vel\n elif key.char == 's':\n vz = -vel\n elif key.char == 'a':\n rot = -vel\n elif key.char == 'd':\n rot = vel\n elif key.char == 'p':\n print(\"p\")\n except AttributeError:\n if key == keyboard.Key.up:\n vx = vel\n elif key == keyboard.Key.down:\n vx = -vel\n elif key == keyboard.Key.right:\n vy = vel\n elif key == keyboard.Key.left:\n vy = -vel\n\n tello.set_velocities(vy,vx,vz,rot)\n \n \n\ndef on_release(key):\n global vx,vy,vz,rot, vel, tello\n #print(key)\n try:\n if key.char == 'w':\n vz = 0\n elif key.char == 's':\n vz = 0\n elif key.char == 'a':\n rot = 0\n elif key.char == 'd':\n rot = 0\n except AttributeError:\n if key == keyboard.Key.up:\n vx = 0\n elif key == keyboard.Key.down:\n vx = 0\n elif key == keyboard.Key.right:\n vy = 0\n elif key == keyboard.Key.left:\n vy = 0\n\n elif key == keyboard.Key.esc:\n tello.land()\n tello.close() \n return False\n\n tello.set_velocities(vy,vx,vz,rot)\n \nprint(tello.get_battery())\ntello.takeoff()\n#time.sleep(6)\n\n# Collect events until released\nlistener = keyboard.Listener(\n on_press=on_press,\n on_release=on_release)\nlistener.daemon = True\nlistener.start()\n\n\nwhile(True):\n # Capture frame-by-frame\n #ret, frame = cap.read()\n frame = tello.get_image()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n # Display the resulting frame\n cv2.imshow('frame',gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\n#cap.release()\nlistener.stop()\ntello.close()\ncv2.destroyAllWindows()","repo_name":"RoboticsLabURJC/2019-tfm-aitor-martinez","sub_path":"tello_teleop/tello_teleop.py","file_name":"tello_teleop.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9436948801","text":"import random\nimport 
multiprocessing\n\n\"\"\"\n*** MULTIPROCESSING ***\nAdvantages:\n- Takes advantage of multiple CPU cores\n- Child processes can be killed\n- Well suited to CPU-bound applications\n\nDisadvantages:\n- Uses a lot of memory\n- No shared memory between processes\n\n\"\"\"\n\ndef list_append(count, id, out_list):\n    for i in range(count):\n        out_list.append(random.random())\n\nif __name__ == \"__main__\":\n\n    for xx in range(0, 5):\n        # process\n        size = 3000000\n        procs = 8 # We will run 8 processes\n\n        jobs = [] # We will store all processes in a list.\n        for i in range(0, procs):\n            out_list = list()\n            process = multiprocessing.Process(target=list_append, args=(size, i, out_list)) # Create a process object, passing the target function and its arguments to the constructor.\n            jobs.append(process) # Append processes to list\n\n        # Start the processes (i.e. calculate the random number lists)\n        for j in jobs:\n            j.start() # Start all processes\n\n        # Ensure all of the processes have finished\n        for j in jobs:\n            j.join() # If we use join, the main process will block until the children finish.\n\n\n\n","repo_name":"itdevline/process_vs_thread","sub_path":"processTest.py","file_name":"processTest.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19043960996","text":"class PriorityQueue:\n    def __init__(self):\n        self.heap = [None for i in range(40)]\n        self.size=-1\n    \n    #retrieving the parent node of the child node\n    def parent(self,i):\n        return (i-1)//2\n    \n    #retrieving the left child of the parent node (fixed: must be 2*i+1 to match parent=(i-1)//2)\n    def left_child(self,i):\n        return 2*i+1\n    \n    #retrieving the right child of the parent (fixed: must be 2*i+2)\n    def right_child(self,i):\n        return 2*i+2\n    \n    #Returning the element having the highest priority \n    def get_Max(self):\n        return self.heap[0]\n    \n    #Returning the element having the minimum priority \n    def get_Min(self):\n        return self.heap[self.size]\n    \n    #function to move the node up the tree in order to restore the heap property.\n    def moveUp(self,i):\n        while(i>0):\n            #swapping the child node with its parent while the child is larger (max-heap)\n            if(self.heap[self.parent(i)]<self.heap[i]):\n                self.heap[self.parent(i)],self.heap[i]=self.heap[i],self.heap[self.parent(i)]\n                i=self.parent(i)\n            else:\n                break\n    \n    #function to move the node down the tree in order to restore the heap property\n    #(reconstructed: the source lost the span between a stray '<' and '>' to tag stripping)\n    def moveDown(self,k):\n        index = k\n        #getting the location of the Left Child\n        left = self.left_child(k)\n        if(left<=self.size and self.heap[left]>self.heap[index]):\n            index = left\n        #getting the location of the Right Child\n        right = self.right_child(k)\n        if(right<=self.size and self.heap[right]>self.heap[index]):\n            index = right\n        #If k is not equal to index\n        if(k!=index):\n            self.heap[index],self.heap[k]=self.heap[k],self.heap[index]\n            self.moveDown(index)\n    \n    #Removing the element of maximum priority\n    def removeMax(self):\n        removed = self.heap[0]\n        self.heap[0] = self.heap[self.size]\n        self.size=self.size-1\n        # print(\"Max element of the heap which is removed: \",removed)\n        self.moveDown(0)\n    \n    #inserting the element in a priority queue\n    def insert(self,p):\n        self.size=self.size+1\n        self.heap[self.size]=p\n        #move Up to maintain heap property \n        self.moveUp(self.size)\n    \n    #Removing the element from the priority queue at a given index i. 
\n    def delete(self,i):\n        #To delete the node at index i, overwrite it with (current max element + 1),\n        # so that after bubbling up it becomes the new root node, which can then be\n        # removed very easily from the heap\n        #Replace the node that we want to remove with (value of the max node of the max-heap + 1)\n        self.heap[i]=self.heap[0]+1\n        #the node stored at the ith location (the one we want to remove) is shifted to the root node,\n        # which lets us remove it\n        self.moveUp(i)\n        # Removing the node having maximum priority\n        # (the ith-location node, which was shifted up to become the new root node, is removed)\n        self.removeMax()\n\npq=PriorityQueue()\npq.insert(20)\npq.insert(19)\npq.insert(21)\npq.insert(18)\npq.insert(12)\npq.insert(17)\npq.insert(15)\npq.insert(16)\npq.insert(14)\n\nprint(\"Elements in a priority queue are: \")\nfor i in range(pq.size+1):\n    if(pq.heap[i]!=None):\n        print(pq.heap[i],end=' ')\nprint()\n\n#deleting the element whose index is 2\npq.delete(2)\n\nprint(\"Elements in a priority queue after deleting the element are : \")\nfor i in range(pq.size+1):\n    if(pq.heap[i]!=None):\n        print(pq.heap[i],end=' ')\nprint()\n\nmax = pq.get_Max()\nprint(\"The element which is having the highest priority is \",max)\n\nmin = pq.get_Min()\nprint(\"The element which is having the minimum priority is : \",min)","repo_name":"jsai2001/DSA-with-Python","sub_path":"Queue Basic Programs/PriorityQueue_MaxHeap_Array.py","file_name":"PriorityQueue_MaxHeap_Array.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35120084403","text":"class ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\ndef swap_pairs(head: ListNode):\n    # Recursively call swap_pairs until head and head.next become None.\n    # There are two cases in which the if condition is not satisfied:\n    # 1) If head is not None but head.next is None,\n    # the single node that head points to is returned.\n    # In the caller that receives the returned value,\n    # p (which pointed to the node after head) and head are swapped:\n    # p -> head -> returned value (single node)\n    # With the values in this shape, p is returned again from here,\n    # p and head are swapped in the caller receiving that value as well,\n    # and in the end p is returned and the function terminates.\n    # From the end of the recursion upward, p is returned with the values accumulated in swapped order.\n    # 2) If both head and head.next are None,\n    # None is returned.\n    # In the caller that receives the returned value, p and head are swapped as in 1):\n    # p -> head -> returned value (None)\n    if head and head.next:\n        p = head.next\n        head.next = swap_pairs(p.next)\n        p.next = head\n        return p\n    return head\n\n\nnode = ListNode(1)\nnode.next = ListNode(2)\nnode.next.next = ListNode(3)\nnode.next.next.next = ListNode(4)\nanswer = swap_pairs(node)\n\nprint(answer.val)\nprint(answer.next.val)\nprint(answer.next.next.val)\nprint(answer.next.next.next.val)\n","repo_name":"Gyusik-Choi/algorithm","sub_path":"leet-code/24. 
Swap Nodes in Pairs/24_swap_nodes_in_pairs_3.py","file_name":"24_swap_nodes_in_pairs_3.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72079391502","text":"#abc294c\r\nN,M=[int(nm) for nm in input().split()]\r\nA=[[int(a),i+1] for i,a in enumerate(input().split())]\r\nB=[[int(b),-(i+1)] for i,b in enumerate(input().split())]\r\n\r\nC = sorted(A+B)\r\n\r\nans_A = [0 for i in range(N)]\r\nans_B = [0 for i in range(M)]\r\n\r\n#if i is greater than 0 -> element of A\r\n#otherwise -> element of B\r\nfor j, (c, i) in enumerate(C):\r\n    if i > 0:\r\n        ans_A[i - 1] = j + 1\r\n    else:\r\n        ans_B[-i-1] = j + 1\r\n\r\nprint(*ans_A)\r\nprint(*ans_B)","repo_name":"hida2420/hidaAlgorism2","sub_path":"enumerateで要素と元の添え字を同時に管理.py","file_name":"enumerateで要素と元の添え字を同時に管理.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36947390646","text":"from __future__ import annotations\nimport csv\nimport aiohttp\nimport asyncio\nfrom typing import Any\nimport traceback\nimport time\n\nCOLUMNS = ['vin', 'open_recalls', 'recall_status', 'name_of_recall', 'campaign', 'date_of_recall_announcement',\n           'brief_description_of_recall', 'safety_risk', 'remedy', 'customer_satisfaction_programs', 'raw_json']\n\n\ndef chunks(xs, n):\n    n = max(1, n)\n    return (xs[i:i + n] for i in range(0, len(xs), n))\n\n\ndef get_exception_traceback(exception):\n    tb = ''.join(traceback.format_exception(exception))\n    return tb\n\n\ndef write_to_csv(file_path: str, data: list[Any]):\n    with open(file_path, 'w') as csvfile:\n        writer = csv.DictWriter(csvfile, fieldnames=COLUMNS)\n        writer.writeheader()\n        writer.writerows(data)\n\n\ndef dictionary_helper(details_dict: dict, vin_no: str, open_recalls):\n    details_dict['vin'] = vin_no\n    details_dict['open_recalls'] = open_recalls\n    details_dict['recall_status'] = ''\n    details_dict['name_of_recall'] = ''\n    details_dict['campaign'] = ''\n    details_dict['date_of_recall_announcement'] = ''\n    details_dict['brief_description_of_recall'] = ''\n    details_dict['safety_risk'] = ''\n    details_dict['remedy'] = ''\n\n    return details_dict\n\n\nasync def request_page(vin_no: str, session: aiohttp.ClientSession) -> aiohttp.ClientResponse:\n    retries = 3\n    is_success = False\n    response = None\n    error = None\n    api = f\"https://www.chevrolet.com/ownercenter/api/{vin_no}/gfas?cb=16775740181110.5469480851262047\"\n\n    for _ in range(retries):\n        try:\n            response = await session.get(url=api, ssl=True)\n            if response.status in [200, 404]:\n                is_success = True\n                break\n        except asyncio.exceptions.TimeoutError as e:\n            error = e\n\n    if not is_success:\n        if error:\n            raise error\n        else:\n            raise Exception(f\"Request failed with status code: {response.status}, text: {await response.text()}\")\n\n    return response\n\n\ndetails_list = []\n\n\nasync def parse_vehical_recalls(vin_no: str, session: aiohttp.ClientSession):\n    page = await request_page(vin_no, session)\n    vehicle_json = await page.json()\n    details_dict = {}\n\n    if 'VEHICLE_INVALID_VIN' in vehicle_json['messages']:\n        dictionary_helper(details_dict, vin_no, open_recalls=\"Invalid Vin\")\n        details_list.append(details_dict.copy())\n    else:\n        vehicle_recall_json = vehicle_json['data']['gfas']\n\n        if len(vehicle_recall_json) > 0:\n            open_recalls = \"Yes\"\n            for x in vehicle_recall_json:\n                try:\n                    name_of_recall = x['gfaTexts'][0]['subject']\n                except IndexError:\n                    name_of_recall = 'unknown status'\n                recall_status = 
x['vinStatusInfo']['vinStatus']\n\n try:\n campaign = x['governmentAgencies'][0]['govtAgencyNum']\n except IndexError:\n campaign = ''\n\n try:\n date_of_recall_announcement = x['governmentAgencies'][0]['notificationDate']\n except IndexError:\n date_of_recall_announcement = ''\n\n try:\n brief_description_of_recall = x['gfaTexts'][0]['description']\n except Exception as e:\n brief_description_of_recall = ''\n\n try:\n safety_risk = x['gfaTexts'][0]['safetyRisk']\n except Exception as e:\n safety_risk = ''\n\n try:\n remedy = x['gfaTexts'][0]['remedy']\n except Exception as e:\n remedy = ''\n\n details_dict['vin'] = vin_no\n details_dict['open_recalls'] = open_recalls\n details_dict['recall_status'] = recall_status\n details_dict['name_of_recall'] = name_of_recall.lower().title()\n details_dict['campaign'] = campaign\n details_dict['date_of_recall_announcement'] = date_of_recall_announcement\n details_dict['brief_description_of_recall'] = brief_description_of_recall.lower().title()\n details_dict['safety_risk'] = safety_risk.lower().title()\n details_dict['remedy'] = remedy.lower().title()\n details_list.append(details_dict.copy())\n\n if len(vehicle_recall_json) == 0:\n dictionary_helper(details_dict, vin_no, open_recalls=\"No\")\n details_list.append(details_dict.copy())\n\n return details_list\n\n\nasync def runner(all_vins_file_path: str, output_data_file_path: str, concurrency: int, batch_size: int):\n # get all vin nos\n with open(all_vins_file_path) as infile:\n all_vin_nos = [vin.strip() for vin in infile]\n print(f\"Total vins: {len(all_vin_nos)}\")\n\n vins_to_scrape = all_vin_nos[:10]\n print(f\"Vins to scrape: {len(vins_to_scrape)}\")\n print()\n\n # Start scraping in batches\n async def _wrapper(func, id, *args, **kwargs) -> (bool, Any | Exception):\n try:\n result = await func(*args, **kwargs)\n return True, id, result\n except Exception as e:\n return False, id, e\n\n batches = chunks(vins_to_scrape, batch_size)\n start_time = time.time()\n passed = []\n failed = []\n\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(limit=concurrency)) as session:\n for batch_index, batch in enumerate(batches):\n batch_start_time = time.time()\n batch_data = []\n print(\"------------------------------------\")\n print(f\"Processing batch {batch_index + 1}, {len(batch)} vins\")\n batch_result = await asyncio.gather(*[_wrapper(parse_vehical_recalls, vin, vin, session) for vin in batch])\n batch_end_time = time.time()\n print(\n f'Took {batch_end_time - batch_start_time} seconds to process batch {batch_index + 1} of size {len(batch)}')\n for result in batch_result:\n if result[0]:\n batch_data.append(result[2])\n passed.append(result[1])\n else:\n print(f\"Failed for vin: {result[1]}\")\n print(f'\\033[2;31;40m {get_exception_traceback(result[2])}')\n failed.append(result[1])\n\n # Write to file\n if len(batch_data) > 0:\n # pprint(batch_data)\n write_to_csv(output_data_file_path, batch_data[0])\n\n end_time = time.time()\n print(\"-------------------\")\n print()\n print(f\"Took {end_time - start_time} seconds\")\n print(f\"Tried to scrape: {len(vins_to_scrape)}\")\n print(f\"Success: {len(passed)}\")\n print(f\"Failed: {len(failed)}\")\n for vin in failed:\n print(vin)\n print()\n\n\nif __name__ == '__main__':\n ALL_VINS_FILE_PATH = \"vin_nos_chevrolet\"\n OUTPUT_DATA_FILE_PATH = \"vehicle_record_data_chevrolet_1.csv\"\n CONCURRENCY = 45\n BATCH_SIZE = 10\n\n asyncio.run(runner(all_vins_file_path=ALL_VINS_FILE_PATH, output_data_file_path=OUTPUT_DATA_FILE_PATH,\n 
concurrency=CONCURRENCY, batch_size=BATCH_SIZE))\n","repo_name":"abhijitkumar39/vehicle_recall_scraper","sub_path":"chevrolet_recalls/chevrolet_vehicle_recall_data_scraper.py","file_name":"chevrolet_vehicle_recall_data_scraper.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12355464298","text":"from collections import defaultdict, OrderedDict\n\nfrom heapq import heappush, heappop\n \nallowed = set(['AU','UA','CG','GC','GU','UG'])\n\ndef best(x):\n \n def _best(i,j):\n if (i,j) in opt:\n return opt[i,j]\n curr = -1\n for k in range(i, j):\n if _best(i,k) + _best(k+1,j) > curr:\n curr = _best(i,k) + _best(k+1,j)\n back[i, j] = k\n \n \n if x[i] + x[j] in allowed:\n if _best(i+1, j-1) + 1 > curr:\n curr = _best(i+1, j-1) + 1\n back[i, j] = -1\n opt[i, j] = curr\n return curr\n \n \n def solution(i, j):\n if i == j:\n return \".\"\n if i > j:\n return \"\"\n \n k = back[i, j]\n if k == -1:\n return \"(%s)\" % solution(i+1, j-1)\n else:\n return solution(i, k) + solution(k+1, j)\n \n opt = defaultdict(lambda:0)\n back = {}\n n = len(x)\n for i in range(n):\n opt[i, i] = 0\n opt[i, i-1] = 0\n \n \n return _best(0, n-1), solution(0, n-1)\n \n#print(best('CGAGGUGGCACUGACCAAACACCACCGAAAC'))\n\n\ndef total(x):\n\n def _total(i,j):\n if (i,j) in opt:\n return opt[i,j]\n curr = 0\n for k in range(i, j):\n if x[k] + x[j] in allowed:\n curr += _total(i, k-1) * _total(k+1,j-1)\n \n curr += _total(i, j-1)\n opt[i, j] = curr\n return curr\n \n \n opt = defaultdict(lambda:0)\n n = len(x)\n for i in range(n):\n opt[i, i] = 1\n opt[i, i-1] = 1\n \n \n return _total(0, n-1)\n \n \n#print(total(\"ACAGU\"))\n \ndef kbest(x, k):\n def _kbest(i, j, dep=0):\n def trypush_b(s, p, q):\n if p < len(topk[i,s]) and q < len(topk[s+1, j]) and (s,p,q) not in visited:\n heappush(h, (-(topk[i,s][p][0] + topk[s+1,j][q][0]), (s,p,q)))\n visited.add((s,p,q))\n \n def trypush_u(p):\n if p < len(topk[i+1, j-1]):\n heappush(h, (-(topk[i+1, j-1][p][0] + 1), (p,)))\n \n if (i,j) in topk:\n return topk[i,j]\n# if i == j:\n# topk[i,j] = [(0, '.')]\n# return\n# elif j == i-1:\n# topk[i,i-1] = [(0,'')]\n# return\n \n h = []\n visited = set()\n for s in range(i,j):\n _kbest(i, s, dep+1)\n _kbest(s+1, j, dep+1)\n trypush_b(s, 0, 0)\n if x[i] + x[j] in allowed:\n _kbest(i+1, j-1, dep+1)\n trypush_u(0)\n \n \n used = set()\n while len(topk[i,j]) < k:\n if h == []:\n break\n score, indicies = heappop(h)\n try:\n s,p,q = indicies\n foldStr = \"%s%s\" % (topk[i,s][p][1], topk[s+1,j][q][1])\n if (-score, foldStr) not in used:\n topk[i,j].append((-score, foldStr))\n used.add((-score, foldStr))\n trypush_b(s,p+1, q)\n trypush_b(s, p, q+1)\n except:\n p = indicies[0]\n foldStr = \"(%s)\" % topk[i+1, j-1][p][1]\n if (-score, foldStr) not in used:\n topk[i, j].append((-score, foldStr))\n used.add((-score, foldStr))\n trypush_u(p+1)\n \n \n topk = defaultdict(list)\n n = len(x)\n for i in range(n):\n topk[i, i] = [(0, '.')]\n topk[i, i-1] = [(0, '')]\n _kbest(0, n-1)\n \n\n fin = OrderedDict()\n k_v = 0\n for j in reversed(range(n)):\n for v in topk[0,j]:\n if k_v < k:\n if v not in fin:\n fin[v] = v\n k_v += 1\n\n\n out = list(fin.keys())\n\n \n return out\n \n#print(kbest(\"UCAGAGGCAUCAAACCU\", 300))\n\n","repo_name":"puppol/cs325","sub_path":"hw10/rna.py","file_name":"rna.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"18199430183","text":"from bank_app_modules import (\n start_app,\n create_account,\n login,\n transaction,\n check_balance,\n accounts,\n new_transaction,\n deposit,\n withdraw,\n transfer\n)\n\naccount_transaction_code = start_app()\n\naccount_user = None\n\nif account_transaction_code == 1:\n users = create_account()\n accounts = users['accounts']\n account_user = users['new_account']\n print('\\nAll accounts\\n' + str(accounts))\n account_transaction_code = start_app()\n\n\ndef transaction_actions(account_user):\n while account_user == None:\n account_user = login()\n if account_user == None:\n users = create_account()\n accounts = users['accounts']\n account_user = users['new_account']\n print('\\nAll accounts\\n' + str(accounts))\n print(account_user)\n transaction_code = transaction()\n\n if transaction_code == 1:\n account_balance = check_balance(account_user)\n print('\\nYour account balance is: ' + '=N=' + str(account_balance))\n ok = input('Press ENTER: ')\n while ok != '':\n ok = input('Press ENTER: ')\n return new_transaction()\n\n if transaction_code == 2:\n bank_deposit = deposit(account_user)\n amount_deposit = bank_deposit['amount']\n account_balance = bank_deposit['new_balance']\n print(\n '\\nCredit: ' + '=N=' + str(amount_deposit) +\n '\\nYour account balance is: ' + '=N=' + str(account_balance)\n )\n ok = input('Press ENTER: ')\n while ok != '':\n ok = input('Press ENTER: ')\n return new_transaction()\n\n if transaction_code == 3:\n bank_withdrawal = withdraw(account_user)\n withdrawal = bank_withdrawal['amount']\n account_balance = bank_withdrawal['new_balance']\n print(\n '\\nDebit: ' + '=N=' + str(withdrawal) +\n '\\nYour account balance is: ' + '=N=' + str(account_balance)\n )\n ok = input('Press ENTER: ')\n while ok != '':\n ok = input('Press ENTER: ')\n return new_transaction()\n\n if transaction_code == 4:\n fund_transfer = transfer(account_user)\n if fund_transfer != None:\n amount = fund_transfer['amount']\n account_balance = fund_transfer['new_balance']\n recipient = fund_transfer['recipient_email']\n recipient_balance = fund_transfer['recipient_balance']\n print(\n '\\nTransaction type: Transfer' +\n '\\nRecipient: ' + recipient +\n '\\nDebit: ' + '=N=' + str(amount) +\n '\\nYour account balance is: ' + '=N=' + str(account_balance) +\n '\\nRecipient balance: ' + '=N=' + str(recipient_balance)\n )\n ok = input('Press ENTER: ')\n while ok != '':\n ok = input('Press ENTER: ')\n return new_transaction()\n\n\ndef transaction_function(account_user):\n transaction_action = transaction_actions(account_user)\n\n if transaction_action == 0:\n return print(\n '\\nThank you for banking with us!' 
+\n '\\nFrom: Tobi Akanji'\n )\n\n if transaction_action == 1:\n transaction_function(account_user)\n\n\nif account_transaction_code == 2:\n transaction_function(account_user)\n","repo_name":"Tboy-AK/vgg-bank-app-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23710395527","text":"import pytest\n\nfrom backend.container_service.clusters.constants import BcsCCNodeStatus\nfrom backend.container_service.clusters.tools import node as node_tools\nfrom backend.resources.constants import NodeConditionStatus\n\nfake_inner_ip = \"127.0.0.1\"\nfake_node_name = \"bcs-test-node\"\n\n\ndef test_query_cluster_nodes(client, create_and_delete_node, ctx_cluster):\n cluster_nodes = node_tools.query_cluster_nodes(ctx_cluster)\n assert fake_inner_ip in cluster_nodes\n assert cluster_nodes[fake_inner_ip][\"name\"] == fake_node_name\n assert cluster_nodes[fake_inner_ip][\"status\"] == NodeConditionStatus.Ready\n assert not cluster_nodes[fake_inner_ip][\"unschedulable\"]\n\n\n@pytest.mark.parametrize(\n \"cluster_node_status,unschedulable,bcs_cc_node_status,expected_status\",\n [\n (NodeConditionStatus.Ready, False, BcsCCNodeStatus.Normal, BcsCCNodeStatus.Normal),\n (NodeConditionStatus.Ready, True, BcsCCNodeStatus.Normal, BcsCCNodeStatus.Removable),\n (NodeConditionStatus.Ready, True, BcsCCNodeStatus.ToRemoved, BcsCCNodeStatus.ToRemoved),\n (NodeConditionStatus.NotReady, True, BcsCCNodeStatus.NotReady, BcsCCNodeStatus.NotReady),\n (NodeConditionStatus.NotReady, True, BcsCCNodeStatus.Removable, BcsCCNodeStatus.NotReady),\n (NodeConditionStatus.Unknown, True, BcsCCNodeStatus.Removable, BcsCCNodeStatus.Unknown),\n ],\n)\ndef test_transform_status(cluster_node_status, unschedulable, bcs_cc_node_status, expected_status):\n assert expected_status == node_tools.transform_status(cluster_node_status, unschedulable, bcs_cc_node_status)\n\n\n@pytest.fixture\ndef cluster_name():\n return \"cluster_name\"\n\n\nclass TestNodesData:\n def test_compose_data_by_bcs_cc_nodes(self, bcs_cc_nodes, cluster_nodes, cluster_id, cluster_name):\n client = node_tools.NodesData(\n bcs_cc_nodes=bcs_cc_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name=cluster_name\n )\n node_data = client._compose_data_by_bcs_cc_nodes()\n assert len(node_data) == len(\n [node for inner_ip, node in bcs_cc_nodes.items() if node[\"status\"] != BcsCCNodeStatus.Normal]\n )\n assert node_data[0][\"cluster_name\"] == cluster_name\n\n def test_compose_data_by_cluster_nodes(self, bcs_cc_nodes, cluster_nodes, cluster_id):\n client = node_tools.NodesData(\n bcs_cc_nodes=bcs_cc_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name=\"cluster_name\"\n )\n node_data = client._compose_data_by_cluster_nodes()\n assert len(node_data) == len(cluster_nodes)\n assert node_data[0][\"status\"] == BcsCCNodeStatus.Normal\n","repo_name":"Tencent/bk-bcs-saas","sub_path":"bcs-app/backend/tests/container_service/clusters/tools/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"47"} +{"seq_id":"20085219714","text":"from __future__ import absolute_import, division, print_function\nimport ctypes\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom pqos import Pqos, CPqosConfig\nfrom pqos.native_struct import (\n CPqosSysconfig, CPqosCap, CPqosCpuInfo, 
CPqosDevinfo\n)\n\n\nclass TestPqos(unittest.TestCase):\n \"Tests for Pqos class.\"\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_singleton(self, _load_lib):\n \"Tests if the same object is constructed each time Pqos() is invoked.\"\n\n pqos = Pqos()\n pqos2 = Pqos()\n\n self.assertIs(pqos, pqos2)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init(self, _load_lib):\n \"Tests library initialization.\"\n # pylint: disable=no-self-use\n\n def pqos_init_mock(_cfg_ref):\n \"Mock pqos_init().\"\n\n return 0\n\n pqos = Pqos()\n\n pqos.lib.pqos_init = MagicMock(side_effect=pqos_init_mock)\n\n pqos.init('MSR')\n\n pqos.lib.pqos_init.assert_called_once()\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_fini(self, _load_lib):\n \"Tests library finalization.\"\n # pylint: disable=no-self-use\n\n def pqos_init_mock(_cfg_ref):\n \"Mock pqos_init().\"\n\n return 0\n\n def pqos_fini_mock():\n \"Mock pqos_fini().\"\n\n return 0\n\n pqos = Pqos()\n\n pqos.lib.pqos_init = MagicMock(side_effect=pqos_init_mock)\n pqos.lib.pqos_fini = MagicMock(side_effect=pqos_fini_mock)\n\n pqos.init('OS')\n pqos.fini()\n\n pqos.lib.pqos_init.assert_called_once()\n pqos.lib.pqos_fini.assert_called_once()\n\n def _test_init_verbose(self, verbose, expected_verbose):\n \"\"\"\n Tests if verbosity level is correctly validated during library\n initialization.\n \"\"\"\n\n def pqos_init_mock(cfg_ref):\n \"Mock pqos_init().\"\n p_cfg = ctypes.cast(cfg_ref, ctypes.POINTER(CPqosConfig))\n self.assertEqual(p_cfg.contents.verbose, expected_verbose)\n\n return 0\n\n pqos = Pqos()\n\n pqos.lib.pqos_init = MagicMock(side_effect=pqos_init_mock)\n\n pqos.init('OS_RESCTRL_MON', verbose=verbose)\n\n pqos.lib.pqos_init.assert_called_once()\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init_verbose_silent(self, _load_lib):\n \"Tests if 'silent' verbosity level is properly handled.\"\n self._test_init_verbose('silent', CPqosConfig.LOG_VER_SILENT)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init_verbose_default(self, _load_lib):\n \"Tests if 'default' verbosity level is properly handled.\"\n self._test_init_verbose('default', CPqosConfig.LOG_VER_DEFAULT)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init_verbose_none(self, _load_lib):\n \"Tests if None as verbosity level is properly handled.\"\n self._test_init_verbose(None, CPqosConfig.LOG_VER_DEFAULT)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init_verbose_verbose(self, _load_lib):\n \"Tests if 'verbose' verbosity level is properly handled.\"\n self._test_init_verbose('verbose', CPqosConfig.LOG_VER_VERBOSE)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_init_verbose_super(self, _load_lib):\n \"Tests if 'super' verbosity level is properly handled.\"\n self._test_init_verbose('super', CPqosConfig.LOG_VER_SUPER_VERBOSE)\n\n @patch('ctypes.cdll.LoadLibrary')\n def test_get_sysconfig(self, _load_lib):\n \"Tests get_sysconfig() method.\"\n\n def pqos_init_mock(_cfg_ref):\n \"Mock pqos_init().\"\n\n return 0\n\n def pqos_fini_mock():\n \"Mock pqos_fini().\"\n\n return 0\n\n # Build stub system configuration\n cap = CPqosCap(mem_size=5, version=2, num_cap=0)\n cpu = CPqosCpuInfo(mem_size=7, vendor=3, num_cores=0)\n dev = CPqosDevinfo(num_channels=3, num_devs=1)\n cfg = CPqosSysconfig(cap=ctypes.pointer(cap), cpu=ctypes.pointer(cpu),\n dev=ctypes.pointer(dev))\n\n def pqos_sysconfig_get_mock(cfg_ref):\n \"Mock pqos_sysconfig_get().\"\n\n cfg_ptr = ctypes.pointer(cfg)\n addr = ctypes.addressof(cfg_ptr)\n size = ctypes.sizeof(cfg_ptr)\n ctypes.memmove(cfg_ref, addr, size)\n\n 
return 0\n\n pqos = Pqos()\n\n # Setup mock functions\n pqos.lib.pqos_init = MagicMock(side_effect=pqos_init_mock)\n pqos.lib.pqos_sysconfig_get = MagicMock(side_effect=pqos_sysconfig_get_mock)\n pqos.lib.pqos_fini = MagicMock(side_effect=pqos_fini_mock)\n\n # Get system configuration\n pqos.init('OS')\n syscfg = pqos.get_sysconfig()\n pqos.fini()\n\n # Ensure mock function has been called\n pqos.lib.pqos_sysconfig_get.assert_called_once()\n\n # Verify capabilities\n self.assertEqual(syscfg.cap.contents.mem_size, 5)\n self.assertEqual(syscfg.cap.contents.version, 2)\n self.assertEqual(syscfg.cap.contents.num_cap, 0)\n\n # Verify cpuinfo\n self.assertEqual(syscfg.cpu.contents.mem_size, 7)\n self.assertEqual(syscfg.cpu.contents.vendor, 3)\n self.assertEqual(syscfg.cpu.contents.num_cores, 0)\n\n # Verify devinfo\n self.assertEqual(syscfg.dev.contents.num_channels, 3)\n self.assertEqual(syscfg.dev.contents.num_devs, 1)\n","repo_name":"intel/intel-cmt-cat","sub_path":"lib/python/pqos/test/test_pqos.py","file_name":"test_pqos.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":644,"dataset":"github-code","pt":"47"} +{"seq_id":"13759770797","text":"\nimport sys\n\nfrom epyk.core.css import Properties\nfrom epyk.core.css.styles.classes.CssStyle import Style\n\n\nclass Attrs(Properties.CssMixin):\n\n def __init__(self, component):\n self.attrs = {}\n self.component = component\n self._report = component.page\n self.page = component.page\n\n def css(self, attrs, value=None, important=False):\n \"\"\"\n Description:\n ------------\n Set multiple CSS attributes to the HTML component.\n\n Attributes:\n ----------\n :param attrs: Dictionary | String. optional. The attributes to be added.\n :param value: String. Optional. The value for a given item.\n :param important: Boolean. Optional. Flag the attribute to be important.\n \"\"\"\n if not isinstance(attrs, dict):\n if value is None:\n return self.attrs.get(attrs)\n\n if important:\n value = \"%s !IMPORTANT\" % value\n self.attrs[attrs] = value\n\n for k, v in attrs.items():\n if important:\n v = \"%s !IMPORTANT\" % v\n self.attrs[k] = v\n return self.attrs\n\n def remove(self, attr=None, set_none=False):\n \"\"\"\n Description:\n ------------\n Remove a CSS attribute to the HTML component.\n\n This function will either remove it if it is part of the existing CSS attribute or set it to auto in case it is\n coming from a CSS class.\n\n Attributes:\n ----------\n :param attr: String. Optional. The attribute to be removed.\n :param set_none: Boolean. Optional. 
Set the CSS attribute value to None on the CSS.\n \"\"\"\n key = attr or sys._getframe().f_back.f_code.co_name.replace(\"_\", \"-\")\n if set_none:\n self.attrs[key] = \"none\"\n self.component.attr['css'][key] = \"none\"\n else:\n if key in self.attrs:\n del self.attrs[key]\n if key in self.component.attr['css']:\n del self.component.attr['css'][key]\n else:\n self.attrs[key] = \"unset\"\n self.component.attr['css'][key] = \"auto\"\n\n def __str__(self):\n css_tag = [\"%s:%s\" % (k, v) for k, v in self.attrs.items()]\n return \";\".join(css_tag)\n\n\nclass Commons(Attrs):\n\n def __init__(self, component):\n super(Commons, self).__init__(component)\n self.font_size = 'inherit'\n self.font_family = 'inherit'\n self.box_sizing = 'border-box'\n\n\nclass Empty(Attrs):\n\n def __init__(self, component):\n super(Empty, self).__init__(component)\n\n\nclass Body(Attrs):\n\n def __init__(self, component):\n super(Body, self).__init__(component)\n self.font_size = component.style.globals.font.normal()\n self.font_family = component.style.globals.font.family\n self.margin = 0\n\n\nclass CssInline(Attrs):\n\n def __init__(self, component=None):\n self.attrs = {}\n self.component = component\n if component is not None:\n self._report = component.page\n self.page = component.page\n\n @property\n def stroke_dasharray(self):\n return self.css(\"stroke-dasharray\")\n\n @stroke_dasharray.setter\n def stroke_dasharray(self, val):\n self.css({\"stroke-dasharray\": val})\n\n @property\n def stroke_width(self):\n return self.css(\"stroke-width\")\n\n @stroke_width.setter\n def stroke_width(self, val):\n self.css({\"stroke-width\": val})\n\n @property\n def fill(self):\n return self.css(\"fill\")\n\n @fill.setter\n def fill(self, val):\n self.css({\"fill\": val})\n\n @property\n def fill_opacity(self):\n return self.css(\"fill-opacity\")\n\n @fill_opacity.setter\n def fill_opacity(self, num):\n self.css({\"fill-opacity\": num})\n\n def to_dict(self, copy=False):\n \"\"\"\n Description:\n ------------\n Returns the underlying CSS attributes.\n This is the internal object and not a copy by default.\n\n Attributes:\n ----------\n :param copy: Boolean. Optional. Specify if a copy must be returned.\n \"\"\"\n if copy:\n return dict(self.attrs)\n\n return self.attrs\n\n def important(self, attrs=None):\n \"\"\"\n Description:\n ------------\n\n If attrs is not defined all the attributes will be important.\n\n Attributes:\n ----------\n :param attrs: Dictionary. The Css Python property to be changed.\n \"\"\"\n if attrs is None:\n for k in self.attrs.items():\n self.attrs[k] = \"%s !IMPORTANT\" % self.attrs[k]\n else:\n for k in attrs:\n setattr(self, k, \"%s !IMPORTANT\" % getattr(self, k))\n\n def to_class(self, classname):\n \"\"\"\n Description:\n ------------\n The CSS class object.\n\n :param classname: String. 
The class name.\n \"\"\"\n v_cls = type(classname, (Style, ), {\"_attrs\": self.attrs})\n return v_cls(None)\n\n def define_class(self, classname, page):\n v_cls = page.body.style.custom_class({\"_attrs\": self.attrs}, classname)\n return v_cls\n","repo_name":"TrendingTechnology/epyk-ui","sub_path":"epyk/core/css/styles/attributes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"70080891342","text":"import sys\ninput = sys.stdin.readline\ns = input().strip('\\n')\ncnt=0\nN = len(s)\nfor i in range(N):\n if s[i] == '2':\n if i == N-1:\n cnt += 1\n elif s[i+1]!='5':\n cnt += 1\nprint(cnt)","repo_name":"bobxiong88/Competitive-Programming-Solutions","sub_path":"DMOJ solutions/Uncategorized/ucc21p1.py","file_name":"ucc21p1.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31660136314","text":"from __future__ import print_function\n'''Implement a function to check if a tree is balanced. For the purposes of this\nquestion, a balanced tree is defined to be a tree such that no two leaf nodes differ\nin distance from the root by more than one'''\n\nclass BiTree(object):\n def __init__(self, value):\n self.value = value\n self.left_child = None\n self.right_child = None\n\ndef max_d(root, d):\n if not root:\n return d\n left = max_d(root.left_child, d+1)\n right = max_d(root.right_child, d+1)\n return max(left, right)\n\ndef min_d(root, d):\n if not root:\n return d\n left = min_d(root.left_child, d+1)\n right = min_d(root.right_child, d+1)\n return min(left, right)\n\ndef is_balanced(root):\n '''Balanced if max_depth - min_depth < 2'''\n if not root:\n return True\n if (max_d(root, 0) - min_d(root, 0)) < 2:\n return True\n return False\n\ndef get_balanced():\n A = BiTree('A')\n B = BiTree('B')\n C = BiTree('C')\n D = BiTree('D')\n A.left_child = B\n A.right_child = C\n C.left_child = D\n return A\n\ndef get_unbalanced():\n A = BiTree('A')\n B = BiTree('B')\n C = BiTree('C')\n D = BiTree('D')\n E = BiTree('E')\n F = BiTree('F')\n A.left_child = B\n A.right_child = C\n C.left_child = D\n C.right_child = F\n D.right_child = E\n\n return A\n\ndef asserts(predicate, message):\n if not predicate:\n raise RuntimeError(message)\n print('.', end='')\n\nif __name__ == '__main__':\n asserts(is_balanced(get_balanced())==True, \"test 1 failed\")\n asserts(is_balanced(get_unbalanced())==False, \"test 2 failed\")\n print('')\n","repo_name":"rylans/ctci","sub_path":"ch4-trees-and-graphs/question-1.py","file_name":"question-1.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1455022232","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Import Splinter and BeautifulSoup\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\n\n\n# In[2]:\n\n\n# Setting up Splinter\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# In[3]:\n\n\n# Visit the URL to scrape\nurl = 'https://redplanetscience.com/'\nbrowser.visit(url)\n# Optional delay for loading the page\nbrowser.is_element_present_by_css('div.list_text', wait_time=1)\n\n\n# In[4]:\n\n\n# Parse the HTML\nhtml = browser.html\nnews_soup = 
soup(html, 'html.parser')\nslide_elem = news_soup.select_one('div.list_text')\n\n\n# In[5]:\n\n\n# Capturing the title of the newest article\nslide_elem.find('div', class_='content_title')\n\n\n# In[6]:\n\n\n# Use the parent element to find the first 'a' tag and save it as 'news_title'\nnews_title = slide_elem.find('div', class_='content_title').get_text()\nnews_title\n\n\n# In[7]:\n\n\n# Use the parent element to find the first summary and save it as news_p\nnews_p = slide_elem.find('div', class_='article_teaser_body').get_text()\nnews_p\n\n\n# ### Featured Image\n\n# In[8]:\n\n\n# Visit the URL\nurl = 'https://spaceimages-mars.com'\nbrowser.visit(url)\n\n\n# In[9]:\n\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_tag('button')[1]\nfull_image_elem.click()\n\n\n# In[10]:\n\n\n# Parsing the HTML\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\n\n\n# In[11]:\n\n\n# Finding the relative image url\nimg_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\nimg_url_rel\n\n\n# In[12]:\n\n\n# Use the base URL to create an absolute URL\nimg_url = f'https://spaceimages-mars.com/{img_url_rel}'\nimg_url\n\n\n# ### Mars Facts\n\n# In[13]:\n\n\n# Visit the URL\nurl = 'https://galaxyfacts-mars.com/'\nbrowser.visit(url)\n\n\n# In[14]:\n\n\n# Pull the first table found at the URL and turn it into a DataFrame\ndf = pd.read_html('https://galaxyfacts-mars.com')[0]\ndf.columns=['description', 'Mars', 'Earth']\ndf.set_index('description', inplace=True)\ndf\n\n\n# In[15]:\n\n\n# Looking at the second table and turning it into a DataFrame\ndf2 = pd.read_html('https://galaxyfacts-mars.com')[1] #read_html searches for HTML tables, can add index # if more than one\ndf2.columns=['description', \"Mars\"]\ndf2.set_index('description', inplace=True)\ndf2\n\n\n# In[16]:\n\n\n# Turning the first table back into HTML code\ndf.to_html()\n\n\n# In[17]:\n\n\n# Quitting the browser\nbrowser.quit()\n\n","repo_name":"js816/Mission-to-Mars","sub_path":"Mission_to_Mars.py","file_name":"Mission_to_Mars.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"845860977","text":"#!/usr/bin/env python3\n\n\nimport re\nimport collections\n\n\n\n\n\n\nfrom ._TokenizerActionTuple import _TokenizerActionTuple\nfrom ._PatternTableRecord import _PatternTableRecord\nfrom ._TokenizerPatternTuple import _TokenizerPatternTuple\nfrom .Token import *\nfrom .ParserErrorException import *\nfrom .TokenizerAction import *\nfrom .TokenizerPattern import *\n\n\n\n\n\n\n\n#\n# This class represents a table that is part of the initialization data of a tokenizer.\n#\nclass TokenizingTable(object):\n\n\tdef __init__(self, tableID):\n\t\tself.tableID = tableID\n\t\tself.tableName = None\n\t\tself.rows = []\n\t\tself.onOtherActions = None\n\t\tself.onEOSActions = None\n\t#\n\n\tdef addPatternRow(self, pattern, actions):\n\t\tassert isinstance(pattern, _TokenizerPatternTuple)\n\t\tassert isinstance(actions, (tuple, list))\n\t\tfor action in actions:\n\t\t\tassert isinstance(action, _TokenizerActionTuple)\n\t\tself.rows.append(_PatternTableRecord(pattern, tuple(actions)))\n\t#\n\n\tdef setOther(self, actions):\n\t\tassert isinstance(actions, (tuple, list))\n\t\tfor action in actions:\n\t\t\tassert isinstance(action, _TokenizerActionTuple)\n\t\tself.onOtherActions = actions\n\t#\n\n\tdef setEOS(self, actions):\n\t\tassert isinstance(actions, (tuple, list))\n\t\tfor action in actions:\n\t\t\tassert 
isinstance(action, _TokenizerActionTuple)\n\t\tself.onEOSActions = actions\n\t#\n\n\tdef collectAllTokenTypes(self, outSet):\n\t\tfor r in self.rows:\n\t\t\tfor t in r.actions:\n\t\t\t\tif t.actionID in [ EnumAction.EMITBUFFER, EnumAction.EMITELEMENT, EnumAction.EMITGENERATE ]:\n\t\t\t\t\toutSet.add(t.data)\n\n\t\tif self.onOtherActions:\n\t\t\tfor t in self.onOtherActions:\n\t\t\t\tif t.actionID in [ EnumAction.EMITBUFFER, EnumAction.EMITELEMENT, EnumAction.EMITGENERATE ]:\n\t\t\t\t\toutSet.add(t.data)\n\n\t\tif self.onEOSActions:\n\t\t\tfor t in self.onEOSActions:\n\t\t\t\tif t.actionID in [ EnumAction.EMITBUFFER, EnumAction.EMITELEMENT, EnumAction.EMITGENERATE ]:\n\t\t\t\t\toutSet.add(t.data)\n\t#\n\n#\n\n\n\n\n","repo_name":"jkpubsrc/python-module-jk-tokenizingparsing","sub_path":"src/jk_tokenizingparsing/TokenizingTable.py","file_name":"TokenizingTable.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15473672015","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"smartcrawler\", \n version=\"0.0.2\", \n author=\"Saketh Gundlapalli\", \n description=\"Package for crawling items from webpages and store them as json file\",\n long_description=long_description, \n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(), \n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ], \n python_requires='>=3.6', \n py_modules=[\"smartcrawler\",\"exceptions\",\"object\"], \n package_dir={'':'smartcrawler/src'}, \n install_requires=[\n 'selenium==3.141.0',\n 'webdriver_manager==3.4.2'\n ] \n)","repo_name":"Saketh7382/smartcrawler","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73169718862","text":"# -*- coding: utf-8 -*-\n# Rotate SAXIS to new directions about x/y/z axis for SOC calculation\n# Written By czf 4/14/2019 \n# Only valid for INCAR for VASP\n# Usage:python3 [script] [INCAR you wanna process]\n\nimport sys\nimport math\nimport numpy as np\n\nscript, file_to_be_converted = sys.argv \n\nprint (\"\"\"\n###################################\n# #\n# for DFT software VASP #\n# #\n###################################\n\"\"\")\nprint(\"Rotate SAXIS to new directions about x/y/z axis for SOC calculation\")\n\naxis = input(\"\"\"Which axis you wanna rotate about, x or y or z ?\"\"\")\nangle = input(\"What angle you wanna rotate anticlockwise(in degree measure)?\") \ntheta = float(angle) * math.pi / 180\ncontent = ''\n\nif axis is \"z\":\n A = np.array([[math.cos(theta), -1 * math.sin(theta), 0],\n [math.sin(theta), math.cos(theta), 0],\n [0, 0, 1]])\nelif axis is \"x\":\n A = np.array([[1, 0, 0],\n [0, math.cos(theta), -1 * math.sin(theta)],\n [0, math.sin(theta), math.cos(theta)]]) \nelif axis is \"y\":\n A = np.array([[math.cos(theta), 0, math.sin(theta)],\n [0, 1, 0],\n [-1 * math.sin(theta), 0, math.cos(theta)]])\n \nwith open(str(file_to_be_converted), 'r+') as file:\n for line in file.readlines():\n data = line.split()\n if data == []: # avoid empty line to bug the \"data[0]\"\n continue\n if data[0] == \"SAXIS\":\n # coordinate_str = data[2:5] \n coordinate = [float(i) for i in data[2:5]]\n x = np.array(coordinate).reshape((-1,1))\n y = 
np.dot(A,x)\n # print(y)\n # print(y[0,0])\n # print(y[1,0])\n # print(y[2,0])\n for i in [y[0,0], y[1,0], y[2,0]]:\n if abs(i) < 0.001:\n i = 0\n coordinate.append(i)\n # print(coordinate)\n line = \"SAXIS\" + ' ' + '=' + ' ' + str(coordinate[3]) + ' ' + str(coordinate[4]) + ' ' + str(coordinate[5]) + '\\n'\n content += line\nwith open(str(file_to_be_converted), 'r+') as file:\n file.writelines(content)\n \n \nprint (\"\"\"\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nThe INCAR has been converted already. \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \"\"\")\n\n\n","repo_name":"caizefeng/python_scripts_all","sub_path":"MAE_kit.py","file_name":"MAE_kit.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40100089327","text":"from qiskit import QuantumCircuit\nfrom qiskit.quantum_info import StabilizerState, Statevector, entropy, partial_trace, random_clifford\nfrom qiskit import Aer\nimport numpy as np\nimport random\nimport h5py\n\n\ndef get_stab_list(stab, jlformat=True):\n stab_list = stab._data.stabilizer.to_labels()\n if jlformat:\n return '\\n'.join([s.replace('I', '_') for s in stab_list])\n else:\n return '\\n'.join(stab_list)\n\n\ndef sample_once(n_qubits):\n qc = random_clifford(n_qubits).to_circuit()\n stab_list = get_stab_list(StabilizerState(qc))\n backend = Aer.get_backend('statevector_simulator')\n job = backend.run(qc)\n result = job.result()\n outputstate = result.get_statevector(qc)\n leftend, rightend = sorted([random.randint(0,n_qubits-1), random.randint(0,n_qubits-1)])\n entanglement = entropy(partial_trace(Statevector(outputstate), range(leftend, rightend+1))) # note: cautions about order of qubits\n return stab_list, leftend, rightend, entanglement\n\ndef main():\n test_sizes = [3, 4, 6, 8]\n num_repeat = 200\n path = 'data'\n\n with h5py.File(f'{path}/stab_ent.h5', 'w') as f:\n for size in test_sizes:\n g = f.create_group(f\"size{size}\")\n dset_stab_list = g.create_dataset(\"stab_list\", (num_repeat,), dtype=h5py.special_dtype(vlen=str))\n dset_leftend = g.create_dataset(\"leftend\", (num_repeat,), dtype=int)\n dset_rightend = g.create_dataset(\"rightend\", (num_repeat,), dtype=int)\n dset_entanglement = g.create_dataset(\"entanglement\", (num_repeat,), dtype=float)\n for i_repeat in range(num_repeat):\n stab_list, leftend, rightend, entanglement = sample_once(size)\n dset_stab_list[i_repeat] = stab_list\n dset_leftend[i_repeat] = leftend\n dset_rightend[i_repeat] = rightend\n dset_entanglement[i_repeat] = entanglement\n\n\n\nif __name__=='__main__':\n main()","repo_name":"royess/test-stab-entanglement","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17449044009","text":"from flask import redirect, make_response, g, request, jsonify\nfrom flask_login import current_user\nfrom app.models import User, ExpiredToken\nfrom app.utils.constants.http_codes import *\nfrom app.utils.tools import referrerRequest\nfrom app import db, config\nfrom functools import wraps\nimport jwt\nfrom ..tools import createJwtToken\nfrom app.utils.analytics import trackUserApiCalls\n\n\nclass access_token_required(object):\n\n def __init__(self, logout: bool = False):\n self.logout = logout\n\n def __call__(self, function):\n @wraps(function)\n def decorator(*args, **kwargs):\n key = config.SECRET_KEY # SECRET KEY used for encrypting is stored in the 
config-object\n accessToken = None\n refreshToken = None\n\n\n try:\n # trying to load access and refresh tokens out of the request's http-header\n \n accessToken = request.headers[\"x-access-token\"]\n refreshToken = request.headers[\"x-refresh-token\"]\n\n except KeyError as e:\n # on of the two tokens cant be found in the request header\n return make_response(jsonify({\"message\": \"Api-key is missing!\", \"http-code\": \"401\"}),\n HTTP_401_UNAUTHORIZED)\n\n try:\n # trying to extract the token-data in order to identify the user\n accessData = jwt.decode(algorithms=[\"HS256\"], jwt=accessToken, key=key, options={\"verify_exp\": True})\n if accessData[\"scope\"] == \"api-token\":\n token_query = ExpiredToken.query.filter_by(token=accessToken, type=\"access\",).first()\n if not token_query:\n current_user = User.query.filter_by(public_id=accessData[\"public_id\"]).first()\n expirationTime = accessData[\"exp\"]\n # if the extraction was successful the server checks whether the token has been labeled expired\n\n # the server deletes every token that is stored despite being expired\n \n\n if current_user:\n # token is valid\n # user is authorized\n # this function tracks the amount of tokens created per day and increases the api_calls counter\n trackUserApiCalls(current_user)\n g.user_id = current_user.id\n\n if self.logout: # checks whether the current user needs to be logged out\n\n # current access-token is stored in the ExpiredToken-Tabel and is therefor invalid\n\n new_expired_access_token = ExpiredToken(token=accessToken,\n type=\"access\",\n expiration_date=expirationTime,\n user_id=current_user.id)\n\n db.session.add(new_expired_access_token)\n raise jwt.ExpiredSignatureError # this exception makes it possible to jump to the refresh-token validation\n\n db.session.commit()\n return function(current_user, *args, **kwargs) # route function gets called\n \n \n else:\n # user-id can't be found -> user got deleted, entire table got dropped or the SECRET KEY is public\n print(f\"[MAJOR SECURTITY BREECH]: EITHER THE DATABASE HAS BEEN DELETED OR THE API-SECRET-KEY IS PUBLIC\")\n return make_response(\n jsonify({\"message\": \"This account has recently been deleted!\", \"http-code\": \"404\"}),\n HTTP_404_NOT_FOUND)\n\n\n else:\n raise jwt.ExpiredSignatureError # access token has been labled as expired -> refresh token has to be checked\n\n else:\n return make_response(\n jsonify({\"message\": \"Woring token-scope!\", \"http-code\": \"401\"}),\n HTTP_404_NOT_FOUND)\n\n except jwt.ExpiredSignatureError:\n # when the access-token is expired the following code gets executed\n\n try:\n # the server now tries to decrypt the refresh token\n\n refreshData = jwt.decode(algorithms=[\"HS256\"], jwt=refreshToken, key=key,\n options={\"verify_exp\": True})\n if accessData[\"scope\"] == \"api-token\":\n token_query = ExpiredToken.query.filter_by(token=refreshToken, type=\"refresh\").first()\n\n if not token_query:\n # decryption was succesfull, server now checks whether the token has already been used, thus checks for it\n # in the expired token list stored in the database\n current_user = User.query.filter_by(public_id=refreshData[\"public_id\"]).first()\n expirationTime = refreshData[\"exp\"]\n \n\n if current_user:\n # the token is valid, the user is therefore authorized\n # the refresh token is now stored in the expired token tabel, since it has been used\n # this code-snippet therefore implements the refresh-token-rotation\n # a new access and refresh-token pair is generated and send to the user\n\n 
new_expired_refresh_token = ExpiredToken(token=refreshToken,\n type=\"refresh\",\n expiration_date=expirationTime,\n user_id=current_user.id)\n\n db.session.add(new_expired_refresh_token)\n\n if not self.logout: # a new pair of tokens is only created when the current user doesnt need to get logged out\n trackUserApiCalls(current_user, new_access_token=True, new_refresh_token=True)\n g.user_id = current_user.id\n newAccessToken = createJwtToken(key, user=current_user, access_token=True)[0]\n newRefreshToken = createJwtToken(key, user=current_user, refresh_token=True)[0]\n db.session.commit()\n return make_response(jsonify({\"message\": \"Expired, new token pair created\", \"x-access-token\": newAccessToken, \"x-refresh-token\": newRefreshToken},))\n else:\n # if the user needs to get logged out the loggout view-function gets returned\n db.session.commit()\n return function(*args, **kwargs)\n \n else:\n # user-id cant be found -> user got deleted, eintire table got dropped or the SECRET KEY is public\n print(f\"[MAJOR SECURTITY BREECH]: EITHER THE DATABASE HAS BEEN DELETED OR THE API-SECRET-KEY IS PUBLIC\")\n return make_response(\n jsonify({\"message\": \"This account has recently been deleted!\", \"http-code\": \"404\"}),\n HTTP_404_NOT_FOUND)\n\n else:\n return make_response(jsonify({\"message\": \"Token is invalid!\", \"http-code\": \"401\"}),\n HTTP_401_UNAUTHORIZED)\n \n else:\n return make_response(\n jsonify({\"message\": \"Woring token-scope!\", \"http-code\": \"401\"}),\n HTTP_404_NOT_FOUND)\n\n except (jwt.DecodeError, jwt.InvalidTokenError) as e:\n # decryption of the refresh token was not successful\n # the user couldn't authorized\n print(f\"[API-LOGIN-EXCEPTION]: {e}\")\n return make_response(jsonify({\"message\": \"Token is invalid!\", \"http-code\": \"401\"}),\n HTTP_401_UNAUTHORIZED)\n\n except (jwt.DecodeError, jwt.InvalidTokenError) as e:\n # access-token did not expire, decryption still was not successfull\n # -> signature invalid etc. 
-> user cannot be authorized\n print(f\"[API-LOGIN-EXCEPTION]: {e}\")\n\n return make_response(jsonify({\"message\": \"Token is invalid!\", \"http-code\": \"401\"}),\n HTTP_401_UNAUTHORIZED)\n\n return decorator\n\n\n\ndef logoutRequired(function):\n # this decorator is used for the login and sign-up view routes\n # if the user is already logged in and tries to access these pages, they get automatically redirected\n @wraps(function)\n def decorated(*args, **kwargs):\n if current_user.is_authenticated:\n return redirect(referrerRequest())\n else:\n return function(*args, **kwargs)\n return decorated\n\n\ndef verifiedRequired(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n if not current_user.is_authenticated or not current_user.verified:\n return redirect(referrerRequest())\n else:\n return function(*args, **kwargs)\n return decorated\n\n\ndef notVerifiedRequired(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n if not current_user.is_authenticated or current_user.verified:\n return redirect(referrerRequest())\n else:\n return function(*args, **kwargs)\n return decorated","repo_name":"Drizzr/SensorApp","sub_path":"app/utils/security/decorators/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":10311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
{"seq_id":"8764744696","text":"\"\"\"\nGiven an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\nExample:\nInput: [0,1,0,3,12]\nOutput: [1,3,12,0,0]\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def moveZeroes(self, a: List[int]) -> None:\n n = len(a)\n slow, fast = 0, 0\n while True:\n while slow < n and a[slow] != 0:\n slow += 1\n fast = slow + 1\n while fast < n and a[fast] == 0:\n fast += 1\n if fast == n or slow == n:\n break\n a[slow] = a[fast]\n a[fast] = 0","repo_name":"pkdism/leetcode","sub_path":"30-day-leetcoding-challenge/d4-move-zeros.py","file_name":"d4-move-zeros.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32933829943","text":"from monero_glue.compat.collections import namedtuple\nfrom monero_glue.xmr import crypto\nfrom monero_glue.xmr.sub.xmr_net import (\n NetworkTypes,\n net_version,\n MainNet,\n TestNet,\n StageNet,\n)\n\n\nPubAddress = namedtuple(\"PubAddress\", (\"spend_public_key\", \"view_public_key\"))\n\n\nclass AddrInfo(object):\n def __init__(self, ver=None, data=None, payment_id=None):\n self.view_key = None\n self.spend_key = None\n self.net_type = None\n self.is_sub_address = None\n self.is_integrated = None\n self.payment_id = payment_id\n self.addr = None\n self.base_addr = None\n if ver is not None and data is not None:\n self.set_addr(ver, data, self.payment_id)\n\n def set_addr(self, ver, data, payment_id=None):\n self.net_type = get_addr_type(ver)\n self.is_sub_address = is_subaddress(ver)\n self.is_integrated = is_integrated(ver)\n self.spend_key = data[0:32]\n self.view_key = data[32:64]\n if self.is_integrated:\n self.payment_id = data[64:]\n else:\n self.payment_id = payment_id\n self.recompute_addr()\n return self\n\n def recompute_addr(self):\n addr = build_address(self.spend_key, self.view_key)\n self.base_addr = public_addr_encode(addr, self.is_sub_address, self.net_type)\n self.addr = public_addr_encode(\n addr, self.is_sub_address, self.net_type, self.payment_id\n )\n return self\n\n def recompute_sub(self, 
spend_key, view_key, major=0, minor=0):\n self.spend_key = spend_key\n self.view_key = view_key\n # a subaddress is any index other than (0, 0), so either component being non-zero qualifies\n self.is_sub_address = major != 0 or minor != 0\n self.recompute_addr()\n\n\ndef addr_to_hash(addr):\n \"\"\"\n Creates hashable address representation\n :param addr:\n :return:\n \"\"\"\n return bytes(addr.spend_public_key + addr.view_public_key)\n\n\ndef encode_addr(version, spend_pub, view_pub, payment_id=None):\n \"\"\"\n Encodes public keys with the given version prefix\n :param version:\n :param spend_pub:\n :param view_pub:\n :param payment_id:\n :return:\n \"\"\"\n buf = spend_pub + view_pub\n if payment_id:\n buf += bytes(payment_id)\n return crypto.xmr_base58_addr_encode_check(ord(version), bytes(buf))\n\n\ndef decode_addr(addr):\n \"\"\"\n Given address, get version and public spend and view keys.\n\n :param addr:\n :return:\n \"\"\"\n d, version = crypto.xmr_base58_addr_decode_check(bytes(addr))\n return AddrInfo(version, d)\n\n\ndef build_address(spend_key, view_key):\n \"\"\"\n Builds address compatible object from byte encoded keys\n :param spend_key:\n :param view_key:\n :return:\n \"\"\"\n return PubAddress(spend_key, view_key)\n\n\ndef build_address_encode(spend_key, view_key):\n \"\"\"\n Builds address compatible object from object keys\n :param spend_key:\n :param view_key:\n :return:\n \"\"\"\n return PubAddress(crypto.encodepoint(spend_key), crypto.encodepoint(view_key))\n\n\ndef public_addr_encode(\n pub_addr, is_sub=False, net=NetworkTypes.MAINNET, payment_id=None\n):\n \"\"\"\n Encodes public address to Monero address\n :param pub_addr:\n :type pub_addr: apps.monero.xmr.serialize_messages.addr.AccountPublicAddress\n :param is_sub:\n :param net:\n :param payment_id: for integrated address\n :return:\n \"\"\"\n if payment_id and len(payment_id) != 8:\n raise ValueError(\"Payment ID has to have exactly 8B for an integrated address\")\n net_ver = net_version(net, is_sub, payment_id is not None)\n return encode_addr(\n net_ver, pub_addr.spend_public_key, pub_addr.view_public_key, payment_id\n )\n\n\ndef classify_subaddresses(tx_dests, change_addr):\n \"\"\"\n Classify destination subaddresses\n void classify_addresses()\n :param tx_dests:\n :type tx_dests: list[apps.monero.xmr.serialize_messages.tx_construct.TxDestinationEntry]\n :param change_addr:\n :return:\n \"\"\"\n num_stdaddresses = 0\n num_subaddresses = 0\n single_dest_subaddress = None\n addr_set = set()\n for tx in tx_dests:\n if change_addr and addr_eq(change_addr, tx.addr):\n continue\n addr_hashed = addr_to_hash(tx.addr)\n if addr_hashed in addr_set:\n continue\n addr_set.add(addr_hashed)\n if tx.is_subaddress:\n num_subaddresses += 1\n single_dest_subaddress = tx.addr\n else:\n num_stdaddresses += 1\n return num_stdaddresses, num_subaddresses, single_dest_subaddress\n\n\ndef addr_eq(a, b):\n \"\"\"\n Address comparison. 
Allocation free.\n :param a:\n :param b:\n :return:\n \"\"\"\n return (\n a.spend_public_key == b.spend_public_key\n and a.view_public_key == b.view_public_key\n )\n\n\ndef get_change_addr_idx(outputs, change_dts):\n \"\"\"\n Returns ID of the change output from the change_dts and outputs\n :param tsx_data:\n :return:\n \"\"\"\n if change_dts is None:\n return None\n\n change_idx = None\n change_coord = change_dts.amount, change_dts.addr\n for idx, dst in enumerate(outputs):\n if (\n change_coord\n and change_coord[0]\n and change_coord[0] == dst.amount\n and addr_eq(change_coord[1], dst.addr)\n ):\n change_idx = idx\n return change_idx\n\n\ndef is_integrated(ver):\n return ver in [\n MainNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n TestNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n StageNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n ]\n\n\ndef is_subaddress(ver):\n return ver in [\n MainNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n TestNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n StageNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n ]\n\n\ndef get_addr_type(ver):\n if ver in [\n MainNet.PUBLIC_ADDRESS_BASE58_PREFIX,\n MainNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n MainNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n ]:\n return NetworkTypes.MAINNET\n elif ver in [\n TestNet.PUBLIC_ADDRESS_BASE58_PREFIX,\n TestNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n TestNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n ]:\n return NetworkTypes.TESTNET\n elif ver in [\n StageNet.PUBLIC_ADDRESS_BASE58_PREFIX,\n StageNet.PUBLIC_INTEGRATED_ADDRESS_BASE58_PREFIX,\n StageNet.PUBLIC_SUBADDRESS_BASE58_PREFIX,\n ]:\n return NetworkTypes.STAGENET\n else:\n raise ValueError(\"Unknown address type\")\n","repo_name":"ph4r05/monero-agent","sub_path":"monero_glue/xmr/sub/addr.py","file_name":"addr.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"47"} +{"seq_id":"30067909327","text":"from typing import List\n\nimport pandas as pd\nimport pytest\n\nfrom gps_activity.abstract import AbstractNode\nfrom gps_activity.nodes import UnixtimeExtractor\n\n\n@pytest.fixture\ndef gps_timing_source_points() -> pd.DataFrame:\n return pd.DataFrame(\n {\n \"datetime\": [\n pd.Timestamp(\"2022-04-25 00:02:18\"),\n pd.Timestamp(\"2022-04-25 01:02:07\"),\n pd.Timestamp(\"2022-04-25 02:01:57\"),\n ],\n },\n )\n\n\n@pytest.fixture\ndef exptected_delta_sec() -> List[float]:\n return [3589.0, 3590.0]\n\n\n@pytest.fixture\ndef gps_timing_src_col() -> str:\n return \"datetime\"\n\n\n@pytest.fixture\ndef gps_timing_trgt_col() -> str:\n return \"unixtime\"\n\n\n@pytest.fixture\ndef unixtime_extractor(\n gps_timing_src_col: str,\n gps_timing_trgt_col: str,\n) -> UnixtimeExtractor:\n return UnixtimeExtractor(\n source_column=gps_timing_src_col,\n target_column=gps_timing_trgt_col,\n )\n\n\nclass TestUnixtimeExtractor:\n def __check_expected_time_delta(\n self,\n computed_gps: pd.DataFrame,\n exptected_delta_sec: List[float],\n ):\n delta_sec = computed_gps[\"unixtime\"] - computed_gps[\"unixtime\"].shift(1)\n delta_sec = delta_sec[1:].to_list()\n assert delta_sec == exptected_delta_sec\n\n def __conduct_test(\n self,\n computed_gps: pd.DataFrame,\n unixtime_extractor: UnixtimeExtractor,\n exptected_delta_sec: pd.Series,\n ):\n assert isinstance(unixtime_extractor, AbstractNode)\n self.__check_expected_time_delta(computed_gps, exptected_delta_sec)\n\n def test_transform_unixtime_extractor(\n self,\n unixtime_extractor: UnixtimeExtractor,\n gps_timing_source_points: pd.DataFrame,\n 
exptected_delta_sec: pd.Series,\n ):\n computed_gps = unixtime_extractor.transform(gps_timing_source_points)\n self.__conduct_test(\n computed_gps=computed_gps,\n unixtime_extractor=unixtime_extractor,\n exptected_delta_sec=exptected_delta_sec,\n )\n\n def test_fit_transform_unixtime_extractor(\n self,\n unixtime_extractor: UnixtimeExtractor,\n gps_timing_source_points: pd.DataFrame,\n exptected_delta_sec: pd.Series,\n ):\n computed_gps = unixtime_extractor.fit_transform(gps_timing_source_points)\n self.__conduct_test(\n computed_gps=computed_gps,\n unixtime_extractor=unixtime_extractor,\n exptected_delta_sec=exptected_delta_sec,\n )\n","repo_name":"WasteLabs/gps_activity","sub_path":"tests/nodes/test_timing.py","file_name":"test_timing.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34274829917","text":"import pytest\nimport os\nimport numpy as np\nimport torch\nimport tensorflow as tf\n\nif 'PATH_TO_OPEN3D_ML' in os.environ.keys():\n base = os.environ['PATH_TO_OPEN3D_ML']\nelse:\n base = '.'\n\n\ndef test_randlanet_torch():\n import open3d.ml.torch as ml3d\n\n net = ml3d.models.RandLANet(num_points=5000, num_classes=10, dim_input=6)\n net.device = 'cpu'\n\n data = {\n 'point':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'feat':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'label':\n np.array([np.random.randint(10) for i in range(1000)],\n dtype=np.int32)\n }\n attr = {'split': 'train'}\n\n data = net.preprocess(data, attr)\n inputs = net.transform(data, attr)\n inputs = {\n 'xyz': [torch.from_numpy(np.array([item])) for item in inputs['xyz']],\n 'neigh_idx': [\n torch.from_numpy(np.array([item])) for item in inputs['neigh_idx']\n ],\n 'sub_idx': [\n torch.from_numpy(np.array([item])) for item in inputs['sub_idx']\n ],\n 'interp_idx': [\n torch.from_numpy(np.array([item])) for item in inputs['interp_idx']\n ],\n 'features': torch.from_numpy(np.array([inputs['features']])),\n 'labels': torch.from_numpy(np.array([inputs['labels']]))\n }\n out = net(inputs).detach().numpy()\n\n assert out.shape == (1, 5000, 10)\n\n\ndef test_randlanet_tf():\n import open3d.ml.tf as ml3d\n\n net = ml3d.models.RandLANet(num_points=5000,\n num_classes=10,\n dim_input=6,\n num_layers=4)\n\n data = {\n 'point':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'feat':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'label':\n np.array([np.random.randint(10) for i in range(1000)],\n dtype=np.int32)\n }\n attr = {'split': 'train'}\n\n data = net.preprocess(data, attr)\n pc, feat, label, _ = ml3d.datasets.utils.trans_crop_pc(\n data['point'], data['feat'], data['label'], data['search_tree'], 0,\n 5000)\n\n inputs = net.transform(tf.convert_to_tensor(pc), tf.convert_to_tensor(feat),\n tf.convert_to_tensor(label))\n for i in range(18): # num_layers * 4 + 2\n inputs[i] = tf.expand_dims(inputs[i], 0)\n\n out = net(inputs).numpy()\n\n assert out.shape == (1, 5000, 10)\n\n\ndef test_kpconv_torch():\n import open3d.ml.torch as ml3d\n\n net = ml3d.models.KPFCNN(lbl_values=[0, 1, 2, 3, 4, 5],\n num_classes=4,\n ignored_label_inds=[0],\n in_features_dim=5)\n net.device = 'cpu'\n\n data = {\n 'point':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'feat':\n np.array(np.random.random((1000, 3)), dtype=np.float32),\n 'label':\n np.array([np.random.randint(5) for i in range(1000)],\n dtype=np.int32)\n }\n attr = {'split': 'train'}\n batcher = 
ml3d.dataloaders.ConcatBatcher('cpu')\n\n data = net.preprocess(data, attr)\n inputs = {'data': net.transform(data, attr), 'attr': attr}\n inputs = batcher.collate_fn([inputs])\n out = net(inputs['data']).detach().numpy()\n\n assert out.shape[1] == 5\n\n\ndef test_kpconv_tf():\n import open3d.ml.tf as ml3d\n\n net = ml3d.models.KPFCNN(lbl_values=[0, 1, 2, 3, 4, 5],\n num_classes=4,\n ignored_label_inds=[0],\n in_features_dim=5)\n\n data = {\n 'point':\n np.array(np.random.random((10000, 3)), dtype=np.float32),\n 'feat':\n np.array(np.random.random((10000, 3)), dtype=np.float32),\n 'label':\n np.array([np.random.randint(5) for i in range(10000)],\n dtype=np.int32)\n }\n attr = {'split': 'train'}\n\n data = net.preprocess(data, attr)\n p_list = tf.convert_to_tensor(data['point'][:1000])\n c_list = tf.convert_to_tensor(\n np.concatenate([data['point'][:1000], data['feat'][:1000]], axis=1))\n pl_list = tf.convert_to_tensor(data['label'][:1000])\n\n pi_list = tf.convert_to_tensor(\n np.array([i for i in range(1000)], dtype=np.int32))\n ci_list = tf.convert_to_tensor(np.array([0], dtype=np.int32))\n\n inputs = net.transform(\n p_list, c_list, pl_list,\n tf.convert_to_tensor(np.array([500, 500], dtype=np.int32)), pi_list,\n ci_list)\n\n out = net(inputs)\n\n assert out.shape == (1000, 5)\n\n\ndef test_pointpillars_torch():\n import open3d.ml.torch as ml3d\n from open3d.ml.utils import Config\n\n cfg_path = base + '/ml3d/configs/pointpillars_kitti.yml'\n cfg = Config.load_from_file(cfg_path)\n\n net = ml3d.models.PointPillars(**cfg.model, device='cpu')\n\n batcher = ml3d.dataloaders.ConcatBatcher('cpu', model='PointPillars')\n data = {\n 'point': np.array(np.random.random((10000, 4)), dtype=np.float32),\n 'calib': None,\n 'bounding_boxes': [],\n }\n data = net.preprocess(data, {'split': 'test'})\n data = net.transform(data, {'split': 'test'})\n data = batcher.collate_fn([{'data': data, 'attr': {'split': 'test'}}])\n\n net.eval()\n with torch.no_grad():\n results = net(data)\n boxes = net.inference_end(results, data)\n assert type(boxes) == list\n\n\ndef test_pointpillars_tf():\n import open3d.ml.tf as ml3d\n from open3d.ml.utils import Config\n\n cfg_path = base + '/ml3d/configs/pointpillars_kitti.yml'\n cfg = Config.load_from_file(cfg_path)\n\n net = ml3d.models.PointPillars(**cfg.model, device='cpu')\n\n data = [\n tf.constant(np.random.random((10000, 4)), dtype=tf.float32), None, None,\n [tf.constant(np.stack([np.eye(4), np.eye(4)], axis=0))]\n ]\n\n results = net(data, training=False)\n boxes = net.inference_end(results, data)\n\n assert type(boxes) == list\n","repo_name":"kylevedder/SparsePointPillars","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"47"} +{"seq_id":"43188292785","text":"N, M = (int(x) for x in input().split())\nS = input()\nT = input()\n\nval = 0\nif T.endswith(S):\n val += 1\n\nif T.startswith(S):\n val += 2\n\nprint(3-val)","repo_name":"hitochan777/kata","sub_path":"atcoder/abc322/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71082812943","text":"#A library to service common queries against the StarMetrics API\n\nimport urllib2, json, itertools\n\nproposalaccessallowed = False\napiurl = 'http://rd-dashboard.nitrd.gov/star/api/'\n# Check to see if we have access to nsfstarmetrics server\n# This is 
done once when the module is loaded\ntry:\n urllib2.urlopen(\"http://nsf.api.smetrics.org/v1/access\",None,2)\nexcept urllib2.URLError:\n pass\nelse:\n proposalaccessallowed = True\n apiurl = \"http://nsf.api.smetrics.org/v1/\"\n\n#legend placeholders\nlegend_topics = {} #this will come from the API\nlegend_divisions = {\n\"OCI\":\"Office of Cyberinfrastructure\",\n\"OGC\":\"Office of the General Counsel\",\n\"OIA\":\"Office of Integrative Activities\",\n\"OISE\":\"Office of International Science and Engineering\",\n\"ODI\":\"Office of Diversity and Inclusion (ODI)\",\n\"OLPA\":\"Office of Legislative & Public Affairs\",\n\"ANT\":\"Antarctic Sciences\",\n\"ARC\":\"Arctic Sciences\",\n\"AIL\":\"Antarctic Infrastructure and Logistics\",\n\"PEHS\":\"Office of Polar Environment, Health and Safety\",\n\"A/D\":\"Front Office\",\n\"NSB\":\"Office of the Assistant Director\",\n\"OIG\":\"Office of the Assistant Director\",\n\"MCB\":\"Division of Molecular & Cellular Biosciences\",\n\"DBI\":\"Division of Biological Infrastructure\",\n\"IOS\":\"Division of Integrative Organismal Systems\",\n\"DEB\":\"Division of Environmental Biology\",\n\"EF\":\"Emerging Frontiers Office\",\n\"CCF\":\"Division of Computing and Communication Foundations\",\n\"CNS\":\"Division of Computer and Network Systems\",\n\"IIS\":\"Division of Information and Intelligent Systems\",\n\"DRL\":\"Division of Research on Learning in Formal and Informal Settings\",\n\"DGE\":\"Division of Graduate Education\",\n\"HRD\":\"Division of Human Resource Development\",\n\"DUE\":\"Division of Undergraduate Education\",\n\"CBET\":\"Division of Chemical, Bioengineering, Environmental, and Transport Systems\",\n\"CMMI\":\"Division of Civil, Mechanical & Manufacturing Innovation\",\n\"ECCS\":\"Division of Electrical, Communications & Cyber Systems\",\n\"EEC\":\"Division of Engineering Education & Centers\",\n\"EFRI\":\"Office of Emerging Frontiers in Research & Innovation\",\n\"IIP\":\"Division of Industrial Innovation & Partnerships\",\n\"ENG\":\"Office of the Assistant Director\",\n\"AGS\":\"Division of Atmospheric and Geospace Sciences\",\n\"EAR\":\"Division of Earth Sciences\",\n\"OCE\":\"Division of Ocean Sciences\",\n\"GEO\":\"Office of the Assistant Director\",\n\"AST\":\"Division of Astronomical Sciences\",\n\"CHE\":\"Division of Chemistry\",\n\"DMR\":\"Division of Materials Research\",\n\"DMS\":\"Division of Mathematical Sciences\",\n\"PHY\":\"Division of Physics\",\n\"MPS\":\"Office of the Assistant Director\",\n\"SES\":\"Division of Social and Economic Sciences\",\n\"BCS\":\"Division of Behavioral and Cognitive Sciences\",\n\"NCSE\":\"National Center for Science and Engineering Statistics\",\n\"SMA\":\"SBE Office of Multidisciplinary Activities\",\n\"SBE\":\"Office of the Assistant Director\",\n\"BD\":\"Budget Division\",\n\"DACS\":\"Division of Acquisition and Cooperative Support\",\n\"DFM\":\"Division of Financial Management\",\n\"DGA\":\"Division of Grants & Agreements\",\n\"DIAS\":\"Division of Institution and Award Support\",\n\"HRM\":\"Division of Human Resource Management\",\n\"DIS\":\"Division of Information Systems\",\n\"DAS\":\"Division of Administrative Services\",\n\"EPSCoR\":\"Office of Experimental Program To Stimulate Competitive Research\",\n\"EPS\":\"Office of Experimental Program to Stimulate Competitive Research\"\n}\n\n#RESOURCES\n#exposed methods to retrieve data\n#define a topics resource\ndef topics(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n 
options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n #retrieve\n t1_data = getTopics(options,'t1')\n t2_data = getTopics(options,'t2')\n t3_data = getTopics(options,'t3')\n t4_data = getTopics(options,'t4')\n collated = collateTopics(t1_data,t2_data,t3_data,t4_data)\n #set topic legends\n result = setTopicLegend(collated)\n\n return result\n\n#topics legend resource\ndef topiclegends():\n if len(legend_topics)==0:\n return getTopicLegend()\n else:\n #return\n return legend_topics\n \n#define a programs resource\ndef programs(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n if 't' not in options:\n options['t'] = ''\n #retrieve\n data = getPrograms(options)\n result = setProgramLegend(data)\n\n return result\n\n#program legend resource\ndef programlegends(options):\n #options is a comma separated list of pge codes\n return getProgramLegend(options)\n \n#define a divisions resource\ndef divisions(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n if 't' not in options:\n options['t'] = ''\n #retrieve\n data = getDivisions(options)\n result = setDivisionLegend(data)\n\n return result\n\n#define a proposals resource\ndef proposals(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n if 't' not in options:\n options['t'] = ''\n #retrieve\n result = getProposals(options)\n\n return result\n\n#define a proposal resource\ndef proposal(propid):\n result = getProposal(propid)\n\n return result\n\n#define a institutions resource\ndef institutions(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n if 't' not in options:\n options['t'] = ''\n #retrieve\n result = getInstitutions(options)\n\n return result\n\n#define a institution resource\ndef institution(instid):\n result = getInstitution(instid)\n\n return result\n\n#define a researchers resource\ndef researchers(options=None):\n #set up options\n if options is None:\n options = {}\n #initialized\n if 'org' not in options:\n options['org'] = ''\n if 'year' not in options:\n options['year'] = ''\n if 't' not in options:\n options['t'] = ''\n if 'id' not in options:\n options['id'] = ''\n #retrieve\n result = getResearchers(options)\n\n return result\n\n#define a researcher resource\ndef researcher(researcherid):\n result = getResearcher(researcherid)\n\n return result\n\n#Lower level functions to service the resources\n#topics data handling\ndef getTopics(params,topicrelevance):\n params['summ'] = 'status,year,'+topicrelevance #t1,t2,t3,t4\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n rawdata = data[\"data\"]\n\n #make a list of the years\n years = [item['year'] for item in rawdata if 'year' in item]\n years = sorted(set(years))\n\n #prepare data\n #group by t\n grouped = {}\n rawdata = sorted(rawdata, key = lambda row: row[topicrelevance] if topicrelevance in row else None)\n for key, group in itertools.groupby(rawdata, lambda row: row[topicrelevance] if topicrelevance in row else 
None):\n grouped[key] = [thing for thing in group] if group is not None else []\n #now assemble\n collated = []\n for t, group in grouped.iteritems():\n if t is not None:\n topicid = t\n tmp = {'t':topicid,'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}}\n #now reduce\n for row in group:\n #counts and funding\n count_awarded = 0\n count_declined = 0\n count_other = 0\n funding_awarded = 0\n funding_requested = 0\n if row[\"status\"]==\"award\":\n funding_awarded = row[\"awarded_dollar\"]\n count_awarded = row[\"count\"]\n elif row[\"status\"]==\"decline\":\n count_declined = row[\"count\"]\n else:\n count_other = row[\"count\"] \n if \"request_dollar\" in row: funding_requested = row[\"request_dollar\"]\n #save\n tmp['count']['award'] += count_awarded\n tmp['count']['decline'] += count_declined\n tmp['count']['other'] += count_other\n tmp['funding']['award'] += funding_awarded\n tmp['funding']['request'] += funding_requested\n\n topic_by_year = {}\n for year in years:\n filtered = filter(lambda item: item['year']==year, group) \n year_tmp = {'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}} \n for row in filtered:\n awarded_count = 0\n declined_count = 0\n other_count = 0\n awarded_dollar = 0\n requested_dollar = 0\n if row['status']=='award':\n awarded_count = row[\"count\"]\n awarded_dollar = row[\"awarded_dollar\"]\n elif row['status']=='decline':\n declined_count = row[\"count\"]\n requested_dollar = row[\"requested_dollar\"]\n else:\n other_count = row[\"count\"]\n #save the per-year tallies (these must use the per-year variables, not the outer totals)\n year_tmp['count']['award'] += awarded_count\n year_tmp['count']['decline'] += declined_count\n year_tmp['count']['other'] += other_count\n year_tmp['funding']['award'] += awarded_dollar\n year_tmp['funding']['request'] += requested_dollar\n\n topic_by_year[year] = year_tmp\n\n tmp['years'] = topic_by_year\n\n #save it\n collated.append(tmp) \n\n #make a dict keyed by topic id, which is much faster than a list for lookups\n data_hash = {}\n for row in collated:\n data_hash[row['t']] = row\n\n #return\n return data_hash \n\n#collate topics\n#take the data from 4 relevance calls and put it all together\ndef collateTopics(t1_data,t2_data,t3_data,t4_data):\n topicsbyrelevance = {'t1':t1_data,'t2':t2_data,'t3':t3_data,'t4':t4_data}\n loaded_topicids = []\n loaded_topics = [] #this will end up looking like [ { t:topicid, label: label, words: words, t1: {count: count, etc. }, t2: {count: count, etc. 
}}]\n \n loaded_topicids.extend(t1_data.keys())\n loaded_topicids.extend(t2_data.keys())\n loaded_topicids.extend(t3_data.keys())\n loaded_topicids.extend(t4_data.keys())\n \n #unique\n unique_topicids = set(loaded_topicids)\n #using the unique list of retrieved topic ids\n for topicid in unique_topicids:\n tmp = {'t':topicid, 'label':None, 'words':None}\n #for each relevance level t1 through t4\n for topicrelevance in range(1,5):\n topicrelevance = 't'+str(topicrelevance)\n if topicid in topicsbyrelevance[topicrelevance]:\n topic = topicsbyrelevance[topicrelevance][topicid]\n else:\n topic = {}\n tmp[topicrelevance] = topic\n loaded_topics.append(tmp)\n \n return loaded_topics\n\n#set topic legend\ndef setTopicLegend(topics):\n #load topic legends if not loaded\n if len(legend_topics)==0:\n getTopicLegend()\n return applyTopicLegend(topics)\n\n#get topic legends\ndef getTopicLegend():\n try:\n f = urllib2.urlopen(apiurl+'topic?legend=topic')\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n for item in data:\n legend_topics[item[\"topic\"]] = {\"words\":item[\"words\"],\"label\":item[\"label\"]}\n \n#apply topic legends\ndef applyTopicLegend(topics):\n for topic in topics:\n topicid = topic['t']\n #words and labels\n if 'label' not in legend_topics[topicid]:\n label = 'Not Electronically Readable'\n else:\n label = legend_topics[topicid][\"label\"]\n if 'words' not in legend_topics[topicid]:\n words = ''\n else:\n words = legend_topics[topicid][\"words\"]\n\n #set\n topic['label'] = label\n topic['words'] = words\n\n return topics\n\n#programs data handling\ndef getPrograms(params):\n params['summ'] = 'status,year,pge'\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n rawdata = data[\"data\"]\n\n #make a list of the years\n years = [item['year'] for item in rawdata if 'year' in item]\n years = sorted(set(years))\n\n #prepare data\n #group by pge\n grouped = {}\n rawdata = sorted(rawdata, key = lambda row: row['pge'])\n for key, group in itertools.groupby(rawdata, lambda row: row['pge']):\n grouped[key] = [thing for thing in group]\n #now assemble\n collated = []\n for pge, group in grouped.iteritems():\n tmp = {'pge':pge,'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}}\n #now reduce\n for row in group:\n #counts and funding\n count_awarded = 0\n count_declined = 0\n count_other = 0\n funding_awarded = 0\n funding_requested = 0\n if row[\"status\"]==\"award\":\n funding_awarded = row[\"awarded_dollar\"]\n count_awarded = row[\"count\"]\n elif row[\"status\"]==\"decline\":\n count_declined = row[\"count\"]\n else:\n count_other = row[\"count\"] \n if \"request_dollar\" in row: funding_requested = row[\"request_dollar\"]\n #save\n tmp['count']['award'] += count_awarded\n tmp['count']['decline'] += count_declined\n tmp['count']['other'] += count_other\n tmp['funding']['award'] += funding_awarded\n tmp['funding']['request'] += funding_requested\n\n pge_by_year = {}\n for year in years:\n filtered = filter(lambda item: item['year']==year, group) \n year_tmp = {'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}} \n for row in filtered:\n awarded_count = 0\n declined_count = 0\n other_count = 0\n awarded_dollar = 0\n requested_dollar = 0\n if row['status']=='award':\n awarded_count = row[\"count\"]\n awarded_dollar = row[\"awarded_dollar\"]\n elif row['status']=='decline':\n 
declined_count = row[\"count\"]\n requested_dollar = row[\"requested_dollar\"]\n else:\n other_count = row[\"count\"]\n #save the per-year tallies (these must use the per-year variables, not the outer totals)\n year_tmp['count']['award'] += awarded_count\n year_tmp['count']['decline'] += declined_count\n year_tmp['count']['other'] += other_count\n year_tmp['funding']['award'] += awarded_dollar\n year_tmp['funding']['request'] += requested_dollar\n\n pge_by_year[year] = year_tmp\n\n tmp['years'] = pge_by_year\n\n #save it\n collated.append(tmp) \n\n #return\n return collated \n\n#set program legend\ndef setProgramLegend(programs):\n pges = []\n for program in programs:\n pges.append(program['pge'])\n\n #set program legends\n legend = getProgramLegend(','.join(pges))\n return applyProgramLegend(programs,legend)\n\n#get program legends\ndef getProgramLegend(pges):\n try:\n f = urllib2.urlopen(apiurl+'prop?legend=nsf_pge&q='+pges)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n \n#apply program legends\ndef applyProgramLegend(programs,legend):\n #set the labels\n for program in programs:\n pge = filter(lambda item: item['nsf_pge']==program['pge'], legend)\n program['label'] = ''\n if pge:\n program['label'] = pge[0]['label']\n\n return programs \n\n#divisions data handling\ndef getDivisions(params):\n params['summ'] = 'status,year,org'\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n rawdata = data[\"data\"]\n\n #make a list of the years\n years = [item['year'] for item in rawdata if 'year' in item]\n years = sorted(set(years))\n\n #prepare data\n #group by org\n grouped = {}\n rawdata = sorted(rawdata, key = lambda row: row['org'])\n for key, group in itertools.groupby(rawdata, lambda row: row['org']):\n grouped[key] = [thing for thing in group]\n #now assemble\n collated = []\n for org, group in grouped.iteritems():\n tmp = {'org':org,'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}}\n #now reduce\n for row in group:\n #counts and funding\n count_awarded = 0\n count_declined = 0\n count_other = 0\n funding_awarded = 0\n funding_requested = 0\n if row[\"status\"]==\"award\":\n funding_awarded = row[\"awarded_dollar\"]\n count_awarded = row[\"count\"]\n elif row[\"status\"]==\"decline\":\n count_declined = row[\"count\"]\n else:\n count_other = row[\"count\"] \n if \"request_dollar\" in row: funding_requested = row[\"request_dollar\"]\n #save\n tmp['count']['award'] += count_awarded\n tmp['count']['decline'] += count_declined\n tmp['count']['other'] += count_other\n tmp['funding']['award'] += funding_awarded\n tmp['funding']['request'] += funding_requested\n\n org_by_year = {}\n for year in years:\n filtered = filter(lambda item: item['year']==year, group) \n year_tmp = {'count':{'award':0,'decline':0,'other':0},'funding':{'award':0,'request':0}} \n for row in filtered:\n awarded_count = 0\n declined_count = 0\n other_count = 0\n awarded_dollar = 0\n requested_dollar = 0\n if row['status']=='award':\n awarded_count = row[\"count\"]\n awarded_dollar = row[\"awarded_dollar\"]\n elif row['status']=='decline':\n declined_count = row[\"count\"]\n requested_dollar = row[\"requested_dollar\"]\n else:\n other_count = row[\"count\"]\n #save the per-year tallies (these must use the per-year variables, not the outer totals)\n year_tmp['count']['award'] += awarded_count\n year_tmp['count']['decline'] += declined_count\n year_tmp['count']['other'] += other_count\n year_tmp['funding']['award'] += awarded_dollar\n year_tmp['funding']['request'] += requested_dollar\n\n 
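# [Editorial aside] The group/reduce blocks above (getTopics, getPrograms, getDivisions all
# repeat the same pattern) can be expressed far more compactly with pandas. A minimal sketch
# under assumed inputs: `rows` mimics the API rows consumed above (keys: org, year, status,
# count, awarded_dollar); pandas is an assumption here, it is not a dependency of this module.
import pandas as pd

rows = [
    {"org": "OCI", "year": 2011, "status": "award", "count": 3, "awarded_dollar": 500000},
    {"org": "OCI", "year": 2011, "status": "decline", "count": 7, "awarded_dollar": 0},
    {"org": "DMS", "year": 2012, "status": "award", "count": 2, "awarded_dollar": 250000},
]
df = pd.DataFrame(rows)

# proposal counts per division, year and status (the manual year_tmp['count'] tallies)
counts = df.pivot_table(index=["org", "year"], columns="status",
                        values="count", aggfunc="sum", fill_value=0)

# awarded dollars per division and year (the manual year_tmp['funding']['award'] tally)
awarded = df[df["status"] == "award"].groupby(["org", "year"])["awarded_dollar"].sum()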
org_by_year[year] = year_tmp\n\n tmp['years'] = org_by_year\n\n #save it\n collated.append(tmp) \n\n #return\n return collated \n\n#set division legends\ndef setDivisionLegend(divisions):\n for division in divisions:\n org = division['org']\n #labels\n if org not in legend_divisions:\n label = 'Not Available'\n else:\n label = legend_divisions[org]\n\n #set\n division['label'] = label\n \n return divisions \n\n#get proposals\ndef getProposals(params):\n params['page'] = 'grant'\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#get researchers\ndef getResearchers(params):\n params['page'] = 'pi'\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#get institutions\ndef getInstitutions(params):\n params['page'] = 'org'\n try:\n url = apiurl+'topic?'+toParam(params)\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#get proposal\ndef getProposal(id):\n try:\n url = apiurl+\"prop?id=\"+id\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#get researcher\ndef getResearcher(id):\n try:\n url = apiurl+\"user?id=\"+id\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#get institution\ndef getInstitution(id):\n try:\n url = apiurl+\"org?id=\"+id\n f = urllib2.urlopen(url)\n except urllib2.URLError:\n pass\n else:\n data = json.loads(f.read())\n f.close()\n\n return data\n\n#general use functions\ndef toParam(obj):\n str = ''\n separator = '&'\n for key, val in obj.iteritems():\n if val:\n if str != '':\n str += separator\n str += key + '=' + val\n return str","repo_name":"krishenn/sdk","sub_path":"sm.py","file_name":"sm.py","file_ext":"py","file_size_in_byte":22573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3313104619","text":"import os\nimport raumcheck\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nimport datetime\n\nload_dotenv()\nTOKEN = os.getenv(\"DISCORD_TOKEN\")\nFOOTER = os.getenv(\"FOOTER\")\n\nbot = commands.Bot((\"raum \", \"Raum \"))\n\n@bot.event\nasync def on_ready():\n print(\"{} is running\".format(bot.user.name))\n\n@bot.event\nasync def on_command_error(ctx, error):\n print(\"Error ({}) on command: {}\".format(error, ctx.message.content))\n\n@bot.command()\nasync def find(ctx, query, date_str=None):\n \"\"\"Finds free rooms near the given query string (levenshtein).\"\"\"\n print(\"{}: {}\".format(ctx.message.author, ctx.message.content))\n query = query.upper()\n async with ctx.typing():\n try:\n date = get_date(date_str)\n except ValueError as e:\n await ctx.send(embed=discord.Embed(description=e.args[0]))\n return\n\n date_display = \"heute\" if date_str is None else \"am {}\".format(date) \n\n rooms = raumcheck.get_sorted_rooms(query)\n free_rooms = {}\n for room in rooms:\n events = raumcheck.get_availability(room, date)\n if len(events) == 0:\n free_rooms[room] = events\n if len(free_rooms) == 5:\n break\n \n if len(free_rooms) == 0:\n description = \"Ich konnte keine freien Räume finden :(\"\n else:\n description = \"Ich habe folgende Räume gefunden, die {} nicht belegt sind:\\n- 
**{}**\".format(date_display, \"**\\n- **\".join(free_rooms.keys()))\n \n embed = discord.Embed(description=description)\n embed.set_footer(text=FOOTER)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def check(ctx, room, date_str=None):\n \"\"\"Checks the given room for availability.\"\"\"\n print(\"{}: {}\".format(ctx.message.author, ctx.message.content))\n room = room.upper()\n try:\n date = get_date(date_str)\n except ValueError as e:\n await ctx.send(embed=discord.Embed(description=e.args[0]))\n return\n\n date_display = \"heute\" if date_str is None else \"am {}\".format(date)\n\n async with ctx.typing():\n events = raumcheck.get_availability(room, date)\n\n if events is None:\n embed = discord.Embed(description=\"Der Raum **{}** konnte nicht im Raumplan gefunden werden.\".format(room))\n elif len(events) == 0:\n embed = discord.Embed(description=\"Der Raum **{}** ist {} nicht belegt.\".format(room, date_display))\n else:\n embed = discord.Embed(description=\"Der Raum **{}** ist {} zu folgenden Uhrzeiten belegt:\".format(room, date_display))\n for event in events:\n embed.add_field(name=\"{} bis {}\".format(event[\"start\"], event[\"end\"]), value=event[\"name\"], inline=False)\n\n embed.set_footer(text=FOOTER)\n await ctx.send(embed=embed)\n\nWEEKDAYS = {\n \"mo\": 0,\n \"montag\": 0,\n \"di\": 1,\n \"dienstag\": 1,\n \"mi\": 2,\n \"mittwoch\": 2,\n \"do\": 3,\n \"donnerstag\": 3,\n \"fr\": 4,\n \"freitag\": 4,\n \"sa\": 5,\n \"samstag\": 5,\n \"so\": 6,\n \"sonntag\": 6,\n}\n\ndef get_date(date_str):\n \"\"\"\n Returns one of the following:\n - today's date if the given string is None or 'heute'\n - tomorrow's date if the given string is 'morgen'\n - the date of given weekday if the string date is in the keys of WEEKDAYS\n - the given date, adjusted to fit the format\n \n The format will always be DD.MM.YY. 
TODO docstring\n \"\"\"\n if date_str is not None:\n date_str = date_str.lower()\n \n if date_str is None or date_str == \"heute\":\n date = datetime.date.today().strftime('%d.%m.%Y')\n elif date_str == \"morgen\":\n date = (datetime.date.today() + datetime.timedelta(days=1)).strftime('%d.%m.%Y')\n elif date_str in WEEKDAYS.keys():\n today = datetime.date.today()\n days_ahead = (WEEKDAYS[date_str] - today.weekday()) % 7\n date = (today + datetime.timedelta(days=days_ahead)).strftime('%d.%m.%Y')\n else:\n try:\n datetime.datetime.strptime(date_str, '%d.%m.%Y')\n except ValueError:\n raise ValueError(\"Bitte gib ein gültiges Datum im Format DD.MM.YYYY an (oder heute, morgen oder ein Wochentag).\")\n\n date = date_str\n\n # Convert from DD.MM.YYYY to DD.MM.YY (results of strftime())\n if len(date) == 10:\n date = date[:-4] + date[-2:]\n\n return date\n\nbot.run(TOKEN)\n","repo_name":"tim-kt/raum-check","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"18462283537","text":"ALLOW_STRING = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\nbase = len(ALLOW_STRING)\n\ndef encode(number):\n result = []\n\n if(number == 0):\n return ALLOW_STRING[number]\n\n while(number > 0):\n result.append(ALLOW_STRING[number % base])\n number = number // base\n result.reverse()\n\n return ''.join(result)\n\n\ndef decode(input):\n counter, decoded = 1, 0\n input_length = len(input)\n\n for character in input:\n decoded += ALLOW_STRING.find(character) * pow(base, input_length-counter)\n counter += 1\n\n return decoded\n","repo_name":"scsmla/URL-Shortner","sub_path":"service/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4757268776","text":"# c++ version pse based on opencv 3+\nfrom pse import decode as pse_decode\nfrom cfglib.config import config as cfg\n\n\nclass TextDetector(object):\n\n def __init__(self, model):\n # evaluation mode\n self.model = model\n model.eval()\n # parameter\n self.scale = cfg.scale\n self.threshold = cfg.threshold\n\n def detect(self, image, img_show):\n # get model output\n preds = self.model.forward(image)\n preds, boxes, contours = pse_decode(preds[0], self.scale, self.threshold)\n\n output = {\n 'image': image,\n 'tr': preds,\n 'bbox': boxes\n }\n return contours, output\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"GXYM/TextBPN-Plus-Plus","sub_path":"util/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"47"} +{"seq_id":"7689406443","text":"from django.shortcuts import render\r\nfrom Ehub.models import *\r\nfrom Mvfx.models import *\r\nfrom Msfx.models import *\r\nfrom Mgraphics.models import *\r\nfrom django.contrib import messages\r\nfrom django.http import JsonResponse,HttpResponse\r\nimport json \r\n\r\nfrom django.contrib.auth.decorators import login_required \r\nfrom Ehub import *\r\n\r\n# Create your views here.\r\n#VFX SECTION \r\ndef graphicsproducts(request):\r\n\tif request.user.is_authenticated:\r\n\t\t\tgetfreegraphicsproducts=FreeGraphicsProduct.objects.all() \r\n\t\t\tgetpaidgraphicsproducts=PaidGraphicsProduct.objects.all() \r\n\r\n\t\t\tgetprofile = request.user.profile\r\n\t\t\torder, created = 
Order.objects.get_or_create(profile=getprofile, complete=False)\r\n\t\t\tpurchase, created = PurchasedProduct.objects.get_or_create(profile=getprofile, complete=False)\r\n\r\n\telse:\r\n\t\t\torder={}\r\n\t\r\n\t\t\tgetfreegraphicsproducts=FreeGraphicsProduct.objects.all() \r\n\t\t\tgetpaidgraphicsproducts=PaidGraphicsProduct.objects.all() \r\n\r\n\t\t \r\n\tcontext={ \r\n\t\t\t 'order':order,\r\n\t\t\t 'getfreegraphicsproducts':getfreegraphicsproducts,\r\n\t\t\t 'getpaidgraphicsproducts':getpaidgraphicsproducts,\r\n\t\t\t }\r\n\treturn render(request, 'Graphics/landscapes.html',context)\r\n\r\ndef updatepaidgraphicsproducts(request):\r\n\t\tprint('Graphics(s) loaded to the API..')\r\n\t\tdata = json.loads(request.body) \r\n\t\tproductName = data['productName'] \r\n\t\tpublisherId=data['publisherId']\r\n\r\n\t\taction = data['action']\r\n\t\tprint('action:', action) \r\n\t\tprint('productName:', productName) \r\n\r\n\t\tgetprofile = request.user.profile\r\n\r\n\t\tgetpaidgraphicsproduct=PaidGraphicsProduct.objects.get(name=productName)\r\n\t\torder, created = Order.objects.get_or_create(profile=getprofile, complete=False)\r\n\t\tpurchase, created = PurchasedProduct.objects.get_or_create(profile=getprofile, complete=False)\r\n\t\torderpaidgraphicsproduct, created = OrderPaidGraphicsProduct.objects.get_or_create(profile=getprofile, order=order, addtoDpage=purchase, product=getpaidgraphicsproduct,published_by=publisherId,complete=False)\r\n\t\tprint('adnajdak')\r\n\t\tprint(orderpaidgraphicsproduct)\r\n\t\tif action == 'adpdt':\r\n\t\t\tif orderpaidgraphicsproduct.quantity==0:\r\n\t\t\t\torderpaidgraphicsproduct.quantity = (orderpaidgraphicsproduct.quantity + 1)\r\n\t\t\telif orderpaidgraphicsproduct.quantity==1:\r\n\t\t\t\torderpaidgraphicsproduct.quantity =orderpaidgraphicsproduct.quantity \r\n\t\t\tprint(orderpaidgraphicsproduct.quantity)\t\r\n \r\n\t\tif action == 'rmvpdt':\r\n\t\t\torderpaidgraphicsproduct.quantity = (orderpaidgraphicsproduct.quantity - 1)\r\n\r\n\t\torderpaidgraphicsproduct.save()\r\n\r\n\t\tif orderpaidgraphicsproduct.quantity <=0: \r\n\t\t\torderpaidgraphicsproduct.delete()\r\n \r\n\t\t\tprint(orderpaidgraphicsproduct.quantity)\t \r\n\r\n\t\treturn JsonResponse('Product was added', safe=False)\r\n\r\n\r\n\t\t\t\r\n\r\n","repo_name":"Sam231221/Dimensional-Illusions-EcommerceV1","sub_path":"Mgraphics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42318806705","text":"import asyncio\n\nfrom commands import Context, Module\n\nmodule = Module()\n\n\nTRACKMAKER = [\n \"mai hausu is danbooru\",\n \"NEW hausu erekutoro hausu\",\n \"Torakku doraibaa tsuukin rasshu\",\n \"Appu na roodo wa tenshon MAX\",\n \"Hausu dasuto non fikkushon\",\n \"Kafunshou ni wa masuku meron\",\n \"Kaasoru matte yo kurakushon\",\n \"Hora kurikku ririkku torakku meikaa\",\n \"\",\n \"oshare no kyokuchi da fasshon sentaa\",\n \"Ryoute wo kakagete kurappyohenza\",\n \"Jaaji ni waishatsu zettai hen da\",\n \"M.I.D.I torakku meikaa\",\n \"Oshare no seichi da fasshon sentaa\",\n \"Ryoute wo kakagete kurappyohenza\",\n \"Nouki wa ashita da zettai tetsuya\",\n \"Ebidei ebinai torakku meikaa\",\n \"\",\n \"torakku meikaa kurappyohenza\",\n \"Torakku meikaa kurappyohenza\",\n]\n\n\n@module.command()\nasync def kurappyohenza(ctx: Context):\n \"\"\"Echoes the given message.\"\"\"\n for line in TRACKMAKER:\n await ctx.say(line)\n await 
asyncio.sleep(2)\n","repo_name":"pckv/djulkalender-bot","sub_path":"djulkalenderbot/modules/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9364071382","text":"n = 100000000000000000000000000000000000\nm = 0\nr = str\ns = 0\nc = 0\nwhile r != 's':\n v = int(input('Digite um número: '))\n s += v\n c += 1\n if v > m:\n m = v\n if v < n:\n n = v\n r = str(input('Queres parar [S/N]: ')).upper().strip()\nmedia = s / c\nprint('A média foi {}, o valor mais alto foi {} e o mais baixo {}.'.format(media, m, n))","repo_name":"pedrovs16/PythonEx","sub_path":"ex065.py","file_name":"ex065.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18677289938","text":"\n\n\nfrom . import TechArticlesSentenceTokenizer\n\nfrom . import TechArticlesWordTokenizer\nfrom . import TechArticlesCleaner\nfrom . import TechArticlesPreprocessor\n\nimport logging\n\n\n\n\n\nclass DefaultTokenizer:\n\n def __init__(self, sentence_tokenizer=None, word_tokenizer=None, preprocessor=None):\n self.sentence_tokenizer = TechArticlesSentenceTokenizer() if not sentence_tokenizer else sentence_tokenizer\n self.preprocessor = TechArticlesPreprocessor() if not preprocessor else preprocessor\n self.word_tokenizer = TechArticlesWordTokenizer(self.preprocessor) if not word_tokenizer else word_tokenizer\n self.articles_cleaner = TechArticlesCleaner()\n\n\n\n def tokenize_ddf(self, articleDF):\n\n texts = []\n\n\n def tokenize (row):\n texts.append(self.tokenize_doc(row['title'],row['text']))\n\n if (len(texts) % 100 == 0):\n logging.info(\"Processed {} texts\".format(len(texts)))\n return row\n\n\n logging.info(\"Tokenizing documents... this might take a while\")\n logging.info(\"ArticleDF has {} rows \".format(len(articleDF)))\n\n articleDF['article_txt'] = articleDF['title'].map(str)+\".\\n\"+ articleDF['text']\n articleDF['article_tokens'] = articleDF['article_txt'].apply(lambda x: self.tokenize_fulldoc(x, do_log=True))\n\n\n return articleDF['article_tokens']\n\n def tokenize_doc(self, title, doc, do_lemma=True):\n if title:\n return self.tokenize_fulldoc(title + \". 
\\n\" + doc, do_lemma=do_lemma)\n else:\n return self.tokenize_fulldoc(doc, do_lemma=do_lemma)\n\n def tokenize_fulldoc(self, all_doc, do_lemma=True, do_log=False):\n words = self.word_tokenizer.tokenize_fulldoc(all_doc, do_lemma=do_lemma, do_log=do_log)\n\n return words\n\n def clean_text(self, text):\n\n text = self.articles_cleaner.do_clean(text)\n text = self.sentence_tokenizer.clean_sentences(text)\n result = \" \".join(text)\n return result\n\n\ndefaultTokenizer = DefaultTokenizer()","repo_name":"diegoami/newscollection","sub_path":"technews_nlp_aggregator/nlp_model/common/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"25621045967","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef get_shapeletinfo_from_name(name_shap):\n \"\"\"\n Method to get a dictionnary that describes the shapelet\n\n Parameters\n ----------\n name_shap : name of the shapelet\n\n Returns\n -------\n dictionary\n \"\"\"\n name_shap_split = name_shap.split('#')\n shapelet_info= {}\n shapelet_info['ts'] = int(name_shap_split[1])\n shapelet_info['pos'] = int(name_shap_split[2].split('-')[0])\n shapelet_info['length'] = int(name_shap_split[2].split('-')[1])\n return shapelet_info\n\n\ndef get_shapelet(df_ts, shapelet_info):\n \"\"\"\n Method to get the shapelet\n\n Parameters\n ----------\n df_ts : dataframe that contains the time series in which the shapelet has been drawn\n shapelet_info : dictionary that describes the shapelet\n\n Returns\n -------\n numpy array, shape (1, length of shapelet)\n \"\"\"\n ts = shapelet_info['ts']\n pos = shapelet_info['pos']\n L = shapelet_info['length']\n return df_ts.loc[ts][pos:pos + L]\n\n\ndef get_ts(df_ts, shapelet_info):\n \"\"\"\n Method to get the timeseries in which the shapelet has been drawn\n\n Parameters\n ----------\n df_ts : dataframe that contains the time series in which the shapelet has been drawn\n shapelet_info : dictionary that describes the shapelet\n\n Returns\n -------\n numpy array, shape (1, length of the time series)\n \"\"\"\n ts = shapelet_info['ts']\n return df_ts.loc[ts]\n\ndef plot_UCRdataset_shapelet(name_shap, df_ts):\n \"\"\"\n Method to plot the shapelet in the time series where it has been drawn\n\n Parameters\n ----------\n df_ts : dataframe that contains the time series in which the shapelet has been drawn\n name_shap : name of the shapelet\n\n Returns\n -------\n numpy array, shape (1, length of shapelet)\n \"\"\"\n shapelet_info = get_shapeletinfo_from_name(name_shap)\n ts = get_ts(df_ts, shapelet_info)\n plt.figure(figsize=(20, 3))\n ts.plot()\n shap= get_shapelet(df_ts, shapelet_info)\n shap.plot(marker='.', c='r')\n\ndef multihist(x, **kwargs):\n \"\"\"\n Method to plot an histogram with specific parameters\n\n Parameters\n ----------\n x : data\n kwargs : supplementary parameters\n\n Returns\n -------\n histogram\n \"\"\"\n return plt.hist(x,histtype='step',linewidth=3, **kwargs)\n\ndef plot_distribution_loc_dist_TwoPatterns(name_shap,twpatt_df_shaprep,twpatt_y_train):\n \"\"\"\n Method to plot the distribution of localization and of distance for the selected shapelet in the TwoPatterns dataset\n\n Parameters\n ----------\n name_shap : name of the shapelet to plot\n twpatt_df_shaprep : dataframe of the time series represented via shapelet features\n twpatt_y_train : class of the time series, lenght must be equal to the length of twpatt_df_shaprep\n\n 
Returns\n -------\n save and plot the distribution of localization and of distance for the selected shapelet\n \"\"\"\n twpatt_df_shaprep['class'] = ['1 (A-A)' if y == 1 else '2 (B-A)' if y == 2 else '3 (A-B)' if y == 3 else '4 (B-B)' for y in twpatt_y_train]\n plt.rc(\"legend\",title_fontsize = 'x-large')\n shapelet_df = twpatt_df_shaprep[['{}loc'.format(name_shap),'{}dist'.format(name_shap),'class']]\n shapelet_df = pd.melt(shapelet_df,id_vars=['class'],value_vars=['{}loc'.format(name_shap),'{}dist'.format(name_shap)])\n shapelet_df.replace({'{}loc'.format(name_shap):'loc','{}dist'.format(name_shap):'dist'},inplace=True)\n shapelet_df.columns =['class','category','value']\n \n g0 = sns.FacetGrid(shapelet_df[shapelet_df['category']=='dist'], hue='class', size=5, aspect=2, sharex='none')\n _ = g0.map(multihist, 'value', alpha=0.6)\n g0.axes[0,0].set_xlabel('shapelet distance',fontsize=35)\n g0.axes[0,0].set_ylabel('number of time series',fontsize=35)\n g0.axes[0,0].legend(title='class',title_fontsize=20,loc=1,bbox_to_anchor=(1.25, 0.75))\n handles,labels = g0.axes[0,0].get_legend_handles_labels()\n handles = [handles[3],handles[0],handles[1],handles[2]]\n labels = [labels[3],labels[0],labels[1],labels[2]]\n g0.axes[0,0].legend(handles,labels,title='class',title_fontsize=30,loc=1,bbox_to_anchor=(1.25, 0.75))\n g0.set(xlim = (0,40))\n g0.savefig(\"./TwoPatterns_{}_dist_distribution.pdf\".format(name_shap),bbox_inches='tight')\n \n g1 = sns.FacetGrid(shapelet_df[shapelet_df['category']=='loc'], hue='class', size=5, aspect=2, sharex='none')\n _ = g1.map(multihist, 'value', alpha=0.6)\n g1.axes[0,0].set_xlabel('localization',fontsize=35)\n g1.axes[0,0].set_ylabel('number of time series',fontsize=35)\n g1.axes[0,0].legend(title='class',title_fontsize=20,loc=1,bbox_to_anchor=(1.25, 0.75))\n handles,labels = g1.axes[0,0].get_legend_handles_labels()\n handles = [handles[3],handles[0],handles[1],handles[2]]\n labels = [labels[3],labels[0],labels[1],labels[2]]\n g1.set(xlim = (0,120))\n g1.savefig(\"./TwoPatterns_{}_loc_distribution.pdf\".format(name_shap),bbox_inches='tight')\n plt.show()\n\n","repo_name":"rtavenar/localized_random_shapelets","sub_path":"plot_shapelet.py","file_name":"plot_shapelet.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"42787442517","text":"import numpy as np\n\nimport aoc\n\ndirections = {\n 'U': np.array([0, 1]),\n 'D': np.array([0, -1]),\n 'R': np.array([1, 0]),\n 'L': np.array([-1, 0])\n}\n\nday = 3\nlines = aoc.get_input(day)\n\ncable1 = [(inst[0], int(inst[1:])) for inst in lines[0].split(',')]\ncable2 = [(inst[0], int(inst[1:])) for inst in lines[1].split(',')]\n\ngrid = {}\nat = np.array([0, 0])\nstep = 0\n\nfor direction, length in cable1:\n delta = directions[direction]\n for _ in range(length):\n at += delta\n step += 1\n if at.tostring() not in grid.keys():\n grid[at.tostring()] = step\n\n\nmin_dist = 1e100\nmin_at = None\n\nmin_delay = 1e100\nmin_delay_at = None\n\nat = np.array([0, 0])\nstep = 0\n\nfor direction, length in cable2:\n delta = directions[direction]\n for _ in range(length):\n at += delta\n step += 1\n if at.tostring() in grid.keys():\n dist = np.abs(at).sum()\n if dist < min_dist:\n min_dist = dist\n min_at = at\n\n delay = step + grid[at.tostring()]\n if delay < min_delay:\n min_delay = delay\n min_delay_at = at\n\nprint(min_dist)\n\ncorrect = aoc.submit(min_dist, day)\nprint(f'Answer 1 correct: {correct}')\n\nprint(min_delay)\n\ncorrect = 
aoc.submit(min_delay, day, 2)\nprint(f'Answer 2 correct: {correct}')\n","repo_name":"sbremer/adventofcode2019","sub_path":"aoc19_simon/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9393473166","text":"#! /usr/bin/env python\n'''\nAuthor: Scott H. Hawley\n\nBased on paper,\nA SOFTWARE FRAMEWORK FOR MUSICAL DATA AUGMENTATION\nBrian McFee, Eric J. Humphrey, and Juan P. Bello\nhttps://bmcfee.github.io/papers/ismir2015_augmentation.pdf\n\n'''\nfrom __future__ import print_function\nimport numpy as np\nimport librosa\nfrom random import getrandbits\nimport sys, getopt, os\nfrom multiprocessing import Pool\nfrom functools import partial\n\n\ndef random_onoff(): # randomly turns on or off\n return bool(getrandbits(1))\n\n\n# returns a list of augmented audio data, stereo or mono\ndef augment_audio(y, sr, n_augment = 0, allow_speedandpitch = True, allow_pitch = True,\n allow_speed = True, allow_dyn = True, allow_noise = True, allow_timeshift = True, tab=\"\",quiet=False):\n\n mods = [y] # always returns the original as element zero\n length = y.shape[0]\n\n for i in range(n_augment):\n if not quiet:\n print(tab+\"augment_audio: \",i+1,\"of\",n_augment)\n y_mod = y.copy()\n count_changes = 0\n\n # change speed and pitch together\n if (allow_speedandpitch) and random_onoff():\n length_change = np.random.uniform(low=0.9,high=1.1)\n speed_fac = 1.0 / length_change\n if not quiet:\n print(tab+\" resample length_change = \",length_change)\n tmp = np.interp(np.arange(0,len(y),speed_fac),np.arange(0,len(y)),y)\n #tmp = resample(y,int(length*lengt_fac)) # signal.resample is too slow\n minlen = min( y.shape[0], tmp.shape[0]) # keep same length as original;\n y_mod *= 0 # pad with zeros\n y_mod[0:minlen] = tmp[0:minlen]\n count_changes += 1\n\n # change pitch (w/o speed)\n if (allow_pitch) and random_onoff():\n bins_per_octave = 24 # pitch increments are quarter-steps\n pitch_pm = 4 # +/- this many quarter steps\n pitch_change = pitch_pm * 2*(np.random.uniform()-0.5)\n if not quiet:\n print(tab+\" pitch_change = \",pitch_change)\n y_mod = librosa.effects.pitch_shift(y, sr, n_steps=pitch_change, bins_per_octave=bins_per_octave)\n count_changes += 1\n\n # change speed (w/o pitch),\n if (allow_speed) and random_onoff():\n speed_change = np.random.uniform(low=0.9,high=1.1)\n if not quiet:\n print(tab+\" speed_change = \",speed_change)\n tmp = librosa.effects.time_stretch(y_mod, speed_change)\n minlen = min( y.shape[0], tmp.shape[0]) # keep same length as original;\n y_mod *= 0 # pad with zeros\n y_mod[0:minlen] = tmp[0:minlen]\n count_changes += 1\n\n # change dynamic range\n if (allow_dyn) and random_onoff():\n dyn_change = np.random.uniform(low=0.5,high=1.1) # change amplitude\n if not quiet:\n print(tab+\" dyn_change = \",dyn_change)\n y_mod = y_mod * dyn_change\n count_changes += 1\n\n # add noise\n if (allow_noise) and random_onoff():\n noise_amp = 0.005*np.random.uniform()*np.amax(y)\n if random_onoff():\n if not quiet:\n print(tab+\" gaussian noise_amp = \",noise_amp)\n y_mod += noise_amp * np.random.normal(size=length)\n else:\n if not quiet:\n print(tab+\" uniform noise_amp = \",noise_amp)\n y_mod += noise_amp * np.random.normal(size=length)\n count_changes += 1\n\n # shift in time forwards or backwards\n if (allow_timeshift) and random_onoff():\n timeshift_fac = 0.2 *2*(np.random.uniform()-0.5) # up to 20% of length\n if not quiet:\n print(tab+\" timeshift_fac = 
\",timeshift_fac)\n start = int(length * timeshift_fac)\n if (start > 0):\n y_mod = np.pad(y_mod,(start,0),mode='constant')[0:y_mod.shape[0]]\n else:\n y_mod = np.pad(y_mod,(0,-start),mode='constant')[0:y_mod.shape[0]]\n count_changes += 1\n\n # last-ditch effort to make sure we made a change (recursive/sloppy, but...works)\n if (0 == count_changes):\n if not quiet:\n print(\"No changes made to signal, trying again\")\n mods.append( augment_audio(y, sr, n_augment = 1, tab=\" \", quiet=quiet)[1] )\n else:\n mods.append(y_mod)\n\n return mods\n\n\ndef augment_one_file(file_list, n_augment, quiet, file_index):\n\n infile = file_list[file_index]\n if os.path.isfile(infile):\n print(\" Operating on file \",infile,\", making \",n_augment,\" augmentations...\",sep=\"\")\n y, sr = librosa.load(infile, sr=None)\n mods = augment_audio(y, sr, n_augment=n_augment, quiet=quiet)\n for i in range(len(mods)-1):\n filename_no_ext = os.path.splitext(infile)[0]\n ext = os.path.splitext(infile)[1]\n outfile = filename_no_ext+\"_aug\"+str(i+1)+ext\n if not quiet:\n print(\" mod = \",i+1,\": saving file\",outfile,\"...\")\n librosa.output.write_wav(outfile,mods[i+1],sr)\n else:\n print(\" *** File\",infile,\"does not exist. Skipping.\")\n return\n\ndef main(args):\n np.random.seed(1)\n quiet = args.quiet\n\n if args.test: # just testing the augment_audio.py on sample data\n y, sr = librosa.load(librosa.util.example_audio_file(),sr=None)\n librosa.output.write_wav(\"orig.wav\",y,sr)\n mods = augment_audio(y, sr, n_augment=args.N, quiet=quiet)\n for i in range(len(mods)-1):\n outfile = \"modded\"+str(i+1)+\".wav\"\n librosa.output.write_wav(outfile,mods[i+1],sr)\n sys.exit()\n\n # read in every file on the list, augment it lots of times, output all those\n file_indices = tuple( range(len(args.file)) )\n cpu_count = os.cpu_count()\n pool = Pool(cpu_count)\n pool.map(partial(augment_one_file, args.file, args.N, args.quiet), file_indices)\n\n '''\n for infile in args.file:\n if os.path.isfile(infile):\n print(\" Operating on file \",infile,\", making \",args.N,\" augmentations...\",sep=\"\")\n y, sr = librosa.load(infile, sr=None)\n mods = augment_audio(y, sr, n_augment=args.N, quiet=quiet)\n for i in range(len(mods)-1):\n filename_no_ext = os.path.splitext(infile)[0]\n ext = os.path.splitext(infile)[1]\n outfile = filename_no_ext+\"_aug\"+str(i+1)+ext\n if not quiet:\n print(\" mod = \",i+1,\": saving file\",outfile,\"...\")\n librosa.output.write_wav(outfile,mods[i+1],sr)\n else:\n print(\" *** File\",infile,\"does not exist. 
Skipping.\")\n '''\n return\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='Perform data augmentation')\n parser.add_argument(\"-q\", \"--quiet\", help=\"quiet mode; reduce output\",\n action=\"store_true\")\n parser.add_argument(\"-t\", \"--test\", help=\"test on sample data (takes precedence over other args)\", action=\"store_true\")\n parser.add_argument(\"N\", help=\"number of augmentations to generate\",type=int)\n parser.add_argument('file', help=\"sound files to augment\", nargs='*')\n args = parser.parse_args()\n main(args)\n","repo_name":"drscotthawley/panotti","sub_path":"utils/augment_audio.py","file_name":"augment_audio.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":260,"dataset":"github-code","pt":"47"} +{"seq_id":"20237810046","text":"import numpy as np\nfrom dengo.chemical_network import \\\n ChemicalNetwork, \\\n reaction_registry, \\\n cooling_registry\nfrom dengo.ion_by_ion import setup_ionization\nfrom dengo.chemistry_constants import tiny, kboltz, mh\nimport numpy as np\n\nNCELLS = 1\ndensity = 1.0 \n#* 1.67e-24\ntemperature = np.logspace(2, 8, NCELLS)\ntemperature[:] = 5e6\nX = 1e-8\n\nion_by_ion = ChemicalNetwork(write_intermediate = False,\n stop_time = 3.1557e13)\nion_by_ion.add_species(\"de\")\n\nfor atom in [\"H\", \"He\", \"O\"]: #\"C\", \"N\", \"O\", \"Ne\", \"Si\"]:\n s, c, r = setup_ionization(atom, photo_background=\"HM12\")\n ion_by_ion.add_collection(s, c, r)\n\n# This defines the temperature range for the rate tables\nion_by_ion.init_temperature((1e0, 1e12))\n\n# This defines the redsfhit range for the rate tables\nion_by_ion.init_redshift((0.0, 9.0))\n\ntiny = 1e-20\n\ninit_array = np.ones(NCELLS) * density\ninit_values = dict()\n\n# set up initial temperatures values used to define ge\ninit_values['T'] = temperature\n\nstart_neutral = False\n\nimport chianti.core as ch\n\nif start_neutral:\n for s in ion_by_ion.required_species:\n if getattr(s, 'free_electrons', -1) == 0:\n init_values[s.name] = init_array.copy()\n else:\n init_values[s.name] = X * init_array\n # Scale to solar abundances\n if s.name not in ['de', 'ge']:\n ion_name = s.name.lower()\n ion = ch.ion(ion_name, temperature=init_values['T'])\n init_values[s.name] *= ion.Abundance\n\n init_values['de'][:] = 1e-30\n init_values = ion_by_ion.convert_to_mass_density(init_values)\nelse:\n # start CIE\n import chianti.util as chu\n\n for s in sorted(ion_by_ion.required_species):\n if s.name != 'ge':\n if s.name == 'de':\n continue\n else:\n ion_name = s.name.lower()\n ion = ch.ion(ion_name, temperature=init_values['T'])\n ion.ioneqOne()\n ion_frac = ion.IoneqOne\n init_values[s.name] = ion_frac * init_array * ion.Abundance\n \n # in case something is negative or super small:\n init_values[s.name][init_values[s.name] < tiny] = tiny\n\n init_values['de'] = init_array * 0.0\n init_values = ion_by_ion.convert_to_mass_density(init_values)\n\ninit_values['de'] = ion_by_ion.calculate_free_electrons(init_values)\ninit_values['density'] = ion_by_ion.calculate_total_density(init_values)\nnumber_density = ion_by_ion.calculate_number_density(init_values)\n\n# calculate ge (very crudely)\ngamma = 5.0/3.0\ninit_values['ge'] = ((temperature * number_density * kboltz)\n / (init_values['density'] * mh * (gamma - 1)))\n\n# Write the initial conditions file\nion_by_ion.write_solver(\"ion_by_ion\", output_dir = \".\",\n init_values=init_values,\n input_is_number=False)\n\nimport 
pyximport\npyximport.install(setup_args={\"include_dirs\":np.get_include()},\n                  reload_support=True, inplace=True)\n\nion_by_ion_solver_run = pyximport.load_module(\"ion_by_ion_solver_run\",\n                        \"ion_by_ion_solver_run.pyx\",\n                        build_inplace = True, pyxbuild_dir = \"_dengo_temp\")\nrv, rv_int = ion_by_ion_solver_run.run_ion_by_ion(init_values, 1e16, 100000,\n                    z = 0.0)\nimport pylab\npylab.clf()\n\nmask = rv_int['successful']\nfor name in sorted(rv_int):\n    if len(rv_int[name].shape) == 1:\n        rv_int[name] = rv_int[name][mask]\n    else:\n        rv_int[name] = rv_int[name][0, mask]\n    \nskip = ('successful', 'dt', 't', 'ge')\nfor n, v in sorted(rv_int.items()):\n    if n in skip: continue\n    pylab.loglog(rv_int['t'], v, label = n)\n\npylab.ylim(density * 1e-30, density * 10)\npylab.xlabel(\"time [s]\")\npylab.legend(loc='best', fontsize='xx-small')\npylab.savefig(\"plot.png\")\n\npylab.clf()\npylab.loglog(rv_int['t'], rv_int['T'], label = 'T')\npylab.xlabel(\"time [s]\")\npylab.savefig(\"plot_temp.png\")\n","repo_name":"data-exp-lab/dengo","sub_path":"examples/run_ion_network.py","file_name":"run_ion_network.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2279566331","text":"from pythoncourse2020.model.group import Contact\nimport random\nimport string\n\n\ndef random_string(prefix, maxlen):\n    symbols = string.ascii_letters + string.digits + \" \"\n    return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))])\n\ntestdata = [\n    Contact(first_name=random_string(\"first_name\", 10), midle_name=random_string(\"midle_name\", 20), last_name=random_string(\"last_name\", 20))\n    for name in range(1)\n    ]","repo_name":"ndo1989/pythoncourse2020","sub_path":"data/add_contact.py","file_name":"add_contact.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
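The random_string helper in the pythoncourse2020 record above draws from an unseeded RNG, so every run produces different contact fixtures. A minimal sketch, assuming the same helper, of pinning the seed so generated test data stays reproducible between runs:

import random
import string

def random_string(prefix, maxlen):
    symbols = string.ascii_letters + string.digits + " "
    return prefix + "".join(random.choice(symbols) for _ in range(random.randrange(maxlen)))

random.seed(42)  # fixed seed: the generated fields are now identical on every run
print(random_string("first_name", 10))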
+{"seq_id":"72757728464","text":"import requests\nimport jieba\nimport csv\nimport json\nimport datetime\nimport twstock\nimport sys\n# Read the comments on lines 15~18 first\n# Please download this from the chatbot folder and update the paths before use\n# Required files: city_to_weather.csv ,synonyms.csv ,country_to_area.csv ,keyword.csv ,qu.txt\njieba.load_userdict('C:/xampp/htdocs/ichat/assets/chatbot/qu.txt')\nquestion = ''\nfor i in range(1,len(sys.argv)):\n    question += (sys.argv[i]+' ')\n# Same as 整合.py except for import sys\n# Only this part was changed\n# sys.argv[0] is the script name\n# everything after sys.argv[0] is the data sent over from PHP\ndef error_display():\n    print('Hello!\\nI am the iChat chatbot!\\nFor best results, break your query into short, precise phrases\\ne.g.: 1. Kaohsiung weather\\n2. UK time\\n3. 1234 stock\\n\\nIf you did not get the answer you need\\nplease leave feedback for the admin, thank you!!')\n\nbot_start = datetime.datetime.utcnow()\n\nseg_list = jieba.cut_for_search(question)\n\nNot_included = []\nwith open('C:/xampp/htdocs/ichat/assets/chatbot/keyword.csv', newline='', encoding=\"utf-8\") as csvfile:\n    rows = csv.reader(csvfile)\n    for row in rows:\n        Not_included.append(row[0])\n\nimportant = []\nfor i in seg_list:\n    if (i not in Not_included) & (len(i) > 1):\n        important.append(i)\n        # print(i)\n\nsynonyms = []\n\nwith open('C:/xampp/htdocs/ichat/assets/chatbot/synonyms.csv', newline='', encoding=\"utf-8-sig\") as csvfile:\n    rows = csv.reader(csvfile)\n    for row in rows:\n        synonyms.append(row)\nfor i in range(0,len(important)) :\n    for j in synonyms:\n        if important[i] in j:\n            important[i] = j[0]\n\nif \"stock\" in important :\n    # print(\"This is a stock question\")\n    all_stock_dict = []\n    stock_code = []\n    stock_name = []\n    stock_market = []\n    stock_group = []\n    check = 0\n    all_stock_dict.append(twstock.codes)\n    for i in all_stock_dict:\n        for j in i:\n            stock_code.append(j)\n    for i in all_stock_dict:\n        for j in stock_code:\n            stock_name.append(i[j][2])\n            stock_market.append(i[j][5])\n            stock_group.append(i[j][6])\n    for i in important:\n        if i.isdigit() :\n            if i in stock_code:\n                check = 1\n                qu = i\n        if i.isalpha():\n            if i in stock_name:\n                check = 1\n                qu = stock_code[stock_name.index(i)]\n    if check == 1:\n        stock_id = stock_code.index(qu)\n        print('Code:%s Name:%s Status:%s Industry:%s' % (stock_code[stock_id],stock_name[stock_id],stock_market[stock_id],stock_group[stock_id]))\n        print('Price lookup is slow, please wait patiently, thank you!!')\n        stock_real = twstock.realtime.get(qu)\n        stock_history = twstock.Stock(qu)\n        stock_price = stock_history.price[-7:]\n        stock_high = stock_history.high[-7:]\n        stock_low = stock_history.low[-7:]\n        stock_date = stock_history.date[-7:]\n        print('\\nPrices for the last seven days')\n        print('Date  Close  High  Low')\n        for i in range(len(stock_price)):\n            print('%s %s %s %s' %(stock_date[i],stock_price[i],stock_high[i],stock_low[i]))\n        print('\\nFull name:%s \\nTime:%s \\nLatest price:%s'%(stock_real['info']['fullname'],stock_real['info']['time'],stock_real['realtime']['latest_trade_price']))\n        print('\\nBest bid prices and volumes')\n        for i in range(len(stock_real['realtime']['best_bid_price'])):\n            print('Price:%s Volume:%s' % (stock_real['realtime']['best_bid_price'][i],stock_real['realtime']['best_bid_volume'][i]))\n        bot_end = datetime.datetime.utcnow()\n    else:\n        error_display()\n\nelif \"weather\" in important :\n    # print(\"This is a weather question\")\n    city = []\n    city_id = 0\n    city_name = ''\n    # city_to_weather column roles: 0. county/city id  1. county/city name to display  2. station name the data is taken from; columns after 2 are used to match place names\n    with open('C:/xampp/htdocs/ichat/assets/chatbot/city_to_weather.csv', newline='', encoding=\"utf-8-sig\") as csvfile:\n        rows = csv.reader(csvfile)\n        for row in rows:\n            city.append(row)\n    for i in range(0,len(important)) :\n        for j in city:\n            if important[i] in j:\n                city_id = j[0]\n                city_name = j[1]\n                StationName = j[2]\n    if city_id == 0:\n        print(\"Please remember to include a place name (at the county/city level), or leave feedback for the admin, thank you!!\")\n        exit()\n    web = \"https://www.cwb.gov.tw/Data/js/Observe/County/\"+city_id+\".js\"\n    # print(web)\n    r = requests.get(web)\n    r.encoding = 'utf8'\n    jss = r.text.replace('var ST = ','')\n    jss = jss[:(jss.index(';'))]\n    check = 0\n    count = len(jss)\n    i = 0  # reset the scan index before walking the JS payload\n    while i < count:\n        if (jss[i]== '\"')& (check == 0):\n            check = 1\n        elif (jss[i] == \"'\") & (check == 1):\n            jss = jss[:i]+jss[i+1:]\n            count -= 1\n        elif (jss[i] == '\"') & (check == 1):\n            check = 0\n        i += 1\n    jss = jss.replace(\"'\",'\"')\n    \n    # try :\n    jss = json.loads(jss)\n    for i in jss.values():\n        for j in i.values():\n            if StationName == j['StationName']['C']:\n                print(\"Location:%s \\nTemperature:%s°C\\nHumidity:%s\\nAccumulated rain:%s mm\\nMeasured at:%s %s\" % (city_name,j['Temperature']['C']['C'],j['Humidity']['C']+'%',j['Rain']['C'],j['Date'],j['Time']))\n                # print(\"Date:%s Time:%s Station:%s Temperature:%s°C Relative humidity:%s Accumulated rain:%s mm Wind direction:%s\" % (j['Date'],j['Time'],j['StationName']['C'],j['Temperature']['C']['C'],j['Humidity']['C']+'%',j['Rain']['C'],j['WindDir']['C']))\n    # except:\n    #     print(\"Sorry! The instrument is down or there is currently no data\")\n    bot_end = datetime.datetime.utcnow()\n\nelif \"time\" in important :\n    # print(\"This is a time question\")\n    country = []\n    country_area = ''\n    country_name = ''\n    with open('C:/xampp/htdocs/ichat/assets/chatbot/country_to_area.csv', newline='', encoding=\"utf-8-sig\") as csvfile:\n        rows = csv.reader(csvfile)\n        for row in rows:\n            country.append(row)\n    for i in range(0,len(important)) :\n        for j in country:\n            if important[i] in j:\n                country_area = j[0]\n                country_name = important[i]\n    if country_area == '':\n        error_display()\n        exit()\n    timenow = (datetime.datetime.utcnow()+datetime.timedelta(hours=((int)(country_area))))\n    print(\"The current time in %s is %s-%s-%s %s:%s (24hr)\"%(country_name,timenow.year,timenow.month,timenow.day,timenow.hour,timenow.minute))\n    bot_end = datetime.datetime.utcnow()\n\nelif \"Greeting\" in important :\n    error_display()\n\nelse :\n    error_display()","repo_name":"daohaoyi/ichat","sub_path":"assets/chatbot/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
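The time branch of the chatbot above converts UTC using a fixed hour offset read from country_to_area.csv, which silently ignores daylight saving. A hedged alternative sketch using the standard tz database (Python 3.9+; the zone name is illustrative):

from datetime import datetime
from zoneinfo import ZoneInfo

# A tz-database lookup applies DST rules automatically, unlike a fixed offset.
now_london = datetime.now(ZoneInfo("Europe/London"))
print(now_london.strftime("%Y-%m-%d %H:%M"))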
+{"seq_id":"26697810078","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 14 15:37:22 2020\n\n@author: PD\n\"\"\"\n#Import Libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ns='State/UnionTerritory'\n#Import Dataset\ndf = pd.read_csv(\"covid_19_india.csv\")\n\n#Get names of all the states\nstates_list = []\nfor a in df['State/UnionTerritory']:\n    if a not in states_list:\n        states_list.append(a)\n    \n#Get total number of cases in each state\n#Is there a better way to do this? Please clone and add comments if you know any\ncount_cases = {} \nfor a in states_list:\n    b=0\n    for place,con in zip(df[s],df['Confirmed']):\n        if(a==place):\n            b+=con    \n    count_cases[a] = b\n    \n#Pie chart for cases per state\nplt.title('Statewise COVID-19 Cases in India')\nlabels = [place for place in states_list]\n\n#Get data from dict\ndata = [(v) for i, (k, v) in enumerate(count_cases.items())]\nplt.pie(data, labels=labels, autopct='%.2f')\nplt.show()\n\n#Bar Chart for statewise cases\n#How to get the labels on x-axis to not overlap with each other?\nplt.bar(states_list,data, align='center')\nplt.show()\n\n#Histogram\nplt.hist(df['Date'])\nplt.show()\n\n\n","repo_name":"Prathmesh2498/MatPlotLib_Practice","sub_path":"MatPlotLib_practice.py","file_name":"MatPlotLib_practice.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10904887183","text":"\n\ndef solution(papers, papers_len, K):\n    # Visit the list in order, taking each index and value\n    for index, value in enumerate(papers):\n        # If the remaining paper is below 0, there was not enough for the last person, so return the current index - 1\n        if K < 0: return index - 1\n        # If the remaining paper is exactly 0, the paper handed out divided evenly, so return the index as-is\n        elif K == 0: return index\n        # If paper is still left, subtract the amount the next person needs\n        else: K -= value\n    # Escaping the loop means paper was left over after serving everyone, so return the length of papers\n    return papers_len\nprint(solution([2, 4, 3, 2, 1], 5, 10))\nprint(solution([2, 4, 3, 2, 1], 5, 14))","repo_name":"Eluee/python","sub_path":"solution/chapter_4/solution4-2.py","file_name":"solution4-2.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
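The MatPlotLib practice script above asks two questions in its comments: whether there is a better way to total the cases per state, and how to stop the x-axis labels overlapping. A sketch answering both, assuming the same covid_19_india.csv layout — a single groupby replaces the nested counting loops, and rotated ticks keep the labels readable:

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("covid_19_india.csv")
# One groupby-sum replaces the per-state counting loops.
count_cases = df.groupby("State/UnionTerritory")["Confirmed"].sum()

count_cases.plot.bar(title="Statewise COVID-19 Cases in India")
plt.xticks(rotation=90, fontsize=6)  # rotate the labels so they don't overlap
plt.tight_layout()
plt.show()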
+{"seq_id":"20335368278","text":"\"\"\"sixth migration\n\nRevision ID: 6341eb84ef22\nRevises: 7e0310e4fa55\nCreate Date: 2023-02-22 16:50:08.383178\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '6341eb84ef22'\ndown_revision = '7e0310e4fa55'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('botstate')\n    op.drop_table('considerations')\n    op.drop_table('users')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('botstate',\n    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n    sa.Column('started', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),\n    sa.Column('stopped', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),\n    sa.Column('error', sa.BOOLEAN(), autoincrement=False, nullable=True),\n    sa.PrimaryKeyConstraint('id', name='botstate_pkey')\n    )\n    # ### end Alembic commands ###\n","repo_name":"benzeneboi/bbot","sub_path":"backend/migrations/versions/6341eb84ef22_sixth_migration.py","file_name":"6341eb84ef22_sixth_migration.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70293275662","text":"class Solution(object):\n    def countSmaller(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        def cal(nums, low, high, res):\n            if low > high:\n                return 0,0\n            if low == high:\n                return low, high\n            if low < high:\n                mid = (low + high) // 2  # integer division so the indices stay ints under Python 3\n                low1, high1 = cal(nums, low, mid, res)\n                low2, high2 = cal(nums, mid+1, high, res)\n                for i in range(low1, high1 + 1):\n                    for j in range(low2, high2 + 1):\n                        if nums[i] > nums[j]:\n                            res[i] += 1\n                return low, high\n        res = [0] * len(nums)\n        cal(nums, 0, len(res) - 1, res)\n        return res\n","repo_name":"CrazyCoder4Carrot/leetcode","sub_path":"python/301-350/315. Count of Smaller Numbers After Self.py","file_name":"315. Count of Smaller Numbers After Self.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"41307530948","text":"import numpy as np\n\ndef binarySearch(arr, l, r, x): \n    \n    while l <= r: \n    \n        mid = l + (r - l) // 2; \n    \n        # Check if x is present at mid \n        if arr[mid] == x: \n            return mid \n    \n        # If x is greater, ignore left half \n        elif arr[mid] < x: \n            l = mid + 1\n    \n        # If x is smaller, ignore right half \n        else: \n            r = mid - 1\n    \n    # If we reach here, then the element \n    # was not present \n    return -1 \n\nmin = 0\nmax = 0\noutput = 0\narraySize = input()\narray = []\ninputA = input()\ninputA = inputA.split()\nfor i in range(len(inputA)): \n    array.append(int(inputA[i]))\narray.sort()\nqueries = input()\nfor i in range(int(queries)):\n    output = 0\n    query = input()\n    query = query.split()\n    command = int(query[0])\n    max = int(query[1])\n    binaryReturn = binarySearch(array, 0, len(array)-1, max)\n    if binaryReturn != -1:\n        if command == 0: \n            output = (len(array) - binaryReturn) \n        elif command == 1:\n            output = (len(array) - (binaryReturn + 1)) \n    else: \n        if max < array[0]:\n            output = len(array)-1\n        elif max > array[len(array)-1]:\n            output = 0\n    print(output)\n    \n","repo_name":"MatthewHightech/Beginning-Python","sub_path":"binarySearch-2.py","file_name":"binarySearch-2.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38456719975","text":"N=int(input())\nM= list(map(int,input().split()))\nfor i in range(0,len(M)-1):\n    min=i\n    for j in range(i+1,len(M)):\n        if M[min]>M[j]:\n            min=j\n    M[i],M[min]=M[min],M[i]\n# print(M)  # debug print of the sorted list; the judge expects only the total below\nx=[]\nsum1=0\nsum2=0\nfor j in M:\n    sum1+=j\n    x.append(sum1)\nfor z in x:\n    sum2+=z\nprint(sum2)\n\n\n","repo_name":"juyi212/Algorithm_study","sub_path":"0806/백준연습문제/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13237609595","text":"import 
requests, io, cv2\nimport numpy as np\nfrom PIL import Image\nimport pandas as pd\n\nDATABASE_FILE = 'zalando_fashion_data_extraction.csv'\nCONVERTED_DATABASE_FILE = 'zalando_fashion_data_conversion.csv'\ndf = pd.read_csv(DATABASE_FILE)\nIMAGE_SIZE = 28\n\n\ndef saveDatabase(newdf):\n newdf.to_csv(CONVERTED_DATABASE_FILE, index=False)\n\ndef loadDatabase():\n newdf = pd.read_csv(CONVERTED_DATABASE_FILE)\n return newdf\n\ndef image_to_array(img_url):\n response = requests.get(img_url)\n img = Image.open(io.BytesIO(response.content))\n image = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.Resampling.LANCZOS)\n # inverted = np.invert(image)\n imgArray = np.asarray(image)\n cv_im = cv2.cvtColor(imgArray, cv2.COLOR_BGR2GRAY)\n return cv_im / IMAGE_SIZE\n\n\ndef initiate_dfMain():\n COLUMNS_SIZE = IMAGE_SIZE*IMAGE_SIZE\n columns = ['label']\n for i in range(COLUMNS_SIZE):\n columns.append(f'pixel{i}')\n\n dfMain = pd.DataFrame(columns=columns)\n saveDatabase(dfMain)\n return dfMain\n\n\n\ndef main_func():\n print('Initializing dfMain...')\n dfMain = initiate_dfMain()\n print('dfMain Initialized!')\n\n CATEGORIES = {'Jacket': 0, 'Pants': 1, 'Jeans': 2, 'Shorts': 3, 'T-shirt': 4,\n 'Pullover': 5, 'Bag': 6, 'Cap': 7, 'Sandal': 8, 'Skirt': 9}\n\n for row in range(1, len(df)):\n try:\n category_string = df.iloc[row][0]\n category_value = int(CATEGORIES[category_string])\n\n image_url = df.iloc[row][1]\n image_array = image_to_array(image_url)\n\n reshaped_image_array = image_array.reshape(IMAGE_SIZE*IMAGE_SIZE)\n reshaped_image_array = np.insert(reshaped_image_array,0,category_value)\n\n dfMain = dfMain.append(pd.DataFrame(reshaped_image_array.reshape(1, -1), columns=dfMain.columns), ignore_index=True)\n print(f'Completing => {round((row / len(df)) * 100, 2)}%. Category => {category_string}')\n\n if row % 100:\n saveDatabase(dfMain)\n print('Saved database!')\n\n except Exception as e:\n print(f'----EXCEPTION {e}----')\n\nif __name__ == '__main__':\n main_func()\n","repo_name":"aahashemi/Zalando-Fashion-Data-Extraction-Conversion","sub_path":"zalando_fashion_data_conversion.py","file_name":"zalando_fashion_data_conversion.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"6464843185","text":"from .BaseMeasure import BaseMeasure\n\nimport numpy as np\nimport scipy.stats as st\n\n\nclass DILCA(BaseMeasure):\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"\n Initializes the DILCA object.\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # ==========================\n # Distance Matrices\n # ==========================\n\n def _init_distance_matrices(self) -> None:\n \"\"\"\n Initialize distance matrices for all attributes based on SU values.\n \"\"\"\n print(\"X\\n\", self._X)\n print(\"y\\n\", self._y)\n\n # Dictionaries to store probabilities.\n self._x_probabilities: dict[tuple[int, int], np.ndarray] = {}\n self._y_probabilities: dict[tuple[int, int], np.ndarray] = {}\n self._conditional_probabilities: dict[tuple[int, int], np.ndarray] = {}\n self._init_probabilities()\n\n SU_matrix = self._compute_correlation_matrix()\n print(\"SU_matrix\\n\", SU_matrix)\n\n distance_matrices = self._DILCA_M(SU_matrix)\n print(\"distance_matrices\\n\", distance_matrices)\n\n self._distance_matrices = distance_matrices\n\n # --------------------------\n # ALGORITHM 1: computeCorrelationMatrix(D)\n # --------------------------\n\n def _compute_correlation_matrix(self) -> np.ndarray:\n \"\"\"\n Compute the 
symmetrical uncertainty matrix for all attributes.\n\n Returns:\n np.ndarray: Symmetrical Uncertainty matrix.\n \"\"\"\n SU_matrix = np.ones((self._d, self._d))\n\n for i in range(self._d):\n for j in range(self._d):\n if i != j:\n SU_matrix[i, j] = self._compute_symmetrical_uncertainty(i, j)\n\n return SU_matrix\n\n def _compute_symmetrical_uncertainty(self, X_i: int, Y_i: int) -> float:\n \"\"\"\n Calculate symmetrical uncertainty between two attributes.\n\n Args:\n X_i (int): Index of the first attribute.\n Y_i (int): Index of the second attribute.\n\n Returns:\n float: Symmetrical Uncertainty value.\n \"\"\"\n # Probabilities\n p_x = self._x_probabilities[(X_i, Y_i)]\n p_y = self._y_probabilities[(X_i, Y_i)]\n p_xy = self._conditional_probabilities[(X_i, Y_i)]\n\n # Compute Entropies\n H_X = st.entropy(p_x, base=2)\n H_Y = st.entropy(p_y, base=2)\n\n H_Y_given_X = -np.sum(\n p_x * np.nansum(p_xy * np.log2(p_xy, where=p_xy > 0), axis=1)\n )\n\n # Compute Information Gain\n IG_Y_given_X = H_Y - H_Y_given_X\n\n # Compute Symmetric Uncertainty\n if H_X + H_Y == 0:\n SU_Y_X = 0\n else:\n SU_Y_X = 2 * IG_Y_given_X / (H_X + H_Y)\n\n return SU_Y_X\n\n # --------------------------\n # ALGORITHM 3: DILCA_RR(VectorSU, Y) (**Not Inmplemented**)\n # --------------------------\n\n def _DILCA_RR(self, SU_matrix: np.ndarray) -> list[np.ndarray]:\n \"\"\"\n (Placeholder for DILCA_RR implementation.)\n Compute DILCA_RR distance matrices for all attributes.\n\n Args:\n SU_matrix (np.ndarray): Symmetrical Uncertainty matrix.\n\n Returns:\n list: List of distance matrices.\n \"\"\"\n raise NotImplementedError()\n\n # --------------------------\n # ALGORITHM 2: DILCA_M(VectorSU_Y, Y, σ)\n # --------------------------\n\n def _DILCA_M(self, SU_matrix: np.ndarray, sigma: float = 1.0) -> list[np.ndarray]:\n \"\"\"\n Compute DILCA_M distance matrices for all attributes.\n\n Args:\n SU_matrix (np.ndarray): Symmetrical Uncertainty matrix.\n sigma (float): Sigma multiplier for threshold computation.\n\n Returns:\n list: List of distance matrices.\n \"\"\"\n # calculate DILCA_M for each attribute\n distance_matrices = [\n self._DILCA_M_helper(SU_matrix[:, i], i, sigma) for i in range(self._d)\n ]\n\n return distance_matrices\n\n def _DILCA_M_helper(\n self, SU_vector_Y: np.ndarray, Y_i: int, sigma: float\n ) -> np.ndarray:\n \"\"\"\n Helper function for DILCA_M to compute the distance matrix for an attribute.\n\n Args:\n SU_vector_Y (np.ndarray): Symmetrical Uncertainty values for the target attribute.\n Y_i (int): Index of the target attribute.\n sigma (float): Sigma multiplier for threshold computation.\n\n Returns:\n np.ndarray: Distance matrix for the attribute.\n \"\"\"\n # Mask to nullify the Y_ith element\n mask = np.ones_like(SU_vector_Y, dtype=bool)\n mask[Y_i] = False\n\n # Calculate mean without Y_ith value\n SU_mean_Y = sigma * np.mean(SU_vector_Y[mask])\n\n # Get indices that are >= mean (excluding Y_i itself)\n context_Y_i = np.where((SU_vector_Y >= SU_mean_Y) & mask)[0]\n\n return self._compute_distance_matrix(Y_i, context_Y_i)\n\n # --------------------------\n # ALGORITHM 4: DistanceComputation(Y, context(Y))\n # --------------------------\n\n def _compute_distance_matrix(self, Y_i: int, context_Y_i: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute a distance matrix for all values of an attribute based on a context.\n\n Args:\n Y_i (int): Index of the target attribute.\n context_Y_i (np.ndarray): Indices of the context attributes. 
This context is such that\n            the attributes belonging to this set have a high value of Symmetrical Uncertainty with\n            respect to Y_i.\n\n        Returns:\n            np.ndarray: Distance matrix.\n        \"\"\"\n        # number of unique values in Y_i\n        d = self._D[Y_i]\n\n        # initialize distance matrix\n        dist_matrix = np.zeros((d, d))\n\n        # compute distance matrix\n        for i in range(d):\n            # start from i+1 to get upper triangle without diagonal\n            for j in range(i + 1, d):\n                dist_matrix[i][j] = self._compute_distance(i, j, Y_i, context_Y_i)\n                # due to symmetry\n                dist_matrix[j][i] = dist_matrix[i][j]\n        return dist_matrix\n\n    def _compute_distance(\n        self, i: int, j: int, Y_i: int, context_Y_i: np.ndarray\n    ) -> float:\n        \"\"\"\n        Compute distance between two values of an attribute based on a context.\n\n        Args:\n            i (int): First value.\n            j (int): Second value.\n            Y_i (int): Index of the target attribute.\n            context_Y_i (np.ndarray): Indices of the context attributes.\n\n        Returns:\n            float: Computed distance.\n        \"\"\"\n\n        upper = sum(\n            np.sum(\n                np.square(\n                    self._conditional_probabilities[(Y_i, X_i)][i]\n                    - self._conditional_probabilities[(Y_i, X_i)][j]\n                )\n            )\n            for X_i in context_Y_i\n        )\n\n        lower = context_Y_i.shape[0]\n\n        distance = np.sqrt(upper / lower)\n\n        return distance\n\n    # ==========================\n    # Probabilities Computation\n    # ==========================\n\n    def _init_probabilities(self) -> None:\n        \"\"\"\n        Initialize joint and conditional probabilities for attribute pairs.\n        \"\"\"\n        for i in range(self._d):\n            # start from i+1 to get upper triangle without diagonal\n            for j in range(i + 1, self._d):\n                (\n                    x_probabilities,\n                    y_probabilities,\n                    cond_probabilities,\n                ) = self._compute_probabilities(i, j)\n\n                # Store probabilities for (i, j) pair\n                self._x_probabilities[(i, j)] = x_probabilities\n                self._y_probabilities[(i, j)] = y_probabilities\n                self._conditional_probabilities[(i, j)] = cond_probabilities\n\n                # Store probabilities for (j, i) pair due to symmetry\n                self._x_probabilities[(j, i)] = y_probabilities\n                self._y_probabilities[(j, i)] = x_probabilities\n                # transpose for symmetric conditional probabilities\n                self._conditional_probabilities[(j, i)] = cond_probabilities.T\n\n    def _compute_probabilities(\n        self, i: int, j: int\n    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:\n        \"\"\"\n        Compute joint and conditional probabilities between two attributes.\n\n        Args:\n            i (int): Index of the first attribute.\n            j (int): Index of the second attribute.\n\n        Returns:\n            tuple: Probabilities of X, Y, and their conditional probabilities.\n        \"\"\"\n        X, Y = self._X[:, i], self._X[:, j]\n        joint_counts = st.contingency.crosstab(X, Y).count\n\n        x_probabilities = joint_counts.sum(axis=1) / joint_counts.sum()\n        y_probabilities = joint_counts.sum(axis=0) / joint_counts.sum()\n        conditional_probabilities = joint_counts / joint_counts.sum()\n\n        return (x_probabilities, y_probabilities, conditional_probabilities)\n","repo_name":"jgutierrezre/lshkcenters-1.0.3","sub_path":"new/myproject/Measures/DILCA.py","file_name":"DILCA.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
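A quick numeric check of the symmetrical-uncertainty computation used by the DILCA class above, on a toy pair of categorical columns (the data values are illustrative; st.contingency.crosstab is the same SciPy helper the class itself calls):

import numpy as np
import scipy.stats as st

X = np.array([0, 0, 1, 1, 1, 0])
Y = np.array([0, 0, 1, 1, 0, 0])
counts = st.contingency.crosstab(X, Y).count
p_x = counts.sum(axis=1) / counts.sum()
p_y = counts.sum(axis=0) / counts.sum()
cond = counts / counts.sum(axis=1, keepdims=True)  # p(y|x), rows sum to 1
terms = np.where(cond > 0, cond * np.log2(np.where(cond > 0, cond, 1.0)), 0.0)
H_y_given_x = -np.sum(p_x * terms.sum(axis=1))
H_x, H_y = st.entropy(p_x, base=2), st.entropy(p_y, base=2)
SU = 2 * (H_y - H_y_given_x) / (H_x + H_y)  # symmetrical uncertainty in [0, 1]
print(round(SU, 3))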
+{"seq_id":"36255891091","text":"# read name\n# read age\n# read sex\n# for 4 people\n\n# return the group's average age\n# the name of the oldest man\n# how many women are under 20\nsoma_idade = 0\nmaior_idade_homem = 0\nnomevelho = ''\ntotal = 0\nfor i in range(1,5):\n    nome = str(input(f\"Enter name {i}: \")).strip()\n    idade = int(input(f\"Enter age {i}: \"))\n    sexo = str(input(\"Enter 'm' for male or 'f' for female: \")).strip()\n    soma_idade += idade\n    if i == 1 and sexo in 'Mm': # if sexo contains 'Mm', the matching variables receive these values\n        maior_idade_homem = idade\n        nomevelho = nome\n    if sexo in 'Mm' and idade > maior_idade_homem: # if a later pass finds a greater age, this line puts it back on top\n        maior_idade_homem = idade\n        nomevelho = nome\n    if sexo in 'Ff' and idade < 20:\n        total +=1\nmedia_idade = soma_idade / 4\nprint(f\"The group's average age is: {media_idade}\")\nprint(f\"The oldest man is named {nomevelho} and is {maior_idade_homem} years old\")\nprint(f\"There are {total} women under 20 years old\")","repo_name":"inczDan/exercicios-resolvidos-em-python-adicionais","sub_path":"analisador_completo.py","file_name":"analisador_completo.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19306198341","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom setuptools import setup, find_packages\r\n\r\nlong_desc = '''\r\nThis package contains the traclinks Sphinx extension.\r\n\r\n.. add description here ..\r\n'''\r\n\r\nrequires = ['Sphinx>=0.6']\r\n\r\nsetup(\r\n    name='traclinks',\r\n    version='0.1',\r\n    url='http://bitbucket.org/birkenfeld/sphinx-contrib',\r\n    download_url='http://pypi.python.org/pypi/traclinks',\r\n    license='MIT',\r\n    author='Kevin Horn',\r\n    author_email='kevin.horn@gmail.com',\r\n    description='Sphinx extension traclinks',\r\n    long_description=long_desc,\r\n    zip_safe=False,\r\n    classifiers=[\r\n        'Development Status :: 4 - Beta',\r\n        'Environment :: Console',\r\n        'Environment :: Web Environment',\r\n        'Intended Audience :: Developers',\r\n        'License :: OSI Approved :: BSD License',\r\n        'Operating System :: OS Independent',\r\n        'Programming Language :: Python',\r\n        'Topic :: Documentation',\r\n        'Topic :: Utilities',\r\n    ],\r\n    platforms='any',\r\n    packages=find_packages(),\r\n    include_package_data=True,\r\n    install_requires=requires,\r\n    namespace_packages=['sphinxcontrib'],\r\n)\r\n","repo_name":"thewtex/sphinx-contrib","sub_path":"traclinks/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"47"} +{"seq_id":"32358581432","text":"import os\nimport psycopg2\nimport logging\n\nlogger_work = logging.getLogger(\"work\")\n\nPG_USER = os.getenv(\"POSTGRES_USER\", \"postgres\")\nPG_PASS = os.getenv(\"POSTGRES_PASSWORD\", \"password\")\nPG_HOST = os.getenv(\"POSTGRES_HOST\", \"192.168.99.100\")\nPG_PORT = os.getenv(\"POSTGRES_PORT\", 5432)\nPG_DATABASE = os.getenv(\"POSTGRES_DATABASE\", \"ml_data\")\nITEM_TABLE = os.getenv(\"ITEM_TABLE\", \"items\")\n\n\nclass Pg:\n    \"\"\" Postgres persistence implementation \"\"\"\n    def __init__(self):\n        try:\n            self.connection = psycopg2.connect(user=PG_USER,\n                                               password=PG_PASS,\n                                               host=PG_HOST,\n                                               port=PG_PORT,\n                                               database=PG_DATABASE)\n            self.cursor = self.connection.cursor()\n        except (Exception, psycopg2.DatabaseError) as error:\n            logger_work.error(error)\n\n        self.fields = (\n            \"site\",\n            \"id\",\n            \"price\",\n            \"start_time\",\n            \"name\",\n            \"description\",\n            \"nickname\"\n        )\n\n    def __del__(self):\n        self.connection.close()\n\n    def __none_to_empty(self, value):\n        return value if value else \"\"\n\n    def __spread_data(self, item: dict) -> tuple:\n        return tuple(\n            self.__none_to_empty(item.get(field))\n            for field in self.fields)\n\n    def 
__make_line_sql(self, item: dict) -> str:\n field_names = \",\".join(self.fields)\n line_sql = f\"INSERT INTO {ITEM_TABLE} ({field_names}) VALUES {self.__spread_data(item)};\\n\" # noqa\n return line_sql\n\n def write_rows(self, data: list) -> str:\n sql = \"\"\n for item in data:\n if item:\n sql += self.__make_line_sql(item)\n try:\n self.cursor.execute(sql)\n self.connection.commit()\n except Exception as e:\n logger_work.error(f\"Cannot write data to PG.\\n{e}\")\n\n def read_row(self, pk: str) -> dict:\n print(f\"reading {pk} from postgres\")\n return {\"data\": \"fake\"}\n","repo_name":"dmalisani/etl","sub_path":"persistence/pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8787219651","text":"import requests\n\n#x = requests.get('https://ojk-invest-api.vercel.app/api/products')\n#data = x.json() #print the response text (the content of the requested file):\n#loop = data['data'] \n#for i in loop[\"products\"]:\n #print(i)\n\n#key = '92ea9528dfdf49d7a9e9acc99b9c3cb0'\n#api = requests.get(f'https://newsapi.org/v2/top-headlines?category=business&country=id&apiKey={key}')\n#dataapi = api.json()\n#for i in dataapi['articles']:\n# print(i['content'])\n\nr = requests.get(\"https://the-lazy-media-api.vercel.app/api/games/review\") \ndata1 = r.json() \n \nfor d in data1: \n #print(d['title'])\n #print(d['author'])\n #print(d['time'])\n #print(d['desc'])\n key =d['key'] \n\n r2 = requests.get(f\"https://the-lazy-media-api.vercel.app/api/detail/{key}\")\n data2 = r2.json()['results']\n data2['content']\n ","repo_name":"bobizinaidinzidan/belajar_finance","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"338128853","text":"import tensorflow as tf\r\nimport cv2\r\n\r\nimport pickle\r\nimport numpy as np\r\n\r\nimport feat_data_loader\r\nfrom head_pose.mark_detector import MarkDetector\r\nfrom queue import Queue\r\nfrom threading import Thread\r\n\r\nprint(\"OpenCV version: {}\".format(cv2.__version__))\r\n# multiprocessing may not work on Windows and macOS, check OS for safety.\r\n#detect_os()\r\n\r\ndef load_models(gan_filename='models/pg_gan/karras2018iclr-celebahq-1024x1024.pkl', inverter_filename='models/inverter/inverter_randforest_7000.pkl'):\r\n # Import official CelebA-HQ networks.\r\n with open(gan_filename, 'rb') as file:\r\n G, D, Gs = pickle.load(file)\r\n # G = Instantaneous snapshot of the generator, mainly useful for resuming a previous training run.\r\n # D = Instantaneous snapshot of the discriminator, mainly useful for resuming a previous training run.\r\n # Gs = Long-term average of the generator, yielding higher-quality results than the instantaneous snapshot.\r\n #F = tf.keras.models.load_model(feature_extractor_filename)\r\n with open(inverter_filename, 'rb') as file:\r\n I = pickle.load(file)\r\n return Gs, I\r\n\r\nMODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)\r\nage_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']\r\ngender_list = ['Male', 'Female']\r\n\r\ndef process_video_capture(inverter_filename='models/inverter/inverter_randforest_7000.pkl'):\r\n CNN_INPUT_SIZE = 128\r\n sess = tf.InteractiveSession()\r\n video_capture = cv2.VideoCapture(0)\r\n video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\r\n # Check success\r\n if not 
video_capture.isOpened():\r\n raise Exception(\"Could not open video device\")\r\n\r\n # Introduce mark_detector to detect landmarks.\r\n mark_detector = MarkDetector()\r\n\r\n #tm = cv2.TickMeter()\r\n\r\n G, I = load_models(inverter_filename=inverter_filename)\r\n age_net = cv2.dnn.readNetFromCaffe('models/race_age/deploy_age.prototxt', 'models/race_age/age_net.caffemodel')\r\n gender_net = cv2.dnn.readNetFromCaffe('models/race_age/deploy_gender.prototxt', 'models/race_age/gender_net.caffemodel')\r\n\r\n \"\"\"\r\n\r\n \"\"\"\r\n feat_queue = Queue(maxsize=5)\r\n\r\n def gen_feats_task():\r\n while True:\r\n frame_got, frame = video_capture.read()\r\n if frame_got is False:\r\n break\r\n # Crop it if frame is larger than expected.\r\n #frame = frame[0:480, 300:940]\r\n # If frame comes from webcam, flip it so it looks like a mirror.\r\n frame = cv2.flip(frame, 2)\r\n\r\n # Feed frame to image queue.\r\n facebox = mark_detector.extract_cnn_facebox(frame)\r\n\r\n if facebox is not None:\r\n feats = feat_data_loader.calculate_facial_features(frame, facebox, CNN_INPUT_SIZE, mark_detector, age_net, gender_net, draw_data=True)\r\n feat_queue.put(feats)\r\n\r\n gen_feats_thread = Thread(target=gen_feats_task, daemon=True)\r\n gen_feats_thread.start()\r\n\r\n i = 0\r\n #intercept = np.zeros(Z_DIM)\r\n while True:\r\n #intercept += np.random.normal(scale=0.1, size=intercept.shape)\r\n #if np.dot(intercept, intercept) > 1:\r\n # intercept *= 0.5\r\n # Read frame, crop it, flip it, suits your needs.\r\n feats = feat_queue.get()\r\n viewer_latent = I.predict(feats)# + intercept\r\n labels = np.zeros([viewer_latent.shape[0], 0], np.float32)\r\n viewer_generated = G.run(viewer_latent, labels,out_mul=127.5, out_add=127.5, out_dtype=np.uint8)\r\n viewer_generated = np.squeeze(viewer_generated)\r\n viewer_generated = np.transpose(viewer_generated, (1,2,0))\r\n viewer_generated = cv2.cvtColor(viewer_generated, cv2.COLOR_BGR2RGB)\r\n #facebox = mark_detector.extract_cnn_facebox(viewer_generated)\r\n #if facebox is not None:\r\n # feats_gen = calculate_facial_features(viewer_generated, facebox, CNN_INPUT_SIZE, mark_detector, age_net, gender_net)\r\n # print(\"Frame: {}, mean diff in features: {} \".format(i, np.mean(np.abs(feats_gen - feats))))\r\n # Show preview.\r\n\r\n #cv2.imwrite(\"images/output_{}.png\".format(i), viewer_generated)\r\n\r\n cv2.imshow(\"Projection\", viewer_generated)\r\n if cv2.waitKey(1) & 0XFF == ord('q'):\r\n break\r\n i += 1\r\n video_capture.release()\r\n\r\n\r\nif __name__ == '__main__':\r\n inv = 'models/inverter/inverter_ar_neural_5_10000.pkl'\r\n process_video_capture(inverter_filename=inv)\r\n","repo_name":"kyranstar/Narcissus","sub_path":"generate_live_video_multi.py","file_name":"generate_live_video_multi.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14750742669","text":"from agent import Agent\nimport itertools\nimport gym\n\ndef main():\n env = gym.make('LunarLander-v2')\n\n obs_shape = env.observation_space.sample().shape\n act_shape = env.action_space.n\n\n agent = Agent(obs_shape, act_shape)\n\n for episode in itertools.count():\n observation = env.reset()\n done = False\n for timestep in itertools.count():\n env.render()\n action = agent.take_action(observation)\n observation_next, reward, done, info = env.step(action)\n agent.remember(observation, action, observation_next, reward, done)\n agent.learn()\n\n observation = observation_next\n\n if done:\n 
print(agent.get_episode_report())\n break\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"manfredmichael/Deep-Q-Network-with-Tensorflow-2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74049515663","text":"import pandas as pd\nfrom os import path\nimport os\nfrom functions.database.queries import generalQuery, updateQuery\nimport json\n\n\nfolder_path = path.dirname(__file__)\n\npreguntas_path = \"preguntas_fantasma_1.xlsx\"\ncajeros_path = \"cajero_agencias.xlsx\"\n\nfile_preguntas = path.abspath(path.join(folder_path, \"../../docs/\", preguntas_path))\nfile_cajeros = path.abspath(path.join(folder_path, \"../../docs/\", cajeros_path))\n\n\ndef generate_data_fantasma():\n try:\n if not (os.path.exists(file_preguntas) and os.path.exists(file_cajeros)):\n return \"no existe\"\n\n new_columns_names = {\n \"EL PORTAL\": \"PORTAL SHOPPING\",\n \"EXPRESS JAPON MANTA\": \"SERVIP EXPRESS JAPON MANTA\",\n \"EXPRESS UNIVERSIDAD CENTRAL\": \"SERVP EXPRESS UNIVERSIDAD CENTRAL\",\n \"GRAN AKI DURAN\": \"GRAN AKI DURÁN\",\n \"MOLINEROS EXPRESS\": \"MOLINEROS\",\n }\n # reading excel files\n df_cajeros = pd.read_excel(file_cajeros)\n df_preguntas = pd.read_excel(file_preguntas)\n # formatting dataFrames\n df_preguntas = df_preguntas.apply(lambda x: x.replace(\"-\", 1))\n df_preguntas = df_preguntas.replace('\"', \"`\", regex=True)\n df_preguntas = df_preguntas.rename(columns=new_columns_names)\n df_cajeros = df_cajeros.replace(new_columns_names)\n\n agencias = df_preguntas.keys().tolist()[1:]\n agencias.sort()\n\n preguntas = df_preguntas[\"PREGUNTA\"].tolist()\n\n preguntas_actitud = preguntas[:7]\n preguntas_destrezas = preguntas[7:11]\n preguntas_imagen = preguntas[11:13]\n\n def generate_items(arr, arr_calf):\n items = []\n for i in range(len(arr)):\n obj_aux = {\n \"id\": i + 1,\n \"question\": arr[i],\n \"total\": 100,\n \"correct\": arr_calf[i] * 100,\n }\n items.append(obj_aux)\n return items\n\n def generate_plantilla(arr_calf):\n items_actitud = generate_items(preguntas_actitud, arr_calf)\n items_destrezas = generate_items(preguntas_destrezas, arr_calf)\n items_imagen = generate_items(preguntas_imagen, arr_calf)\n\n plantilla = {\n \"actitud\": {\"display_name\": \"Actitud\", \"items\": items_actitud},\n \"destrezas_de_servicio\": {\n \"display_name\": \"Destrezas de servicio\",\n \"items\": items_destrezas,\n },\n \"imagen_y_orden\": {\n \"display_name\": \"Imagen y orden\",\n \"items\": items_imagen,\n },\n }\n return plantilla\n\n query_str = \"INSERT INTO plantillas_cliente_fantasma (medicion,zona,ciudad, agencia,tipo_agencia, cajero,fecha, ingresador_id, supervisor_id, items) VALUES \"\n for agencia in agencias:\n arr_calf = df_preguntas[agencia].tolist()\n result = generalQuery(\n f\"SELECT ZONA,CIUDAD,TIPO_AGENCIA FROM `gestionfinal` WHERE AGENCIA = '{agencia}' \"\n )\n medicion = \"M1\"\n zona = result[0][0]\n ciudad = result[0][1]\n tipo_agencia = result[0][2]\n cajero = df_cajeros.loc[df_cajeros[\"AGENCIA\"] == f\"{agencia}\"][\n \"COLABORADOR\"\n ].iloc[0]\n fecha = \"\"\n ingresador_id = 100\n supervisor_id = 100\n plantilla = generate_plantilla(arr_calf)\n encoded_JSON = json.dumps(plantilla, ensure_ascii=False)\n query_str += (\n \"(\"\n + f\"'{medicion}',\"\n + f\"'{zona}',\"\n + f\"'{ciudad}',\"\n + f\"'{agencia}',\"\n + f\"'{tipo_agencia}',\"\n + f\"'{cajero}',\"\n + f\"'{fecha}',\"\n + f\"{ingresador_id},\"\n + 
f\"{supervisor_id},\"\n + f\"'{encoded_JSON}'\"\n + \"),\"\n )\n # return query_str[:-1]\n # return updateQuery(query_str[:-1])\n # return query_str[:-1]\n result_query = updateQuery(query_str[:-1])\n print(result_query)\n return result_query\n\n except Exception as e:\n print(e)\n return False\n","repo_name":"R00rss/insight_us_white","sub_path":"backend-bcr/functions/manage_data/fantasma.py","file_name":"fantasma.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35674821499","text":"from PIL import Image\r\nimport numpy as np\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import confusion_matrix\r\nimport mnist, pandas as pd\r\nfrom tensorflow.keras import layers, Sequential, callbacks, optimizers, models\r\nimport matplotlib.pyplot as plt\r\n\r\n# x_train = mnist.train_images()\r\n# y_train = mnist.train_labels()\r\n# x_test = mnist.test_images()\r\n# y_test = mnist.test_labels()\r\nx_train = pd.read_csv('C:/Users/Val/Desktop/mnist/train.csv')\r\ny_train = x_train.pop('label')\r\nx_train = x_train.values\r\nx_test = pd.read_csv('C:/Users/Val/Desktop/mnist/test.csv')\r\n\r\nx_test = x_test.values\r\nx_train = x_train/256\r\nx_test = x_test/256\r\n\r\n\r\n# clf = MLPClassifier(solver='adam', activation='relu', hidden_layer_sizes=(64, 64))\r\n# clf.fit(x_train, y_train)\r\n# prediction = clf.predict(x_test)\r\n# prediction = pd.DataFrame(prediction)\r\n# prediction.to_csv('C:/Users/Val/Desktop/mnist/submission dnn sklearn.csv')\r\n# accuracy_1 = confusion_matrix(y_test, prediction).trace()/confusion_matrix(y_test, prediction).sum()\r\n\r\n\r\ndef build_model():\r\n dnn = Sequential([\r\n layers.Flatten(),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(10, activation='softmax')\r\n ])\r\n dnn.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.Adam(0.0001))\r\n dnn.fit(x_train, y_train, epochs=100, validation_split=0.2, verbose=0,\r\n callbacks=[callbacks.EarlyStopping(patience=10, monitor='val_loss')])\r\n\r\n\r\ndnn = models.load_model('mnist 9.model')\r\npredictions = dnn.predict(x_test)\r\npredictions = pd.DataFrame(np.array([np.argmax(predictions[i]) for i in range(len(predictions))]))\r\npredictions.to_csv('C:/Users/Val/Desktop/mnist/submission dnn keras.csv')\r\n\r\n","repo_name":"csvaldellon/Kaggle-Beginner-Competitions","sub_path":"mnist dnn.py","file_name":"mnist dnn.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42479978125","text":"# -*-coding:gb18030-*-\nfrom django.conf.urls.defaults import *\n\nimport settings\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n # Example:\n # (r'^tuan/', include('tuan.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n (r'^admin/', include(admin.site.urls)),\n\n (r'^static/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_ROOT}),\n\n (r'^spider/', include('tuan.spider.urls')),\n)\n\nurlpatterns += patterns(\n 'views.views',\n # /category//page//\n (r'^$', 'tuan'),\n (r'^(?P\\d+)/$', 'tuan_page'),\n (r'^(?P[a-zA-Z\\-_]+)/$', 'tuan_city'),\n 
(r'^(?P[a-zA-Z\\-_]+)/page/(?P\\d+)/$', 'tuan_city_page'),\n (r'^(?P[a-zA-Z\\-_]+)/category/(?P\\d+)/$', 'tuan_city_category'),\n (r'^(?P[a-zA-Z\\-_]+)/category/(?P\\d+)/page/(?P\\d+)/$', 'tuan_city_category_page'),\n)\n","repo_name":"kebing/doudoutu","sub_path":"tuan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"17333459939","text":"import pickle\r\nfrom pandas import DataFrame, get_dummies, read_csv\r\nfrom data import columns_regression, columns_cluster, dictGenre\r\nfrom sqlalchemy import create_engine\r\n\r\n### Import Spotipy API and authenticating\r\nimport spotipy\r\nfrom spotipy.oauth2 import SpotifyClientCredentials\r\ncid = '9d6662eb9b5d4385a41336390cd9053e'\r\nsecret = 'd36d9e2db9fb4da8b4e15cf72a9835ae'\r\nclient_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)\r\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\r\n\r\n### Connection to MySQL\r\nengine = create_engine('mysql://root:ray1581994@localhost/spotify?host=localhost?port=3306')\r\nconn = engine.connect()\r\ndataset = conn.execute(\"Select * from spotify.dataset\").fetchall()\r\ndataset = DataFrame(dataset,columns=['Unnamed: 0', 'track_id', 'track_name', 'artist_name', 'id', 'uri',\r\n 'track_href', 'analysis_url', 'year', 'danceability', 'energy', 'key',\r\n 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness',\r\n 'liveness', 'valence', 'tempo', 'type', 'duration_ms', 'time_signature',\r\n 'popularity', 'genre'])\r\n\r\nmodel_regression = pickle.load(open(\"regression.sav\",'rb'))\r\nmodel_clustering = pickle.load(open(\"clustering.sav\",\"rb\"))\r\nscaler = pickle.load(open('scaler.sav','rb'))\r\npca = pickle.load(open('pca.sav','rb'))\r\nkmeans = pickle.load(open(\"kmeans.sav\",\"rb\"))\r\n\r\ndef prediction_regression(data):\r\n df = DataFrame(data,index=[0])\r\n df[[\"danceability\",\"energy\",\"loudness\",\"speechiness\",\"acousticness\",\"instrumentalness\",\"liveness\",\"valence\",\"tempo\",\"duration_ms\"]] = df[[\"danceability\",\"energy\",\"loudness\",\"speechiness\",\"acousticness\",\"instrumentalness\",\"liveness\",\"valence\",\"tempo\",\"duration_ms\"]].astype(float)\r\n df = get_dummies(data=df,columns=[\"mode\",\"key\",'time_signature'])\r\n df = df.reindex(columns=columns_regression, fill_value=0)\r\n result = model_regression.predict(df)\r\n return int(round(result[0]))\r\n\r\ndef prediction_clustering(data):\r\n df_cluster = DataFrame(data,index=[0])\r\n df_cluster[[\"danceability\",\"energy\",\"loudness\",\"speechiness\",\"acousticness\",\"instrumentalness\",\"liveness\",\"valence\",\"tempo\",\"duration_ms\"]] = df_cluster[[\"danceability\",\"energy\",\"loudness\",\"speechiness\",\"acousticness\",\"instrumentalness\",\"liveness\",\"valence\",\"tempo\",\"duration_ms\"]].astype(float)\r\n df_cluster = get_dummies(data=df_cluster,columns=[\"mode\",\"key\",'time_signature'])\r\n df_cluster = df_cluster.reindex(columns=columns_cluster, fill_value=0)\r\n step1 = scaler.transform(df_cluster)\r\n step2 = pca.transform(step1)\r\n result = kmeans.predict(step2)\r\n convert = dictGenre[result[0]]\r\n return convert\r\n\r\ndef get_recommendation(genre):\r\n df_rec = dataset\r\n index_song = df_rec[df_rec[\"genre\"] == genre].sort_values(by=\"popularity\",ascending=False).head(5).index\r\n\r\n track_name = []\r\n artist_name = []\r\n track_id = []\r\n cover_link = []\r\n spotify_link = []\r\n\r\n for 
item in index_song:\r\n        song = df_rec.iloc[item]\r\n        track_name.append(song[\"track_name\"])\r\n        artist_name.append(song[\"artist_name\"])\r\n        track_id.append(song[\"track_id\"])\r\n        spotify = \"https://open.spotify.com/track/\" + str(song[\"track_id\"])\r\n        spotify_link.append(spotify)\r\n        cover_link.append(sp.track(song[\"track_id\"])['album']['images'][1]['url'])\r\n    \r\n    return track_name, artist_name, track_id, cover_link, spotify_link\r\n\r\ndef prediction_song(data):\r\n    id = sp.search(q=\"artist:{} track:{}\".format(data[\"artist_name\"],data[\"track_name\"]),type=\"track\",limit=1)['tracks'][\"items\"][0][\"id\"]\r\n    audio_features = sp.audio_features(id)[0]\r\n    data1 = DataFrame(audio_features,index=[0])\r\n    data1 = data1.drop([\"type\",\"id\",\"uri\",\"track_href\",\"analysis_url\"],axis=1)\r\n    data1[[\"key\",\"mode\",\"time_signature\"]] = data1[[\"key\",\"mode\",\"time_signature\"]].astype('object')\r\n    data1 = get_dummies(data=data1,columns=[\"mode\",\"key\",'time_signature'])\r\n    data1 = data1.reindex(columns=columns_regression, fill_value=0)\r\n    result = model_regression.predict(data1)\r\n    result = int(round(result[0]))\r\n    cover = sp.track(id)['album']['images'][1]['url']\r\n    spotify = \"https://open.spotify.com/track/\" + str(id)\r\n    name = data[\"artist_name\"]\r\n    track = data[\"track_name\"]\r\n    return result, cover, spotify, name, track\r\n\r\n","repo_name":"rayefraim/JCDSBDG02-Final-Project","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
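The prediction module above rebuilds its DataFrame by fetching raw rows and typing out every column name by hand; pandas.read_sql infers the columns from the query itself. A minimal sketch of that shortcut (the connection string is a placeholder, not the project's real credentials):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("mysql://user:password@localhost/spotify")
# Column names come back from the cursor, so no hand-written column list is needed.
dataset = pd.read_sql("SELECT * FROM spotify.dataset", engine)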
 +{"seq_id":"28274141924","text":"import pyxel\r\nfrom random import randint, random\r\n\r\nfrom background import Background\r\nfrom enemy import ENEMY_BULLETS, Enemy\r\nfrom medium_boss import MEDIUM_ENEMY_BULLETS, MediumEnemy\r\nfrom player import Player\r\nfrom boss import Boss, BOSS_BULLETS\r\n\r\n\r\nclass Game:\r\n    \"\"\"\r\n    This class represents the game\r\n    \"\"\"\r\n\r\n    def __init__(self):\r\n        self.player = Player()\r\n\r\n        self.background = Background()\r\n\r\n        self.enemies = [Enemy(0, 20, 1)]\r\n        self.medium_enemies = []\r\n\r\n        self.score = 0\r\n\r\n        # Track how long it has been since an enemy last spawned\r\n        self.since_last_enemy_spawn = 0\r\n        self.since_last_medium_enemy_spawn = 0\r\n\r\n        self.boss = None\r\n        # TODO\r\n        self.BOSS_INTERVAL = 30 * 30\r\n        self.boss_timeout = self.BOSS_INTERVAL\r\n\r\n    def spawn_enemies(self):\r\n        \"\"\"\r\n        Spawns enemies\r\n        \"\"\"\r\n        if (self.boss is None) and random() < 0.02 or self.since_last_enemy_spawn > 120:  # 2% chance, or if no\r\n            # enemy has appeared for 1s\r\n            self.enemies.append(Enemy(randint(0, 128), -10, 1))\r\n            self.since_last_enemy_spawn = 0\r\n        elif self.boss is None:\r\n            self.since_last_enemy_spawn += 1\r\n\r\n        if (self.boss is None) and random() < 0.005 or self.since_last_medium_enemy_spawn > 240:  # 0.5% chance, or\r\n            # if none has appeared for 4s\r\n            self.medium_enemies.append(MediumEnemy(-10, randint(0, 25), 1))\r\n            self.since_last_medium_enemy_spawn = 0\r\n        elif self.boss is None:\r\n            self.since_last_medium_enemy_spawn += 1  # track the medium-enemy counter, not the regular one\r\n\r\n    def spawn_boss(self):\r\n        \"\"\"\r\n        Spawns a boss\r\n        \"\"\"\r\n        if not (self.boss is None):\r\n            return\r\n\r\n        if self.boss_timeout < 1:\r\n            self.boss = Boss(64, -10, 0.5, 2)\r\n            self.boss_timeout = self.BOSS_INTERVAL\r\n        else:\r\n            self.boss_timeout -= 1\r\n\r\n    def boss_killed(self):\r\n        \"\"\"\r\n        Called when the boss dies\r\n        \"\"\"\r\n        self.boss = None\r\n\r\n        self.player.life = self.player.MAX_LIFE\r\n\r\n        self.score += 100\r\n\r\n    def draw(self):\r\n        # If the player's life is at 0, show \"game over\"\r\n        if self.player.life <= 0:\r\n\r\n            score_text = str(self.score)\r\n\r\n            pyxel.text(47 - ((len(score_text) - 1) * 3), 20, f\"Score: {score_text}\", 7)\r\n            pyxel.text(45, 62, \"GAME OVER\", 7)\r\n            pyxel.text(35, 90, \"Press [F]\", 7)\r\n            pyxel.text(35, 100, \"to restart\", 7)\r\n\r\n            return\r\n\r\n        # Draw the background\r\n        self.background.draw()\r\n\r\n        # Draw the enemies\r\n        for enemy in self.enemies:\r\n            enemy.draw()\r\n\r\n        for enemy in self.medium_enemies:\r\n            enemy.draw()\r\n\r\n        if not (self.boss is None):\r\n            self.boss.draw()\r\n\r\n        self.draw_bullets()\r\n\r\n        # Draw the player last\r\n        self.player.draw()\r\n\r\n        # Draw the player's life\r\n        self.draw_life()\r\n\r\n        # Draw the score\r\n        pyxel.text(10, 10, f\"Score: {self.score}\", 7)\r\n\r\n        # Boss alert\r\n        if self.boss_timeout <= 30 * 4:  # 4s before:\r\n            pyxel.text(30, 64, \"Boss incoming !!\", 7)\r\n\r\n    @staticmethod\r\n    def draw_bullets():\r\n        \"\"\"\r\n        Draws all enemy bullets\r\n        \"\"\"\r\n        for bullet in ENEMY_BULLETS:\r\n            bullet.draw()\r\n\r\n        for bullet in MEDIUM_ENEMY_BULLETS:\r\n            bullet.draw()\r\n\r\n        for bullet in BOSS_BULLETS:\r\n            bullet.draw()\r\n\r\n    def draw_life(self):\r\n        \"\"\"\r\n        Draws the player's life\r\n        \"\"\"\r\n        for i in range(0, self.player.life):\r\n            pyxel.blt(10 + (5 + 6) * i, 113, 0, 0, 19, 7, 5, colkey=0)\r\n\r\n    def update(self):\r\n        # If the player is dead, stop the update\r\n        if self.player.life <= 0:\r\n            return\r\n\r\n        # Update the player\r\n        self.player.update()\r\n\r\n        self.spawn_boss()\r\n\r\n        # Update the background\r\n        self.background.update()\r\n\r\n        # Update the enemies\r\n        for enemy in self.enemies:\r\n            enemy.update(self.player)\r\n\r\n        for enemy in self.medium_enemies:\r\n            enemy.update(self.player)\r\n\r\n        if not (self.boss is None):\r\n            self.boss.update(self.player)\r\n\r\n        # Check the bullets and the enemies\r\n        self.bullets_kills()\r\n        self.update_bullets()\r\n        self.check_enemies()\r\n\r\n        # Spawn enemies\r\n        self.spawn_enemies()\r\n\r\n    @staticmethod\r\n    def update_bullets():\r\n        \"\"\"\r\n        Updates the bullets\r\n        \"\"\"\r\n        bullets_to_remove = []\r\n        for i in range(0, len(ENEMY_BULLETS)):\r\n            bullet = ENEMY_BULLETS[i]\r\n            if not (0 <= bullet.y <= 128):\r\n                bullets_to_remove.append(i)\r\n            else:\r\n                bullet.update()\r\n\r\n        # Clean up\r\n        for i in range(0, len(bullets_to_remove)):\r\n            ENEMY_BULLETS.pop(bullets_to_remove[i] - i)\r\n\r\n        medium_bullets_to_remove = []\r\n        for i in range(0, len(MEDIUM_ENEMY_BULLETS)):\r\n            bullet = MEDIUM_ENEMY_BULLETS[i]\r\n            if not (0 <= bullet.y <= 128):\r\n                medium_bullets_to_remove.append(i)\r\n            else:\r\n                bullet.update()\r\n\r\n        # Clean up\r\n        for i in range(0, len(medium_bullets_to_remove)):\r\n            MEDIUM_ENEMY_BULLETS.pop(medium_bullets_to_remove[i] - i)\r\n\r\n        boss_bullets_to_remove = []\r\n\r\n        for i in range(0, len(BOSS_BULLETS)):\r\n            bullet = BOSS_BULLETS[i]\r\n            if not (0 <= bullet.y <= 128):\r\n                boss_bullets_to_remove.append(i)\r\n            else:\r\n                bullet.update()\r\n\r\n        # Clean up\r\n        for i in range(0, len(boss_bullets_to_remove)):\r\n            BOSS_BULLETS.pop(boss_bullets_to_remove[i] - i)\r\n\r\n    def bullets_kills(self):\r\n        \"\"\"\r\n        Destroys enemies when a player bullet hits them\r\n        \"\"\"\r\n        bullets_to_remove = []\r\n\r\n        for i in range(0, len(self.player.bullets)):\r\n            bullet = self.player.bullets[i]\r\n\r\n            for enemy in list(self.enemies):  # iterate over a copy, since we may remove while looping\r\n                if (enemy.x <= bullet.x <= enemy.x + 9) and (enemy.y <= bullet.y <= enemy.y + 7):\r\n                    # The bullet hit the enemy\r\n                    # Remove it\r\n                    self.enemies.remove(enemy)\r\n                    self.score += 1\r\n                    bullets_to_remove.append(i)\r\n\r\n            for enemy in list(self.medium_enemies):  # iterate over a copy here too\r\n                if (enemy.x <= bullet.x <= enemy.x + 9) and (enemy.y <= bullet.y <= enemy.y + 7):\r\n                    # The bullet hit the enemy\r\n                    enemy.life -= 1\r\n                    if enemy.life < 1:\r\n                        self.medium_enemies.remove(enemy)\r\n                        self.score += 5\r\n                    bullets_to_remove.append(i)\r\n\r\n            if not (self.boss is None) and (self.boss.x <= bullet.x <= self.boss.x + 47) and (\r\n                    self.boss.y <= bullet.y <= self.boss.y + 43):\r\n                self.boss.life -= 1\r\n                if self.boss.life < 1:\r\n                    self.boss_killed()\r\n\r\n        # Remove these bullets (deduplicated, highest index first, so earlier pops don't shift later ones)\r\n        for i in sorted(set(bullets_to_remove), reverse=True):\r\n            self.player.bullets.pop(i)\r\n\r\n    def check_enemies(self):\r\n        \"\"\"\r\n        Checks whether an enemy reached the bottom OR touched the player.\r\n        Also checks whether an enemy bullet hit the player\r\n        \"\"\"\r\n\r\n        enemies_to_remove = []\r\n\r\n        for enemy in self.enemies:\r\n            if enemy.y > 138:\r\n                # It is out of bounds!\r\n                enemies_to_remove.append(enemy)\r\n                self.player.life -= 1\r\n\r\n            if (enemy.y <= self.player.y <= enemy.y + 9) and (enemy.x <= self.player.x <= enemy.x + 7):\r\n                # An enemy touched the player\r\n                self.player.life -= 1\r\n\r\n        # Remove these enemies\r\n        for enemy in enemies_to_remove:\r\n            self.enemies.remove(enemy)\r\n\r\n        medium_enemies_to_remove = []\r\n\r\n        for enemy in self.medium_enemies:\r\n            if enemy.y > 138:\r\n                # It is out of bounds!\r\n                medium_enemies_to_remove.append(enemy)\r\n                self.player.life -= 1\r\n\r\n            if (enemy.y <= self.player.y <= enemy.y + 9) and (enemy.x <= self.player.x <= enemy.x + 7):\r\n                # An enemy touched the player\r\n                self.player.life -= 1\r\n\r\n        # Remove these enemies\r\n        for enemy in medium_enemies_to_remove:\r\n            self.medium_enemies.remove(enemy)\r\n\r\n        # Check the bullets\r\n        bullets_to_remove = []\r\n\r\n        for bullet in ENEMY_BULLETS:\r\n            if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n                    self.player.y <= bullet.y <= self.player.y + 7):\r\n                self.player.life -= 1\r\n                bullets_to_remove.append(bullet)\r\n\r\n        # Remove these bullets\r\n        for bullet in bullets_to_remove:\r\n            ENEMY_BULLETS.remove(bullet)\r\n\r\n        medium_bullets_to_remove = []\r\n        for bullet in MEDIUM_ENEMY_BULLETS:\r\n            if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n                    self.player.y <= bullet.y <= self.player.y + 7):\r\n                self.player.life -= 1\r\n                medium_bullets_to_remove.append(bullet)\r\n\r\n        # Remove these bullets\r\n        for bullet in medium_bullets_to_remove:\r\n            MEDIUM_ENEMY_BULLETS.remove(bullet)\r\n\r\n        # Check the bullets\r\n        boss_bullets_to_remove = []\r\n\r\n        for bullet in BOSS_BULLETS:\r\n            if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n                    self.player.y <= bullet.y <= self.player.y + 7):\r\n                self.player.life -= 1\r\n                boss_bullets_to_remove.append(bullet)\r\n\r\n        # Remove these bullets\r\n        for bullet in boss_bullets_to_remove:\r\n            BOSS_BULLETS.remove(bullet)\r\n","repo_name":"Sedorikku1949/PyxelCuvier","sub_path":"6_metres_a_3/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
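The Game class above repeats the same four-part inequality chain for every hit test. A small axis-aligned bounding-box helper, as a sketch of how those checks could be factored out (the box sizes below match the sprite dimensions the original checks assume):

def aabb_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    # True when two axis-aligned rectangles intersect.
    return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

assert aabb_overlap(0, 0, 9, 7, 5, 5, 9, 7)
assert not aabb_overlap(0, 0, 9, 7, 20, 20, 9, 7)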
[]\r\n\r\n for i in range(0, len(self.player.bullets)):\r\n bullet = self.player.bullets[i]\r\n\r\n for enemy in self.enemies:\r\n if (enemy.x <= bullet.x <= enemy.x + 9) and (enemy.y <= bullet.y <= enemy.y + 7):\r\n # La balle a touché l'ennemi\r\n # On le supprime\r\n self.enemies.remove(enemy)\r\n self.score += 1\r\n bullets_to_remove.append(i)\r\n\r\n for enemy in self.medium_enemies:\r\n if (enemy.x <= bullet.x <= enemy.x + 9) and (enemy.y <= bullet.y <= enemy.y + 7):\r\n # La balle a touché l'ennemi\r\n enemy.life -= 1\r\n if enemy.life < 1:\r\n self.medium_enemies.remove(enemy)\r\n self.score += 5\r\n bullets_to_remove.append(i)\r\n\r\n if not (self.boss is None) and (self.boss.x <= bullet.x <= self.boss.x + 47) and (\r\n self.boss.y <= bullet.y <= self.boss.y + 43):\r\n self.boss.life -= 1\r\n if self.boss.life < 1:\r\n self.boss_killed()\r\n\r\n # On retire ces balles\r\n for bullet in bullets_to_remove:\r\n self.player.bullets.pop(bullet)\r\n\r\n def check_enemies(self):\r\n \"\"\"\r\n Cette fonction vérifie si un enemies a atteint le fond OU s'il a touché le joueur.\r\n Elle vérifie également si une balle d'un enemies a touché le joueur\r\n \"\"\"\r\n\r\n enemies_to_remove = []\r\n\r\n for enemy in self.enemies:\r\n if enemy.y > 138:\r\n # Il est en dehors !\r\n enemies_to_remove.append(enemy)\r\n self.player.life -= 1\r\n\r\n if (enemy.y <= self.player.y <= enemy.y + 9) and (enemy.x <= self.player.x <= enemy.x + 7):\r\n # Un ennemi a touché le joueur\r\n self.player.life -= 1\r\n\r\n medium_enemies_to_remove = []\r\n\r\n for enemy in self.medium_enemies:\r\n if enemy.y > 138:\r\n # Il est en dehors !\r\n medium_enemies_to_remove.append(enemy)\r\n self.player.life -= 1\r\n\r\n if (enemy.y <= self.player.y <= enemy.y + 9) and (enemy.x <= self.player.x <= enemy.x + 7):\r\n # Un ennemi a touché le joueur\r\n self.player.life -= 1\r\n\r\n # On retire ces balles\r\n for bullet in medium_enemies_to_remove:\r\n self.medium_enemies.remove(bullet)\r\n\r\n # On vérifie les balles\r\n bullets_to_remove = []\r\n\r\n for bullet in ENEMY_BULLETS:\r\n if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n self.player.y <= bullet.y <= self.player.y + 7):\r\n self.player.life -= 1\r\n bullets_to_remove.append(bullet)\r\n\r\n # On retire ces balles\r\n for bullet in bullets_to_remove:\r\n ENEMY_BULLETS.remove(bullet)\r\n\r\n medium_bullets_to_remove = []\r\n for bullet in MEDIUM_ENEMY_BULLETS:\r\n if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n self.player.y <= bullet.y <= self.player.y + 7):\r\n self.player.life -= 1\r\n medium_bullets_to_remove.append(bullet)\r\n\r\n # On retire ces balles\r\n for bullet in medium_bullets_to_remove:\r\n MEDIUM_ENEMY_BULLETS.remove(bullet)\r\n\r\n # On vérifie les balles\r\n boss_bullets_to_remove = []\r\n\r\n for bullet in BOSS_BULLETS:\r\n if (self.player.x <= bullet.x <= self.player.x + 9) and (\r\n self.player.y <= bullet.y <= self.player.y + 7):\r\n self.player.life -= 1\r\n boss_bullets_to_remove.append(bullet)\r\n\r\n # On retire ces balles\r\n for bullet in boss_bullets_to_remove:\r\n BOSS_BULLETS.remove(bullet)\r\n","repo_name":"Sedorikku1949/PyxelCuvier","sub_path":"6_metres_a_3/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20305727088","text":"from rest_framework import status\nfrom rest_framework.test import APIClient\nfrom django.contrib.auth.models import User\nfrom store.models import 
Collection\nfrom model_bakery import baker\nimport pytest\n\n\n@pytest.fixture\ndef authenticate(api_client):\n def do_authenticate(userObject):\n return api_client.force_authenticate(user=userObject)\n return do_authenticate\n\n@pytest.fixture\ndef create_collection(api_client):\n def do_create_collection(collection):\n return api_client.post('http://127.0.0.1:8000/store/collections/',collection)\n return do_create_collection\n\n@pytest.mark.django_db\nclass TestCreateCollection:\n #@pytest.mark.skip\n def test_if_user_is_anonymous_return_401(self, create_collection):\n #Arrange- nothing here\n #Act\n\n response=create_collection({'title':'a'})\n\n #Assert\n assert response.status_code==status.HTTP_401_UNAUTHORIZED\n\n\n def test_if_user_is_not_admin_return_403(self, authenticate, create_collection):\n #Arrange- nothing here\n #Act\n\n authenticate({})\n response=create_collection({'title':'a'})\n\n #Assert\n assert response.status_code==status.HTTP_403_FORBIDDEN\n\n\n def test_if_data_is_invalid_return_400(self,authenticate, create_collection):\n #Arrange- nothing here\n #Act\n\n authenticate(User(is_staff=True))\n response=create_collection({'title':''})\n\n #Assert\n assert response.status_code==status.HTTP_400_BAD_REQUEST\n assert response.data['title'] is not None\n\n\n def test_if_data_is_valid_return_201(self,authenticate, create_collection):\n #Arrange- nothing here\n #Act\n\n authenticate(User(is_staff=True))\n response=create_collection({'title':'a'})\n\n #Assert\n assert response.status_code==status.HTTP_201_CREATED\n assert response.data['id']>0\n\n\n@pytest.mark.django_db\nclass TestRetrieveCollection:\n def test_if_collection_exists_return_200(self, api_client):\n collection=baker.make(Collection)\n response=api_client.get(f'http://127.0.0.1:8000/store/collections/{collection.id}/')\n assert response.status_code==status.HTTP_200_OK\n assert response.data=={\n 'id':collection.id,\n 'title':collection.title,\n 'products_count':0\n }\n\n ","repo_name":"maryam-mouzarani/django-practice","sub_path":"store/tests/test_collections.py","file_name":"test_collections.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36635530878","text":"import sys, os\nsys.path.append('./discoverlib')\nfrom discoverlib import graph\nfrom discoverlib import geom\n\nBRANCH_THRESHOLD = 15\nLOOP_THRESHOLD = 50\n\nclass Group(object):\n\tdef __init__(self):\n\t\tself.l = []\n\n\tdef add(self, x):\n\t\tif x not in self.l:\n\t\t\tself.l.append(x)\n\n\tdef update(self, other):\n\t\tfor x in other.l:\n\t\t\tself.add(x)\n\n\tdef head(self):\n\t\treturn self.l[0]\n\n\tdef __iter__(self):\n\t\treturn iter(self.l)\n\n\tdef __len__(self):\n\t\treturn len(self.l)\n\ndef func(in_fname, out_fname):\n\n\tg = graph.read_graph(in_fname)\n\n\tbad_edges = set()\n\tmerge_vertices = {}\n\tmerge_groups = []\n\n\troad_segments, _ = graph.get_graph_road_segments(g)\n\tedge_index = g.edgeIndex()\n\n\t# prune short branches\n\tfor rs in road_segments:\n\t\tif (len(rs.dst().out_edges) < 2 or len(rs.src().out_edges) < 2) and rs.length() < BRANCH_THRESHOLD:\n\t\t\tfor edge in rs.edges:\n\t\t\t\tbad_edges.add(edge)\n\n\t# merge short loops\n\tfor rs in road_segments:\n\t\tif rs.length() < LOOP_THRESHOLD:\n\t\t\tif rs.src() in merge_vertices and rs.dst() in merge_vertices:\n\t\t\t\tgroup = merge_vertices[rs.src()]\n\t\t\t\tdst_group = merge_vertices[rs.dst()]\n\t\t\t\tif group != 
dst_group:\n\t\t\t\t\tgroup.update(dst_group)\n\t\t\t\t\tfor vertex in dst_group:\n\t\t\t\t\t\tmerge_vertices[vertex] = group\n\t\t\telif rs.src() in merge_vertices:\n\t\t\t\tgroup = merge_vertices[rs.src()]\n\t\t\t\tgroup.add(rs.dst())\n\t\t\t\tmerge_vertices[rs.dst()] = group\n\t\t\telif rs.dst() in merge_vertices:\n\t\t\t\tgroup = merge_vertices[rs.dst()]\n\t\t\t\tgroup.add(rs.src())\n\t\t\t\tmerge_vertices[rs.src()] = group\n\t\t\telse:\n\t\t\t\tgroup = Group()\n\t\t\t\tgroup.add(rs.src())\n\t\t\t\tgroup.add(rs.dst())\n\t\t\t\tmerge_vertices[rs.src()] = group\n\t\t\t\tmerge_vertices[rs.dst()] = group\n\t\t\t\tmerge_groups.append(group)\n\t\t\tfor edge in rs.edges:\n\t\t\t\tmerge_vertices[edge.src] = group\n\t\t\t\tmerge_vertices[edge.dst] = group\n\t\t\t\tgroup.add(edge.src)\n\t\t\t\tgroup.add(edge.dst)\n\n\tdef get_avg(group):\n\t\tpoint_sum = geom.Point(0, 0)\n\t\tfor vertex in group:\n\t\t\tpoint_sum = point_sum.add(vertex.point)\n\t\treturn point_sum.scale(1.0 / len(group))\n\n\tng = graph.Graph()\n\tvertex_map = {}\n\n\tdef get_vertex(vertex):\n\t\tif vertex in merge_vertices:\n\t\t\tgroup = merge_vertices[vertex]\n\t\t\tgroup_head = group.head()\n\t\t\tif group_head not in vertex_map:\n\t\t\t\tvertex_map[group_head] = ng.add_vertex(get_avg(group))\n\t\t\treturn vertex_map[group_head]\n\t\telse:\n\t\t\tif vertex not in vertex_map:\n\t\t\t\tvertex_map[vertex] = ng.add_vertex(vertex.point)\n\t\t\treturn vertex_map[vertex]\n\n\tfor edge in g.edges:\n\t\tif edge in bad_edges:\n\t\t\tcontinue\n\t\tsrc = get_vertex(edge.src)\n\t\tdst = get_vertex(edge.dst)\n\t\tif src == dst:\n\t\t\tcontinue\n\t\tng.add_edge(src, dst)\n\n\tng.save(out_fname)\n\nif __name__ == '__main__':\n\tos.popen('mkdir out_graph_big_uf_clean')\n\timport glob\n\tfiles = glob.glob('out_graph_big_uf/*')\n\tfor file in files:\n\t\tprint(file)\n\t\tfunc(file, file.replace('out_graph_big_uf', 'out_graph_big_uf_clean'))\n\n\n\n\n","repo_name":"lizuoyue/ETH-Thesis","sub_path":"eval_road/2-clean.py","file_name":"2-clean.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"47"}
{"seq_id":"30212550482","text":"# IMPORT MODULES\nimport os\nimport sys\n# IMPORT qt_core\nfrom qt_core import *\n# IMPORT MAIN WINDOW\nfrom gui.windows.main_window.ui_main_window import *\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Curso de Python e PySide6\")\n\n # SETUP MAIN WINDOW\n self.ui = UI_MainWindow()\n self.ui.setup_ui(self)\n\n # Toggle button\n self.ui.toggle_button.clicked.connect(self.toggle_button)\n\n # Btn home\n self.ui.btn_1.clicked.connect(self.show_page_1)\n\n # Btn widgets\n self.ui.btn_2.clicked.connect(self.show_page_2)\n\n # Btn settings\n self.ui.settings_btn.clicked.connect(self.show_page_3)\n\n # DISPLAY OUR APPLICATION\n self.show()\n\n def reset_selection(self):\n for btn in self.ui.left_menu.findChildren(QPushButton):\n try:\n btn.set_active(False)\n except:\n pass\n\n def show_page_1(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_1)\n self.ui.btn_1.set_active(True)\n\n def show_page_2(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_2)\n self.ui.btn_2.set_active(True)\n\n def show_page_3(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_3)\n self.ui.settings_btn.set_active(True)\n\n def toggle_button(self):\n # Get menu width\n menu_width = 
self.ui.left_menu.width()\n\n # Check width\n width = 50\n if menu_width == 50:\n width = 240\n\n # Start animation\n self.animation = QPropertyAnimation(self.ui.left_menu, b\"minimumWidth\")\n self.animation.setStartValue(menu_width)\n self.animation.setEndValue(width)\n self.animation.setDuration(500)\n self.animation.setEasingCurve(QEasingCurve.InOutCirc)\n self.animation.start()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = MainWindow()\n sys.exit(app.exec())\n","repo_name":"tiaonazario/WM_Curso_PySide6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"36679886283","text":"import re\nimport pymorphy2\n\nfrom readFromFile import readFromFile\n\n\ndef Training():\n counter = {}\n\n line = readFromFile().split()\n for word in line:\n counter[word] = counter.get(word, 0) + 1\n string = ''\n for j in counter:\n if counter[j] >= 10:\n temp = re.sub('(\\W|[0-9])', '', j)\n if temp and len(temp) > 1:\n string = string + temp + '\\n'\n\n actTraining = int(input('Какой язык в обычающей выборке?:\\n'\n '1 - Русский\\n'\n '2 - Английский\\n'))\n if actTraining == 1:\n SaveTraining(string, 1)\n elif actTraining == 2:\n SaveTraining(string, 2)\n\n\ndef train_for_short_word():\n counter = {}\n\n line = readFromFile().split()\n for word in line:\n counter[word] = counter.get(word, 0) + 1\n string = ''\n for j in counter:\n if counter[j] >= 10:\n temp = re.sub('(\\W|[0-9])', '', j)\n if temp and 1 < len(temp) <= 10:\n morph = pymorphy2.MorphAnalyzer()\n word_morph = morph.parse(temp)\n string = string + word_morph[0].lexeme[0].word + '\\n'\n\n actTraining = int(input('Какой язык в обычающей выборке?:\\n'\n '1 - Русский\\n'\n '2 - Английский\\n'))\n if actTraining == 1:\n SaveLexemeTraining(string, 1)\n elif actTraining == 2:\n SaveLexemeTraining(string, 2)\n\n\ndef SaveTraining(string, type):\n if type == 1:\n file = open(\"Training/frequencyMethodRussian.txt\", \"w\")\n file.write(str(string))\n file.close()\n elif type == 2:\n file = open(\"Training/frequencyMethodEnglish.txt\", \"w\")\n file.write(str(string))\n file.close()\n\n\ndef SaveLexemeTraining(string, type):\n if type == 1:\n file = open(\"Training/shortMethodRussian.txt\", \"w\")\n file.write(str(string))\n file.close()\n elif type == 2:\n file = open(\"Training/shortMethodEnglish.txt\", \"w\")\n file.write(str(string))\n file.close()\n","repo_name":"EugeniVoitehovski/EYAZIS2","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"9221703066","text":"from project import db\nfrom project.api.misc.enums import genres_list\n\n\nclass Genre(db.Model):\n __tablename__ = \"Genre\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Enum(*genres_list, name=\"genres\"), nullable=False, unique=True)\n\n\nGenreArtist = db.Table(\n \"GenreArtist\",\n db.Column(\"artist_id\", db.Integer, db.ForeignKey(\"Artist.id\"), primary_key=True),\n db.Column(\"genre_id\", db.Integer, db.ForeignKey(\"Genre.id\"), primary_key=True),\n)\n\n\nGenreVenue = db.Table(\n \"GenreVenue\",\n db.Column(\"venue_id\", db.Integer, db.ForeignKey(\"Venue.id\"), primary_key=True),\n db.Column(\"genre_id\", db.Integer, db.ForeignKey(\"Genre.id\"), 
primary_key=True),\n)\n","repo_name":"MaxBoykoII/fyyur-tdd","sub_path":"project/api/misc/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"31935683922","text":"import json\n\n\ndef get_ntp_global(**session_dict):\n r = session_dict['s'].get(session_dict['url'] + \"config/ntp\", verify=False)\n if r.ok:\n return r\n else:\n print(f\"HTTP Code: {r.status_code} \\n {r.reason} \\n Message {r.text}\")\n return r\n\n\ndef put_ntp_global(enable: bool, is_broadcast: bool = True, max_association=8, **session_dict):\n data = json.dumps({\n 'broadcast': is_broadcast,\n 'max-association': {'max-association_value': max_association},\n 'enable': enable})\n\n r = session_dict['s'].put(session_dict['url'] + \"config/ntp\", data=data, verify=False)\n if r.ok:\n return r\n else:\n print(f\"HTTP Code: {r.status_code} \\n {r.reason} \\n Message {r.text}\")\n return r\n\n\ndef get_ntp_server(address='', **session_dict):\n if address != '':\n target_url = f\"{session_dict['url']}config/ntp/server/ip4addr/{address}\"\n else:\n target_url = session_dict['url'] + \"config/ntp/server/ip4addr\"\n r = session_dict['s'].get(target_url, verify=False)\n if r.ok:\n return r\n else:\n print(f\"HTTP Code: {r.status_code} \\n {r.reason} \\n Message {r.text}\")\n return r\n\n\ndef post_ntp_server(address, min_pol=6, max_pol=10, is_burst=False, is_iburst=False, is_oobm=False, **session_dict):\n data = {'ip4addr': {'ip4addr_value': address,\n 'ip4addr_reference': {'min-poll': {'min-poll_value': min_pol},\n 'max-poll': {'max-poll_value': max_pol}}}}\n if is_iburst and not is_burst:\n data['iburst'] = True\n elif is_burst and not is_iburst:\n # fixed: this branch previously set 'iburst' again instead of 'burst'\n data['burst'] = True\n\n data = json.dumps(data)\n r = session_dict['s'].post(session_dict['url'] + \"config/ntp/server/ip4addr\", data=data, verify=False)\n if r.ok:\n return r\n else:\n print(f\"HTTP Code: {r.status_code} \\n {r.reason} \\n Message {r.text}\")\n return r\n\n\ndef delete_ntp_server(address, **session_dict):\n r = session_dict['s'].delete(session_dict['url'] + f\"config/ntp/server/ip4addr/{address}\", verify=False)\n if r.ok:\n return r\n else:\n print(f\"HTTP Code: {r.status_code} \\n {r.reason} \\n Message {r.text}\")\n return r\n","repo_name":"Linkk93/aos_api_connector","sub_path":"aos_s/api_ntp.py","file_name":"api_ntp.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"19805271499","text":"#!/usr/bin/env python\nimport sys\nfrom rq import Connection, Worker\nimport os\n\nfrom retrobiocat_web.app.app import create_app\n\nscheduler = os.environ.get('SCHEDULER') or False\nproduction_mode = os.environ.get('PRODUCTION') or False\n\nif __name__ == '__main__':\n app = create_app(use_talisman=production_mode)\n app.app_context().push()\n\n with Connection(app.redis):\n qs = sys.argv[1:] or ['tasks', 'network', 'pathway', 'db', 'process_blasts',\n 'alignment', 'blast', 'preprocess', 'osra']\n if 'auto_jobs' in qs:\n scheduler = True\n w = Worker(qs, log_job_description=False)\n w.work(with_scheduler=scheduler)\n","repo_name":"willfinnigan/RetroBioCat","sub_path":"retrobiocat_web/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"47"}
{"seq_id":"20127350428","text":"K, J, H = input().split()\nnewK, newJ, newH = 
input().split()\nK=int(K)\nJ=int(J)\nH=int(H)\nnewK=int(newK)\nnewJ=int(newJ)\nnewH=int(newH)\nK+=newK\nJ+=newJ\nH+=newH\nprint(K, J, H)","repo_name":"kauacdias/python","sub_path":"Lista 1 - Lógica - Lógica de Programação com Python (UFBA)/Exercício 2/atividade2_lista1.py","file_name":"atividade2_lista1.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25442704344","text":"import sys\ninput = sys.stdin.readline\n\nn, p = map(int, input().split())\n\nstack = [[] for _ in range(8)]\ncnt = 0\n\nfor _ in range(n):\n a, b = map(int, input().split())\n while True:\n if stack[a] == []:\n stack[a].append(b)\n cnt += 1\n break\n elif stack[a][-1] == b:\n break\n elif stack[a][-1] > b:\n stack[a].pop()\n cnt += 1\n continue\n elif stack[a][-1] < b:\n stack[a].append(b)\n cnt += 1\n break\n\nprint(cnt)","repo_name":"sungmin-99/HNU_Algorithm_Study","sub_path":"sungmin/5week/2841_외계인의 기타 연주.py","file_name":"2841_외계인의 기타 연주.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"75034477582","text":"# Add required imports\nimport random\nimport math\n\n# Add non-required imports\nimport time\n\n\ndef slowfun_too_slow(x, y):\n v = math.pow(x, y)\n v = math.factorial(v)\n v //= x + y\n v %= 982451653\n\n return v\n\n\n# Gotta have webster\nwebster = {}\n\n\ndef slowfun(x, y):\n \"\"\"\n Rewrite slowfun_too_slow() in here so that the program produces the same\n output, but completes quickly instead of taking ages to run.\n \"\"\"\n if (x, y) not in webster:\n webster[x, y] = slowfun_too_slow(x, y)\n return webster[x, y]\n\n\n##############################################################################\n# Do not modify below this line!\n\n# I'm ignoring that instruction above ^^\nstart = time.time()\n\n# I'll leave this alone\nfor i in range(50000):\n x = random.randrange(2, 14)\n y = random.randrange(3, 6)\n print(f\"{i}: {x},{y}: {slowfun(x, y)}\")\n\n# Again, I'm adding code in the forbidden area\nend = time.time()\ntotal = end - start\nprint(f\"Time it took to run (seconds): {total:.2f}\")\n\n#######################################\n# Time it took to run (seconds): 3.07 #\n#######################################\n","repo_name":"jacobpad/cs-hash-tables","sub_path":"applications/01_lookup_table/lookup_table.py","file_name":"lookup_table.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14870681693","text":"import requests\nfrom bs4 import BeautifulSoup\nr = requests.get('https://atcoder.jp/contests/abc144/tasks/abc144_b')\nsoup = BeautifulSoup(r.text, \"lxml\")\ntags = soup.find_all('h3', text=lambda t: t and 'Sample' in t)\ni, j = 1, 1\nfor tag in tags:\n # print(tag.find_next_sibling().text)\n # print(\"---------------------------------\")\n if i % 2:\n f = open(\"Input%d\" % j, \"w+\")\n else:\n f = open(\"Output%d\" % j, \"w+\")\n j += 1\n f.write(tag.find_next_sibling().text)\n f.close()\n i += 1\n","repo_name":"pianoft/PythonPrograms","sub_path":"tester/tester2.py","file_name":"tester2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38084719809","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\ntrack = 
cv.imread(r'src\\centreline_gen\\output\\hockenheim_widths.png',cv.IMREAD_GRAYSCALE)\n\ndef extract_centres(img:np.ndarray):\n visited = {}\n stack = []\n directions = [(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1)]\n centreline = []\n \n\n start_y = img.shape[1] // 2 - 120\n start_x = 0\n while img[start_x][start_y] == 0.0:\n start_x += 1\n \n start_point = (start_x,start_y)\n\n\n stack.append(start_point)\n while stack:\n point = stack.pop()\n if point not in visited:\n visited[point] = True\n centreline.append(point)\n for vec in directions:\n new_point = (point[0]+vec[0],point[1]+vec[1])\n if img[new_point[0],new_point[1]] != 0.0 and new_point not in visited:\n stack.append(new_point)\n\n return centreline\n\ncentreline = np.array(extract_centres(track))\n\n\n\ncentreline = np.vstack([centreline, centreline[0]])\n\nx = centreline[::,0]\ny = centreline[::,1]\n\ntck,u = interpolate.splprep([x,y], s=0,per=True)\nxi, yi = interpolate.splev(np.linspace(0, 1, 1000), tck)\n\n# plot the result\nfig, ax = plt.subplots(1, 1)\nax.plot(x, y, 'or')\nax.plot(xi, yi, '-b')\nplt.show()","repo_name":"csprono/raceline-opt","sub_path":"src/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30747828262","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"countAssembly.py: gathers stats on assembled FASTA files\n\"\"\"\n\n__author__ = \"Richard Allen White III, Jose Figueroa\"\n__copyright__ = \"Copyright 2021\"\n__version__ = \"0.3\"\n__maintainer__ = \"Jose Figueroa\"\n__email__ = \"jlfiguer@uncc.edu\"\n__status__ = \"Production\"\n\nimport os\nimport argparse\nimport re\nimport math\n\n\n# Global variables\nFILE_EXT = [\".fasta\", \".fa\", \".fna\", \".ffn\"]\n\n\n## main\ndef main():\n ## Parse the command line\n def typePath(path):\n return (os.path.abspath(os.path.expanduser(path)))\n\n parser = argparse.ArgumentParser(add_help=False)\n required = parser.add_argument_group('required arguments')\n required.add_argument(\"-i\", \"--interval\", help = \"interval size in # of residues\", type=int, required = True)\n required.add_argument(\"-f\", \"--fasta\", help = \"fasta file or folder\", type=typePath, required = True)\n optional = parser.add_argument_group('optional arguments')\n optional.add_argument(\"-r\", \"--ref\", help = \"reference genome\")\n optional.add_argument(\"-s\", \"--size\", help = \"reference genome size\")\n optional.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show this help message and exit\")\n args = parser.parse_args()\n\n countFasta(args.fasta, args.interval, args.size)\n\n return 0\n\n\ndef countFasta(assembly, interval, genomeSize):\n files = []\n if os.path.isfile(assembly):\n files = [assembly]\n elif os.path.isdir(assembly):\n files = [os.path.join(assembly, f) for f in os.listdir(assembly) if os.path.splitext(f)[1] in FILE_EXT]\n else:\n print(\"Not a valid fasta file or directory\")\n return\n\n numSeq = 0\n gcCount = 0\n lengthInter = {}\n seqLengths = []\n for filename in files:\n with open(filename) as fasta:\n id = None\n seqLen = 0\n for line in fasta:\n line = line.strip()\n if line.startswith('>'):\n # Next sequence in file\n if id is not None:\n seqLengths.append(seqLen)\n inter = math.floor( seqLen/interval )\n if inter not in lengthInter:\n lengthInter[inter] = 0\n lengthInter[inter] += 1\n # Sequence Basic Info\n numSeq += 1\n id = line[1:]\n seqLen = 0\n elif re.search(r'^[\\w]', line) and id is not 
None:\n # Sequence data\n seqLen += len(line)\n gcCount += len(re.findall('[GC]', line))\n # Incorporate totals from last sequence in file\n seqLengths.append(seqLen)\n inter = math.floor( seqLen/interval )\n if inter not in lengthInter:\n lengthInter[inter] = 0\n lengthInter[inter] += 1\n\n\n # Calculate N25, N50, and N75 and counts\n seqLengths.sort(reverse=True)\n maxSeq = seqLengths[0]\n minSeq = seqLengths[-1]\n \n # N Stats\n L25 = 0\n N25 = 0\n frac_covered = totalLength = sum(seqLengths)\n while frac_covered > totalLength*0.75:\n N25 = seqLengths[L25]\n L25 += 1\n frac_covered -= N25\n\n L50 = 0\n N50 = 0\n frac_covered = totalLength = sum(seqLengths)\n while frac_covered > totalLength*0.5:\n N50 = seqLengths[L50]\n L50 += 1\n frac_covered -= N50\n \n L75 = 0\n N75 = 0\n frac_covered = totalLength = sum(seqLengths)\n while frac_covered > totalLength*.25:\n N75 = seqLengths[L75]\n L75 += 1\n frac_covered -= N75\n \n L90 = 0\n N90 = 0\n frac_covered = totalLength = sum(seqLengths)\n while frac_covered > totalLength*.1:\n N90 = seqLengths[L90]\n L90 += 1\n frac_covered -= N90\n\n # NG Stats\n if genomeSize is None:\n genomeSize = totalLength\n else:\n # fixed: --size arrives as a string from argparse; cast before the arithmetic below\n genomeSize = int(genomeSize)\n LG25 = 0\n NG25 = 0\n frac_covered = genomeSize\n while frac_covered > genomeSize*0.75:\n NG25 = seqLengths[LG25]\n LG25 += 1\n frac_covered -= NG25\n\n LG50 = 0\n NG50 = 0\n frac_covered = genomeSize\n while frac_covered > genomeSize*0.5:\n NG50 = seqLengths[LG50]\n LG50 += 1\n frac_covered -= NG50\n \n LG75 = 0\n NG75 = 0\n frac_covered = genomeSize\n while frac_covered > genomeSize*.25:\n NG75 = seqLengths[LG75]\n LG75 += 1\n frac_covered -= NG75\n \n LG90 = 0\n NG90 = 0\n frac_covered = genomeSize\n while frac_covered > genomeSize*.1:\n NG90 = seqLengths[LG90]\n LG90 += 1\n frac_covered -= NG90\n\n # Print out the results\n print(\"\")\n ints = sorted(lengthInter.keys())\n i = ints[0]\n while i <= ints[-1]:\n if i not in lengthInter:\n lengthInter[i] = 0\n if lengthInter[i] > 0:\n print(f\"{i*interval}:{i*interval+interval-1}\\t{lengthInter[i]}\")\n i += 1\n \n print (f\"\\nTotal length of sequence:\\t{totalLength} bp\")\n print (f\"Total number of contigs:\\t{numSeq}\")\n print (f\"Max sequence length:\\t{maxSeq}\")\n print (f\"Min sequence length:\\t{minSeq}\")\n\n print (f\"\\nN25 stats:\\t\\t\\t25% of total sequence length is contained in the (L25) {L25} sequences >= {N25} bp\")\n print (f\"N50 stats:\\t\\t\\t50% of total sequence length is contained in the (L50) {L50} sequences >= {N50} bp\")\n print (f\"N75 stats:\\t\\t\\t75% of total sequence length is contained in the (L75) {L75} sequences >= {N75} bp\")\n print (f\"N90 stats:\\t\\t\\t90% of total sequence length is contained in the (L90) {L90} sequences >= {N90} bp\")\n\n print (f\"\\n*NG Stats using genome length of {genomeSize}.\")\n print (f\"NG25 stats:\\t\\t\\t25% of total genome length is contained in the {LG25} sequences >= {NG25} bp\")\n print (f\"NG50 stats:\\t\\t\\t50% of total genome length is contained in the {LG50} sequences >= {NG50} bp\")\n print (f\"NG75 stats:\\t\\t\\t75% of total genome length is contained in the {LG75} sequences >= {NG75} bp\")\n print (f\"NG90 stats:\\t\\t\\t90% of total genome length is contained in the {LG90} sequences >= {NG90} bp\")\n\n print (f\"\\nTotal GC count:\\t\\t\\t{gcCount} bp\")\n print (f\"GC %:\\t\\t\\t\\t{(100.0 * gcCount/totalLength):.2f} %\")\n\n print (\"* Without a reference genome we estimate the size using the assembled length.\")\n return\n\n\n## Start main method\nif __name__ == \"__main__\":\n main()\n\n## End of 
script\n","repo_name":"raw-lab/metaome_stats","sub_path":"countAssembly/bin/countAssembly.py","file_name":"countAssembly.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"70293272142","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def longestConsecutive(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return 0\n self.max = 1\n def helper(node):\n # Returns the length of the longest consecutive path starting at node and going down.\n # (fixed: the original helper compared against the parent's value but was never\n # called on root, so the method always returned 1)\n if not node:\n return 0\n left = helper(node.left)\n right = helper(node.right)\n length = 1\n if node.left and node.left.val == node.val + 1:\n length = max(length, left + 1)\n if node.right and node.right.val == node.val + 1:\n length = max(length, right + 1)\n self.max = max(self.max, length)\n return length\n helper(root)\n return self.max\n","repo_name":"CrazyCoder4Carrot/leetcode","sub_path":"python/251-300/298. Binary Tree Longest Consecutive Sequence.py","file_name":"298. Binary Tree Longest Consecutive Sequence.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"38676425724","text":"import uiScriptLocale\n\nROOT_PATH = \"d:/ymir work/ui/public/battle/\"\nBACK_IMG_PATH = \"d:/ymir work/ui/pattern/\"\n\nwindow = {\n\t\"name\" : \"BattleWindow\",\n\n\t\"x\" : (SCREEN_WIDTH -518) / 2,\n\t\"y\" : (SCREEN_HEIGHT - 400) / 2,\n\n\t\"style\" : (\"movable\",\"float\",),\n\n\t\"width\" : 325,\n\t\"height\" : 200,\n\n\t\"children\" :\n\t(\n\t\t{\n\t\t\t\"name\" : \"board\",\n\t\t\t\"type\" : \"board\",\n\n\t\t\t\"x\" : 0,\n\t\t\t\"y\" : 0,\n\n\t\t\t\"width\" : 325,\n\t\t\t\"height\" : 200,\n\n\t\t\t\"children\" :\n\t\t\t(\n\t\t\t\t## Title\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"TitleBar\",\n\t\t\t\t\t\"type\" : \"titlebar\",\n\t\t\t\t\t\"style\" : (\"attach\",),\n\n\t\t\t\t\t\"x\" : 8,\n\t\t\t\t\t\"y\" : 7,\n\n\t\t\t\t\t\"width\" : 310,\n\t\t\t\t\t\"color\" : \"yellow\",\n\n\t\t\t\t\t\"children\" :\n\t\t\t\t\t(\n\t\t\t\t\t\t{ \"name\":\"title_name\", \"type\":\"text\", \"x\":0, \"y\":-1, \"text\": \"Zona de Lupta\", \"all_align\":\"center\" },\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\t\n\t\t\t\t###########\n\t\t\t\t\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"text_board\",\n\t\t\t\t\t\"type\" : \"window\",\n\t\t\t\t\t\"x\" : 14,\n\t\t\t\t\t\"y\" : 36,\n\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\"height\" : 0,\n\t\t\t\t},\n\n\n\n\t\t\t\t## RankingList bg\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"ranking_list\",\n\t\t\t\t\t\"type\" : \"window\",\n\n\t\t\t\t\t\"x\" : 51,\n\t\t\t\t\t\"y\" : 66,\n\n\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\"height\" : 0,\n\t\t\t\t\t\n\t\t\t\t\t\"children\" :\n\t\t\t\t\t(\n\t\t\t\t\t\t## LeftTop\n\t\t\t\t\t\t\n\t\t\t\t\t),\t\n\t\t\t\t},\n\n\t\t\t\t## Tab Area\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"tab_control\",\n\t\t\t\t\t\"type\" : \"window\",\n\n\t\t\t\t\t\"x\" : 7,\n\t\t\t\t\t\"y\" : 36,\n\n\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\"height\" : 0,\n\n\t\t\t\t\t\"children\" :\n\t\t\t\t\t(\n\t\t\t\t\t\t## Tab\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"tab_01\",\n\t\t\t\t\t\t\t\"type\" : \"image\",\n\n\t\t\t\t\t\t\t\"x\" : 0,\n\t\t\t\t\t\t\t\"y\" : 0,\n\n\t\t\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\t\t\"height\" : 0,\n\n\t\t\t\t\t\t\t\"image\" : ROOT_PATH+\"tab_current_rank.sub\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"tab_02\",\n\t\t\t\t\t\t\t\"type\" : \"image\",\n\n\t\t\t\t\t\t\t\"x\" : 0,\n\t\t\t\t\t\t\t\"y\" : 0,\n\n\t\t\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\t\t\"height\" : 
0,\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\"image\" : ROOT_PATH+\"tab_accum_rank.sub\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t## RadioButton ##\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"tab_button_01\",\n\t\t\t\t\t\t\t\"type\" : \"radio_button\",\n\n\t\t\t\t\t\t\t\"x\" : 0,\n\t\t\t\t\t\t\t\"y\" : 0,\n\n\t\t\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\t\t\"height\" : 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"tab_button_02\",\n\t\t\t\t\t\t\t\"type\" : \"radio_button\",\n\n\t\t\t\t\t\t\t\"x\" : 124,\n\t\t\t\t\t\t\t\"y\" : 0,\n\n\t\t\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\t\t\"height\" : 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\t## List Column Titlebar\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"list\",\n\t\t\t\t\t\"type\" : \"window\",\n\n\t\t\t\t\t\"x\" : 7,\n\t\t\t\t\t\"y\" : 76,\n\n\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\"height\" : 0,\n\n\t\t\t\t\t\"children\" :\n\t\t\t\t\t(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"sub_titlebar\",\n\t\t\t\t\t\t\t\"type\" : \"image\",\n\n\t\t\t\t\t\t\t\"x\" : 0,\n\t\t\t\t\t\t\t\"y\" : 0,\n\n\t\t\t\t\t\t\t\"image\" : ROOT_PATH+\"column_titlebar.sub\",\n\n\t\t\t\t\t\t\t\"children\" :\n\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t{ \"name\":\"column_rank\", \"type\":\"text\", \"x\":227-170, \"y\":73-69, \"text\":\"Scurte Informatii\", \"r\":1.0, \"g\":1.0, \"b\":1.0, \"a\":1.0, \"text_horizontal_align\":\"center\" },\n\t\t\t\t\t\t\t\t{ \"name\":\"title_name\", \"type\":\"text\", \"x\":227-100, \"y\":90-69, \"text\":\"Esti pregatit pentru lupta finala ? Omorand oponentii\", \"r\":1.0, \"g\":1.0, \"b\":1.0, \"a\":1.0, \"text_horizontal_align\":\"center\" },\n\t\t\t\t\t\t\t\t{ \"name\":\"title_name\", \"type\":\"text\", \"x\":227-100, \"y\":107-69, \"text\":\"vei obtine puncte de lupta pe care le vei folosi pentru\", \"r\":1.0, \"g\":1.0, \"b\":1.0, \"a\":1.0, \"text_horizontal_align\":\"center\" },\n\t\t\t\t\t\t\t\t{ \"name\":\"title_name\", \"type\":\"text\", \"x\":250-100, \"y\":122-69, \"text\":\"a cumpara iteme valoroase.\", \"r\":1.0, \"g\":1.0, \"b\":1.0, \"a\":1.0, \"text_horizontal_align\":\"center\" },\n\t\t\t\t\t\t\t\t{ \"name\":\"title_name\", \"type\":\"text\", \"x\":237-100, \"y\":133-69, \"text\":\"Concursul se tine aproape zilnic si va fi anuntat de un GM.\", \"r\":1.0, \"g\":1.0, \"b\":1.0, \"a\":1.0, \"text_horizontal_align\":\"center\" },\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\t## Battle info\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"battle_info\",\n\t\t\t\t\t\"type\" : \"window\",\n\n\t\t\t\t\t\"x\" : -2,\n\t\t\t\t\t\"y\" : 160,\n\n\t\t\t\t\t\"width\" : 0,\n\t\t\t\t\t\"height\" : 0,\n\n\t\t\t\t\t\"children\" :\n\t\t\t\t\t(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"point_icon\",\n\t\t\t\t\t\t\t\"type\" : \"image\",\n\n\t\t\t\t\t\t\t\"x\" : 17,\n\t\t\t\t\t\t\t\"y\" : 10,\n\n\t\t\t\t\t\t\t\"image\" : ROOT_PATH+\"icon_my_point.sub\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{ \"name\":\"my_point\", \"type\":\"text\", \"x\":41, \"y\":18, \"text\": \"Zona de Lupta\", \"text_vertical_align\":\"center\" },\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\" : \"notice_icon\",\n\t\t\t\t\t\t\t\"type\" : \"image\",\n\n\t\t\t\t\t\t\t\"x\" : 133,\n\t\t\t\t\t\t\t\"y\" : 10,\n\n\t\t\t\t\t\t\t\"image\" : ROOT_PATH+\"icon_notice.sub\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{ \"name\":\"notice\", \"type\":\"text\", \"x\":160, \"y\":18, \"text\": \"Intrare Administratorul Luptelor.\", \"text_vertical_align\":\"center\" },\n\t\t\t\t\t\t#{\n\t\t\t\t\t\t\t#\"name\" : \"enter_button\",\n\t\t\t\t\t\t\t#\"type\" : \"button\",\n\n\t\t\t\t\t\t\t#\"x\" : 215,\n\t\t\t\t\t\t\t#\"y\" : 
30,\n\n\t\t\t\t\t\t\t#\"text\" : \"Intra\",\n\n\t\t\t\t\t\t\t#\"default_image\" : \"d:/ymir work/ui/public/large_button_01.sub\",\n\t\t\t\t\t\t\t#\"over_image\" : \"d:/ymir work/ui/public/large_button_02.sub\",\n\t\t\t\t\t\t\t#\"down_image\" : \"d:/ymir work/ui/public/large_button_03.sub\",\n\t\t\t\t\t\t#},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\t#############\n\t\t\t\t\n\t\t\t),\n\t\t},\n\t),\n}\n","repo_name":"VasilutaAndrei/compare-root","sub_path":"uiscript/battlewindow.py","file_name":"battlewindow.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18890714455","text":"infile=open('hirein.txt','r').readlines()\r\nn=int(infile[0])\r\nmonks=[int(i) for i in infile[1:1+n]]\r\ns=int(infile[1+n])\r\nstudent=[int(i) for i in infile[2+n:2+n+s]]\r\nm=int(infile[2+n+s])\r\nmaster=[int(i) for i in infile[3+n+s:]]\r\nanswer=0\r\n\r\nmonks.sort()\r\nstudent.sort()\r\nmaster.sort()\r\n\r\nN,S,M=0,0,0\r\nwhile N=master[M]:\r\n answer+=1\r\n N+=1\r\n M+=1\r\n else:\r\n N+=1\r\n \r\nopen('hireout.txt','w').write(str(answer))","repo_name":"eddiegz/Personal-C","sub_path":"AIO/hiring monks/monks.py","file_name":"monks.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"1508628470","text":"import sys\r\ndef number_shop():\r\n n=input(\"店舗番号を入力してください。\")\r\n url='ポータルのURLを入れる'+n\r\n print(url)\r\n\r\n# number_shop()\r\nargs = sys.argv\r\nurl='店舗のURLを入れる'+args[1]\r\nprint(url)\r\nprint(args[1])\r\n","repo_name":"MHiroshiNow/scraping","sub_path":"number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20512865235","text":"import time \r\nimport sqlite3\r\nimport discord\r\nimport random\r\nfrom discord.ext import commands\r\nfrom discord.utils import get\r\n\r\nintents = discord.Intents().all()\r\nclient = discord.Client\r\nToken = \"MTAyNzkyNjA1NjA1NDgzNzMyOQ.Geh_vr.dFSQ9bAPmfZ1Hz6yUB3XurSgmRfX-liQvvUuJA\" # Токен Бота\r\nchannel = 1042173661899149432\r\nbot = commands.Bot(command_prefix=\"!\", intents=discord.Intents().all()) # Параметр Бот\r\nDATABASE = \"serverdruzey3.db\"\r\n\r\n@bot.command()\r\nasync def go(ctx):\r\n print(\"go\")\r\n channel = bot.get_channel(1042173661899149432)\r\n emoji1 = discord.utils.get(bot.emojis, name='shulk')\r\n emoji2 = discord.utils.get(bot.emojis, name='diamond')\r\n connect = sqlite3.connect(DATABASE)\r\n cursor = connect.cursor()\r\n while True:\r\n print(2)\r\n time.sleep(5)\r\n Currency = cursor.execute(\"SELECT standart FROM currency\").fetchone()\r\n if random.randint(1, 2) == 1:\r\n cursor.execute(f\"UPDATE currency SET standart = standart + {random.randint(0, 10)}\")\r\n cursor.close()\r\n connect.commit()\r\n connect.close()\r\n embed = discord.Embed(\r\n title=f\"**Валюта поднялась в цене!**\",\r\n description=f\"**1 SH {emoji1} = {Currency[0]} АР {emoji2}**\",\r\n colour=discord.Colour.from_rgb(0, 162, 255)\r\n )\r\n await channel.send(embed=embed)\r\n \r\n else:\r\n cursor.execute(f\"UPDATE currency SET standart = standart - {random.randint(0, 10)}\")\r\n cursor.close()\r\n connect.commit()\r\n connect.close()\r\n embed = discord.Embed(\r\n title=f\"**Валюта упала в цене!**\",\r\n description=f\"**1 SH {emoji1} = {Currency[0]} АР {emoji2}**\",\r\n colour=discord.Colour.from_rgb(0, 162, 255)\r\n )\r\n await 
channel.send(embed=embed)\r\n\r\n \r\nif __name__ == \"__main__\":\r\n bot.run(Token)\r\n cource()\r\n","repo_name":"irinque/ServerDruzey-DiscordBot","sub_path":"course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"27549157266","text":"from django.shortcuts import render\nfrom rest_framework import generics, status, viewsets, permissions\nfrom .serializers import RouteSerializers, CreateRouteSerializer, CreateUserSerializer, UserSerializer, MyTokenObtainPairSerializer\nfrom .models import Route, User\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\n\n# Create your views here.\n\n# The authenticator function\nJWT_authenticator = JWTAuthentication()\n\n# ROUTE VIEWS\n# This is a view that's set up to return to us, all the routes that have been set up.\nclass RouteView(generics.ListAPIView):\n queryset = Route.objects.all()\n serializer_class = RouteSerializers\n\nclass CreateRouteView(APIView):\n serializer_class = CreateRouteSerializer\n # This is a function called when a POST request is made.\n def post(self, request, format=None):\n \n serializer = self.serializer_class(data=request.data)\n print(serializer)\n # is.valid() ensures the data entered is the correct data before it sends it to the data base.\n # The current issue we're facing is that the data entered isn't valid.\n if serializer.is_valid():\n name = serializer.data.get('name')\n location = serializer.data.get('location')\n difficulty = serializer.data.get('difficulty')\n description = serializer.data.get('description')\n image = serializer.data.get('image')\n climb_type = serializer.data.get('climb_type')\n pitch = serializer.data.get('pitch')\n\n route = Route(name=name, location=location, difficulty=difficulty, description=description, image=image, climb_type=climb_type, pitch=pitch)\n route.save()\n\n return Response(RouteSerializers(route).data, status=status.HTTP_201_CREATED)\n print(serializer.errors)\n return Response({'Bad Request': 'Invalid data...'}, status=status.HTTP_400_BAD_REQUEST)\n\n# USER VIEWS\nclass CreateUserView(APIView):\n permission_classes = (permissions.AllowAny,)\n authentication_classes = ()\n\n def post(self, request, format='json'):\n serializer = CreateUserSerializer(data=request.data)\n if serializer.is_valid():\n user = serializer.save()\n if user:\n json = serializer.data\n return Response(json, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# Token Views\nclass ObtainTokenPairWithView(TokenObtainPairView):\n permission_classes = (permissions.AllowAny,)\n serializer_class = MyTokenObtainPairSerializer","repo_name":"rsato1007/the_climbers_route","sub_path":"the_climbers_route/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5837150596","text":"from argparse import ArgumentParser\nimport logging\nimport os\nimport gzip\n\nfrom toil.job import Job\nfrom toil.common import Toil\nfrom toil.lib.docker import apiDockerCall\n\nimport utilities\n\nlogger = logging.getLogger(__name__)\n\n\nclass NovoplastyJob(Job):\n \"\"\"\n Accepts paired-end 
Illumina reads for assembly using NOVOPlasty.\n \"\"\"\n\n def __init__(\n self,\n read_one_file_id,\n read_two_file_id,\n config_file_id,\n config_file_name,\n chained_job=False,\n seed_file_id=None,\n parent_rv={},\n *args,\n **kwargs\n ):\n \"\"\"\n Parameters\n ----------\n read_one_file_id : toil.fileStore.FileID\n id of the file in the file store containing FASTQ Illumina\n short left paired reads\n read_two_file_id : toil.fileStore.FileID\n id of the file in the file store containing FASTQ Illumina\n short right paired reads\n config_file_id : toil.fileStore.FileID\n id of the file in the file store containing assembler args\n config_file_name : str\n name of the file in the file store containing assembler args\n parent_rv : dict\n dictionary of return values from the parent job\n \"\"\"\n super(NovoplastyJob, self).__init__(*args, **kwargs)\n self.read_one_file_id = read_one_file_id\n self.read_two_file_id = read_two_file_id\n self.config_file_id = config_file_id\n self.config_file_name = config_file_name\n self.chained_job = chained_job\n self.parent_rv = parent_rv\n self.seed_file_id = seed_file_id\n\n def run(self, fileStore):\n \"\"\"\n Returns\n -------\n dict of toil.fileStore.FileID and str\n file ids and names of log and corrections files, and\n contigs FASTA file\n \"\"\"\n # Expected output file names\n project_name = \"Toil\"\n log_file_name = \"log_{0}.txt\".format(project_name)\n contigs_file_name = \"Circularized_assembly_1_{0}.fasta\".format(project_name)\n\n try:\n # Read the config files from the file store into the local\n # temporary directory, and parse\n config_file_path = utilities.readGlobalFile(\n fileStore, self.config_file_id, self.config_file_name\n )\n common_config, assembler_params = utilities.parseConfigFile(\n config_file_path, \"novoplasty\"\n )\n common_config, bbnorm_params = utilities.parseConfigFile(\n config_file_path, \"bbnorm\"\n )\n\n # Read the read files from the file store into the local\n # temporary directory\n if self.chained_job:\n read_one_file_name = bbnorm_params[\"read_one_file_name\"]\n read_two_file_name = bbnorm_params[\"read_two_file_name\"]\n else:\n read_one_file_name = common_config[\"read_one_file_name\"]\n read_two_file_name = common_config[\"read_two_file_name\"]\n read_one_file_path = utilities.readGlobalFile(\n fileStore, self.read_one_file_id, read_one_file_name\n )\n read_two_file_path = utilities.readGlobalFile(\n fileStore, self.read_two_file_id, read_two_file_name\n )\n\n # Select the first read sequence as the seed, and write it\n # into the local temporary directory\n working_dir = fileStore.localTempDir\n\n # Check if seed file exists; I.E., it's been imported\n if self.seed_file_id:\n # Read existing seed file - must be in FASTA format\n seed_file_path = utilities.readGlobalFile(\n fileStore, self.seed_file_id, os.path.basename(self.seed_file_id)\n )\n else:\n seed_file_path = os.path.join(\n working_dir, assembler_params[\"seed_file_name\"]\n )\n\n # Create seed file from first read - read must be in FASTQ format\n with open(seed_file_path, \"w+\") as f:\n with gzip.open(read_one_file_path, \"rt\") as g:\n do_write = False\n for line in g:\n if line[0] == \"@\":\n do_write = True\n if line[0] == \"+\":\n break\n if do_write:\n f.write(line)\n\n # Write the NOVOPlasty config file into the local temporary\n # directory\n logger.info(\n \"Handling configuration file {0}\".format(\n assembler_params[\"config_file_name\"]\n )\n )\n with open(\n os.path.join(working_dir, assembler_params[\"config_file_name\"]), 
\"w+\"\n ) as f:\n config = \"\"\"Project:\n-----------------------\nProject name = {project_name}\nType = mito\nGenome Range = 1500-60000\nK-mer = 121\nMax memory = 3\nExtended log = 0\nSave assembled reads = no\nSeed Input = {seed_file_path}\nExtend seed directly = no\nReference sequence =\nVariance detection =\nChloroplast sequence =\n\nDataset 1:\n-----------------------\nRead Length = 251\nInsert size = 500\nPlatform = illumina\nSingle/Paired = PE\nCombined reads =\nForward reads = {read_one_file_path}\nReverse reads = {read_two_file_path}\n\nHeteroplasmy:\n-----------------------\nMAF =\nHP exclude list =\nPCR-free =\n\nOptional:\n-----------------------\nInsert size auto = yes\nUse Quality Scores = no\n \"\"\".format(\n project_name=project_name,\n seed_file_path=seed_file_path,\n read_one_file_path=read_one_file_path,\n read_two_file_path=read_two_file_path,\n )\n f.write(config)\n\n # Mount the Toil local temporary directory to the same path in\n # the container, and use the path as the working directory in\n # the container, then call NOVOPlasty\n # TODO: Specify the container on construction\n image = \"ralatsdio/novoplasty:v4.2\"\n logger.info(\"Calling image {0}\".format(image))\n apiDockerCall(\n self,\n image=image,\n volumes={working_dir: {\"bind\": working_dir, \"mode\": \"rw\"}},\n working_dir=working_dir,\n parameters=[\n \"perl\",\n \"/home/biodocker/bin/NOVOPlasty.pl\",\n \"-c\",\n \"config.txt\",\n ],\n )\n\n # Write the log, and contigs FASTA files from the local temporary\n # directory into the file store\n log_file_id = utilities.writeGlobalFile(fileStore, log_file_name)\n contigs_file_id = utilities.writeGlobalFile(fileStore, contigs_file_name)\n\n except Exception as exc:\n # Ensure expectred return values on exceptions\n logger.info(\"Calling image {0} failed: {1}\".format(image, exc))\n contigs_file_id = None\n log_file_id = None\n\n # Return file ids and names for export\n novoplasty_rv = {\n \"novoplasty_rv\": {\n \"log_file\": {\n \"id\": log_file_id,\n \"name\": log_file_name,\n },\n \"contigs_file\": {\n \"id\": contigs_file_id,\n \"name\": contigs_file_name,\n },\n }\n }\n novoplasty_rv.update(self.parent_rv)\n logger.info(\"Return value {0}\".format(novoplasty_rv))\n return novoplasty_rv\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Assemble reads corresponding to a single well.\n \"\"\"\n # Parse FASTQ data path, plate and well specification,\n # configuration path and file, and output directory, making the\n # output directory if needed\n parser = ArgumentParser()\n Job.Runner.addToilOptions(parser)\n cmps = str(os.path.abspath(__file__)).split(os.sep)[0:-4]\n cmps.extend([\"dat\", \"miscellaneous\"])\n parser.add_argument(\n \"-d\",\n \"--data-path\",\n default=os.sep + os.path.join(*cmps),\n help=\"path containing plate and well FASTQ source\",\n )\n parser.add_argument(\n \"-s\", \"--source-scheme\", default=\"file\", help=\"scheme used for the source URL\"\n )\n parser.add_argument(\n \"-l\", \"--plate-spec\", default=\"A11967A_sW0154\", help=\"the plate specification\"\n )\n parser.add_argument(\n \"-w\", \"--well-spec\", default=\"A01\", help=\"the well specification\"\n )\n cmps = str(os.path.abspath(__file__)).split(os.sep)[0:-1]\n parser.add_argument(\n \"-c\",\n \"--config-path\",\n default=os.sep + os.path.join(*cmps),\n help=\"path to a .ini file with args to be passed to the assembler\",\n )\n parser.add_argument(\n \"-f\",\n \"--config-file\",\n default=\"Assembler.ini\",\n help=\"path to a .ini file with args to be passed to the 
assembler\",\n )\n parser.add_argument(\n \"--seed-path\",\n default=os.sep + os.path.join(*cmps),\n help=\"directory containing the seed file to be used by the assembler, where applicable\",\n )\n parser.add_argument(\n \"--seed-file\",\n default=None,\n help=\"name of a FASTA seed file to be used by the assembler, where applicable (defaults to using the first read)\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-directory\",\n default=None,\n help=\"the directory containing all output files\",\n )\n options = parser.parse_args()\n if options.output_directory is None:\n options.output_directory = os.path.join(\n \"output\",\n os.path.basename(__file__).replace(\".py\", \"\"),\n options.plate_spec,\n options.well_spec,\n )\n if not os.path.exists(options.output_directory):\n os.makedirs(options.output_directory)\n\n # Work within the Toil context manager\n with Toil(options) as toil:\n if not toil.options.restart:\n\n # Import the local read files into the file store\n read_one_file_ids, read_two_file_ids = utilities.importReadFiles(\n toil,\n options.data_path,\n options.plate_spec,\n [options.well_spec],\n options.source_scheme,\n )\n\n # Import local config file into the file store\n config_file_id = utilities.importConfigFile(\n toil, os.path.join(options.config_path, options.config_file)\n )\n\n # Import local seed file into the file store, if needed\n if options.seed_file:\n seed_file_id = utilities.importFile(\n toil, os.path.join(options.seed_path, options.seed_file)\n )\n else:\n seed_file_id = None\n\n # Construct and start the NOVOPlasty job\n novoplasty_job = NovoplastyJob(\n read_one_file_ids[0],\n read_two_file_ids[0],\n config_file_id,\n options.config_file,\n seed_file_id=seed_file_id,\n )\n novoplasty_rv = toil.start(novoplasty_job)\n\n else:\n\n # Restart the workflow (fixed: Toil.restart takes no job argument, and\n # novoplasty_job is undefined on this branch)\n novoplasty_rv = toil.restart()\n\n # Export all NOVOPlasty output files from the file store\n utilities.exportFiles(\n toil, options.output_directory, novoplasty_rv[\"novoplasty_rv\"]\n )\n","repo_name":"addgene/addgene-bioinformatics","sub_path":"src/python/jobs/NovoplastyJob.py","file_name":"NovoplastyJob.py","file_ext":"py","file_size_in_byte":11868,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"4833350113","text":"import base64\nimport json\nimport os\nimport ssl\nimport urllib2\nimport urllib\n\nfrom cloudinit import helpers\nfrom cloudinit import util\n\ndef getEnv():\n env = os.environ\n # Check if Puppet PC1 directory on PATH\n if 'puppetlabs' not in env[\"PATH\"]:\n env[\"PATH\"] = env[\"PATH\"] + \":/opt/puppetlabs/bin\"\n return env\n\ndef getFacterFact(factname, outputType=None):\n command = \"facter %s\" % factname\n\n if outputType:\n command += \" %s\" % outputType\n\n try:\n (facter_out, facter_err) = util.subp(command, shell=True, env=getEnv())\n if facter_err:\n # fixed: the format arguments were previously passed as \"% factname, string\",\n # where string was undefined and the tuple never reached the format operator\n raise Exception(\"facter returned an error when querying for %s: %s\" % (factname, facter_err))\n output = facter_out.strip()\n if not output:\n raise Exception(\"facter did not return anything for %s\" % factname)\n return output\n except util.ProcessExecutionError as e:\n raise Exception(\"facter execution failed: %s\" % e)\n return False\n\nclass ForemanAdapter:\n mandatory_fields = ['server', 'hostgroup', 'login', 'password']\n\n def __init__(self, log, user_data):\n self.log = log\n log.debug(\"Got to ForemanAdapter.init\")\n facter_os = json.loads(getFacterFact(\"os\", outputType=\"--json\"))['os']\n log.debug(\"Facter_os is 
a %s, looks like: %s\", type(facter_os), facter_os)\n fq_os = \"%s %s\" % (facter_os['name'], facter_os['release']['full'])\n log.debug(\"Fq_os = %s\", fq_os)\n\n log.debug(\"Setting defaults...\")\n self.defaults = {\n \"architecture\": getFacterFact(\"architecture\"),\n # \"model\" : \"Virtual Machine\",\n \"operatingsystem\": fq_os,\n \"environment\": \"production\",\n \"domain\": getFacterFact(\"domain\"),\n # \"ptable\": \"RedHat default\",\n }\n log.debug(\"Set defaults\")\n\n self.user_data = user_data\n for field in self.mandatory_fields:\n if field not in self.user_data:\n raise Exception((\"%s must be supplied in [puppet] \"\n \" section in userdata.\" % field))\n log.debug(\"All mandatory fields present\")\n self.login = self.user_data.pop(\"login\")\n self.password = self.user_data.pop(\"password\")\n\n def makeRequest(self, url, data=None, headers={}, request_type=\"GET\", allowed_responses=[], auth=True):\n log = self.log\n log.debug(\"Got to makeRequest for URL: %s. Data = %s, Headers = %s, Allowed_responses = %s\" % (url, data, headers, allowed_responses))\n\n if request_type == 'POST':\n data = json.dumps(data)\n\n if request_type == \"GET\" and data is not None:\n url_suffix = \"?\" + urllib.urlencode(data)\n url = url + url_suffix\n data = None\n\n log.debug(\"Final URL = %s\", url)\n\n req = urllib2.Request(url, data=data, headers=headers)\n req.get_method = lambda: request_type\n\n if auth:\n auth_string = base64.encodestring(\"%s:%s\" % (self.login, self.password))\n auth_string = auth_string.strip()\n auth_header = \"Basic %s\" % auth_string\n\n # Add header to request\n req.add_header('Authorization', auth_header)\n\n try:\n out = urllib2.urlopen(req, context=ssl._create_unverified_context())\n except urllib2.HTTPError as e:\n if e.code in allowed_responses:\n log.debug(\"Response code %s is in allowed_responses list\", e.code)\n return e\n else:\n log.warn(\"Error encountered opening URL '%s': %s\", url, e)\n raise Exception((\"Error encountered opening URL '%s': %s\" % (url, e)))\n else:\n log.debug(\"Request completed successfully\")\n return out\n\n def foremanRequest(self, resource, request_type, data=None):\n log = self.log\n log.debug(\"Got to foremanRequest. 
Resource = %s, request_type = %s, data = %s\", resource, request_type, data)\n\n headers = {\"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"}\n\n url = self.user_data['server'] + \"/api/\" + resource\n return json.loads(self.makeRequest(url, data=data, headers=headers, request_type=request_type).read())\n\n def registerToForeman(self):\n log = self.log\n log.debug(\"Got to registerToForeman.\")\n host_dict = {}\n host_dict['hostgroup_id'] = self.getMetafieldID(\n \"hostgroup\", self.user_data['hostgroup'])\n\n for field in self.defaults.keys():\n value = self.user_data.get(field, self.defaults[field])\n host_dict[field + \"_id\"] = self.getMetafieldID(field, value)\n\n host_dict['name'] = getFacterFact(\"fqdn\")\n host_dict['ip'] = getFacterFact(\"ipaddress\")\n host_dict['mac'] = getFacterFact(\"macaddress\").lower()\n\n log.debug(\"Checking for duplicate hosts using values: %s\", host_dict)\n self.checkForDuplicates(host_dict)\n log.debug(\"No duplicates found, creating host...\")\n\n # Host needs to be marked as Building\n host_dict['build'] = True\n\n newhost_dict = self.foremanRequest(resource = \"hosts\",\n request_type = \"POST\",\n data = {\"host\": host_dict})\n log.info(\"New host created with ID: %s\", newhost_dict[\"id\"])\n return newhost_dict[\"id\"]\n\n def hostExists(self, hostname):\n log = self.log\n url = \"%s/api/hosts/%s\" % (self.user_data['server'], hostname)\n response = self.makeRequest(url, allowed_responses=[200,404])\n log.debug(\"Response code = %s, Body looks like: %s\" % (response.getcode(), response.read()))\n if response.getcode() == 200:\n log.debug(\"Matching host already exists.\")\n return True\n elif response.getcode() == 404:\n log.debug(\"No matching host found.\")\n return False\n else:\n log.debug(\"Unexpected code returned. Raising exception.\")\n raise Exception(\"Unexpected response received when checking if host exists. Code: %s, Body: %s\" % (response.getcode(), response.read()))\n\n def checkForDuplicates(self, host_dict):\n log = self.log\n hostname = host_dict[\"name\"]\n if not hostname.strip():\n raise Exception(\"Invalid hostname to check\")\n log.debug(\"Looking for hostname matching '%s'\", hostname)\n # if given hostname already exists, delete the old record\n # maybe update would be better?\n if self.hostExists(hostname):\n log.warn(\"deleting %s from foreman\" % hostname)\n d = self.foremanRequest(resource=\"hosts/\" + hostname,\n request_type=\"DELETE\")\n\n for field in ['ip', 'mac']:\n log.debug(\"Checking for hosts with matching %s\", field)\n matching_hosts = self.foremanRequest(\n resource = \"hosts\",\n request_type=\"GET\",\n data = {\"search\": \"%s=%s\" % (field, host_dict[field])}\n )\n log.debug(\"Matching hosts looks like: %s\", matching_hosts)\n if matching_hosts and matching_hosts['results']:\n msg = (\"Host with %s %s already exists: %s\" %\n (field, host_dict[field], matching_hosts))\n raise Exception(msg)\n\n def getMetafieldID(self, fieldname, fieldvalue):\n log = self.log\n log.debug(\"Got to getMetafieldID. 
Looking for field %s with value %s\", fieldname, fieldvalue)\n        get_data = {\"search\": fieldvalue}\n        lookup_key = 'name'\n\n        # operatingsystems can't be searched on foreman-side for some reason so\n        # we need to list all entries and pick the matching one\n        if fieldname in [\"hostgroup\", \"operatingsystem\"]:\n            #get_data = {\"search\": \"\"}\n            lookup_key = 'title'\n\n        field_dict = self.foremanRequest(\n            resource=fieldname + \"s\",\n            request_type=\"GET\",\n            data=get_data)\n\n        log.debug(\"Response from Foreman = %s\", field_dict)\n\n        for item in field_dict['results']:\n            if item[lookup_key] == fieldvalue:\n                log.debug(\"Got a match. ID = %i\", int(item['id']))\n                return int(item['id'])\n        return None\n\n    def runForemanFinishScript(self):\n        log = self.log\n        log.debug(\"Got to runForemanFinishScript\")\n\n        # Get finish script\n        url = self.user_data['server'] + \"/unattended/finish\"\n        log.debug(\"Requesting URL %s\", url)\n        response = self.makeRequest(url)\n        log.debug(\"Response looks like: %s\", response)\n        finish_script = response.read()\n\n        log.debug(\"Running finish script\")\n        try:\n            (output, err) = util.subp(finish_script, shell=True)\n            log.debug(\"Finish script run. Output = \\n%s\", output)\n        except util.ProcessExecutionError as e:\n            log.warn(\"Error encountered when running finish script: %s\", e)\n            raise Exception((\"Error running finish script: %s\" % e))\n\n        log.debug(\"Marking host as built\")\n        url = self.user_data['server'] + \"/unattended/built\"\n        log.debug(\"Opening URL %s\", url)\n\n        self.makeRequest(url)\n        log.info(\"Host marked as successfully built.\")\n\ndef handle(_name, cfg, cloud, log, _args):\n    if 'foreman' not in cfg:\n        return\n\n    foreman_cfg = cfg['foreman']\n    log.debug(\"Foreman_cfg looks like: %s\", foreman_cfg)\n    adapter = ForemanAdapter(log, foreman_cfg)\n    log.debug(\"ForemanAdapter init completed...\")\n    newhost_id = adapter.registerToForeman()\n\n    if foreman_cfg.get('runfinish', False):\n        log.debug(\"Running Foreman finish script\")\n        adapter.runForemanFinishScript()\n\n    log.info(\"cc_foreman complete...\")\n\n","repo_name":"weareact/cloud-init-foreman","sub_path":"cc_foreman.py","file_name":"cc_foreman.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"29885271626","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'gravy_app'\nurlpatterns = [\n    path('', views.ListPosts.as_view(), name='home'),\n    path('gravy_app/new_post/', views.CreatePost.as_view(), name='new_post'),\n    path('gravy_app/<int:pk>/', views.ListPosts.as_view(), name='home'),\n    path('gravy_app/<int:pk>/edit_post/', views.EditPost.as_view(), name='edit_post'),\n    path('gravy_app/<int:pk>/delete_post/', views.DeletePost.as_view(), name='delete_post')\n]\n","repo_name":"PdxCodeGuild/class_australian_shepherd","sub_path":"code/kacey/labs/django/gravy_project/gravy_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"19558213829","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n    if len(my_list) == 0:\n        return 0\n    mul = 0\n    div = 0\n    for n in my_list:\n        mul += n[0] * n[1]\n        div += n[1]\n    return float(mul / div)\n","repo_name":"OtungPrincess/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25454545794","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport utils\nimport logging\n\nlogging.basicConfig(\n    filename=utils.get_home() + \"/meteo/logfile.log\",\n    level=logging.DEBUG,\n    format='%(asctime)s\\t%(levelname)s\\t%(name)s\\t%(message)s')\nlog = logging.getLogger(\"failed_request.py\")\n\nrequest_file = utils.get_home() + \"/meteo/failed_request.sql\"\n\n\ndef append(request):\n    if len(request) == 0:\n        log.error(\"Dropping an empty line that should not have been there!\")\n        return\n    with open(request_file, \"a\") as fappend:\n        fappend.write(request + \"\\n\")\n    log.debug(\"Appending request to failed_request.sql: \" + request)\n    return\n\n\ndef extract_first():\n    try:\n        with open(request_file, 'r') as fin:\n            data = fin.read().splitlines(True)\n    except FileNotFoundError:\n        log.warning(\"File '\" + str(request_file) + \"' was not found => no request to extract, returning None.\")\n        return None\n    if len(data) == 0:\n        log.debug(\"No request in file '\" + str(request_file) + \"', remaining empty and returning None.\")\n        return None\n    with open(request_file, 'w') as fout:\n        fout.writelines(data[1:])\n    return data[0].rstrip()\n\n\ndef fix_previously_failed_requests(conn):\n    previously_failed_requests = extract_first()\n    if previously_failed_requests:\n        log.debug(\"Tentatively re-executing request:\" + previously_failed_requests)\n        try:\n            curs = conn.cursor()\n            curs.execute(previously_failed_requests)\n            curs.close()\n            conn.commit()\n            log.info(\"SQL request executed with success.\")\n        except Exception as e:\n            log.debug(e)\n            log.warning(\"SQL request failed (again).\")\n            append(previously_failed_requests)\n    return\n","repo_name":"Thomas-Baeckeroot/meteo","sub_path":"src/main/py/failed_request.py","file_name":"failed_request.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41718726957","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom builtins import str\nfrom io import open\n\nfrom argparse import ArgumentParser\nimport pickle, utils, oldslavdep, os, os.path, time\n\nif __name__ == '__main__':\n\n    parser = ArgumentParser() #changed from OptParse to ArgParse (NP)\n    parser.add_argument(\"--train\", 
dest=\"conll_train\", help=\"Path to annotated CONLL train file\", metavar=\"FILE\", default=\"N/A\")\n parser.add_argument(\"--dev\", dest=\"conll_dev\", help=\"Path to annotated CONLL dev file\", metavar=\"FILE\", default=\"N/A\")\n parser.add_argument(\"--test\", dest=\"conll_test\", help=\"Path to CONLL test file\", metavar=\"FILE\", default=\"N/A\")\n parser.add_argument(\"--output\", dest=\"conll_test_output\", help=\"File name for predicted output\", metavar=\"FILE\", default=\"N/A\")\n parser.add_argument(\"--prevectors\", dest=\"external_embedding\", help=\"Pre-trained vector embeddings\", metavar=\"FILE\")\n parser.add_argument(\"--params\", dest=\"params\", help=\"Parameters file\", metavar=\"FILE\", default=\"model.params\")\n parser.add_argument(\"--model\", dest=\"model\", help=\"Load/Save model file\", metavar=\"FILE\", default=\"model\")\n parser.add_argument(\"--wembedding\", type=int, dest=\"wembedding_dims\", default=100)\n parser.add_argument(\"--cembedding\", type=int, dest=\"cembedding_dims\", default=50)\n parser.add_argument(\"--pembedding\", type=int, dest=\"pembedding_dims\", default=100)\n parser.add_argument(\"--epochs\", type=int, dest=\"epochs\", default=30)\n parser.add_argument(\"--hidden\", type=int, dest=\"hidden_units\", default=100)\n # parser.add_argument(\"--lr\", type=float, dest=\"learning_rate\", default=0.1) #Uncomment if model is used to train new parser or update OldSlavNet\n parser.add_argument(\"--outdir\", type=str, dest=\"output\", default=\"results\")\n parser.add_argument(\"--activation\", type=str, dest=\"activation\", default=\"tanh\")\n parser.add_argument(\"--lstmlayers\", type=int, dest=\"lstm_layers\", default=2)\n parser.add_argument(\"--lstmdims\", type=int, dest=\"lstm_dims\", default=128)\n parser.add_argument(\"--disableblstm\", action=\"store_false\", dest=\"blstmFlag\", default=True)\n parser.add_argument(\"--disablelabels\", action=\"store_false\", dest=\"labelsFlag\", default=True)\n parser.add_argument(\"--predict\", action=\"store_true\", dest=\"predictFlag\", default=False)\n parser.add_argument(\"--bibi-lstm\", action=\"store_false\", dest=\"bibiFlag\", default=True)\n parser.add_argument(\"--disablecostaug\", action=\"store_false\", dest=\"costaugFlag\", default=True)\n parser.add_argument(\"--dynet-seed\", type=int, dest=\"seed\", default=0) \n parser.add_argument(\"--dynet-mem\", type=int, dest=\"mem\", default=0)\n\n args = parser.parse_args()\n\n #print 'Using external embedding:', args.external_embedding\n\n if args.predictFlag:\n with open(args.params, 'rb') as paramsfp:\n words, w2i, c2i, pos, rels, stored_opt = pickle.load(paramsfp)\n stored_opt.external_embedding = None\n print('Loading pre-trained model')\n parser = oldslavdep.OldSlavDep(words, pos, rels, w2i, c2i, stored_opt)\n parser.Load(args.model)\n \n testoutpath = os.path.join(args.output, args.conll_test_output)\n print('Predicting POS tags and parsing dependencies')\n # ts = time.time()\n # test_pred = list(parser.Predict(options.conll_test))\n # te = time.time()\n # print 'Finished in', te-ts, 'seconds.'\n # utils.write_conll(testoutpath, test_pred)\n\n with open(testoutpath, 'w') as fh:\n for sentence in parser.Predict(args.conll_test):\n for entry in sentence[1:]:\n fh.write(str(entry) + '\\n')\n fh.write('\\n')\n\n else:\n print(\"Training file: \" + args.conll_train)\n if args.conll_dev != \"N/A\":\n print(\"Development file: \" + args.conll_dev)\n\n highestScore = 0.0\n eId = 0\n\n if os.path.isfile(os.path.join(args.output, args.params)) and 
\\\n os.path.isfile(os.path.join(args.output, os.path.basename(args.model))) :\n\n print('Found a previous saved model => Loading this model')\n with open(os.path.join(args.output, args.params), 'rb') as paramsfp:\n words, w2i, c2i, pos, rels, stored_opt = pickle.load(paramsfp)\n stored_opt.external_embedding = None\n parser = oldslavdep.OldSlavDep(words, pos, rels, w2i, c2i, stored_opt)\n parser.Load(os.path.join(args.output, os.path.basename(args.model)))\n parser.trainer.restart()\n if args.conll_dev != \"N/A\":\n devPredSents = parser.Predict(args.conll_dev)\n\n count = 0\n lasCount = 0\n uasCount = 0\n posCount = 0\n poslasCount = 0\n for idSent, devSent in enumerate(devPredSents):\n conll_devSent = [entry for entry in devSent if isinstance(entry, utils.ConllEntry)]\n\n for entry in conll_devSent:\n if entry.id <= 0:\n continue\n if entry.pos == entry.pred_pos and entry.parent_id == entry.pred_parent_id and entry.pred_relation == entry.relation:\n poslasCount += 1\n if entry.pos == entry.pred_pos:\n posCount += 1\n if entry.parent_id == entry.pred_parent_id and entry.pred_relation == entry.relation:\n lasCount += 1\n if entry.parent_id == entry.pred_parent_id:\n uasCount += 1\n count += 1\n\n print(\"---\\nLAS accuracy:\\t%.2f\" % (lasCount * 100 / count))\n print(\"UAS accuracy:\\t%.2f\" % (uasCount * 100 / count))\n print(\"POS accuracy:\\t%.2f\" % (posCount * 100 / count))\n print(\"POS&LAS:\\t%.2f\" % (poslasCount * 100 / count))\n\n score = poslasCount * 100 / count\n if score >= highestScore:\n parser.Save(os.path.join(args.output, os.path.basename(args.model)))\n highestScore = score\n\n print(\"POS&LAS of the previous saved model: %.2f\" % (highestScore))\n\n else:\n print('Extracting vocabulary')\n words, w2i, c2i, pos, rels = utils.vocab(args.conll_train)\n\n with open(os.path.join(args.output, args.params), 'wb') as paramsfp:\n pickle.dump((words, w2i, c2i, pos, rels, args), paramsfp, protocol=2)\n\n #print 'Initializing joint model'\n parser = oldslavdep.OldSlavDep(words, pos, rels, w2i, c2i, args)\n \n\n for epoch in range(args.epochs):\n print('\\n-----------------\\nStarting epoch', epoch + 1)\n\n if epoch % 10 == 0:\n if epoch == 0:\n parser.trainer.restart(learning_rate=0.001) \n elif epoch == 10:\n parser.trainer.restart(learning_rate=0.0005)\n else:\n parser.trainer.restart(learning_rate=0.00025)\n\n parser.Train(args.conll_train)\n \n if args.conll_dev == \"N/A\":\n parser.Save(os.path.join(args.output, os.path.basename(args.model)))\n \n else: \n devPredSents = parser.Predict(args.conll_dev)\n \n count = 0\n lasCount = 0\n uasCount = 0\n posCount = 0\n poslasCount = 0\n for idSent, devSent in enumerate(devPredSents):\n conll_devSent = [entry for entry in devSent if isinstance(entry, utils.ConllEntry)]\n \n for entry in conll_devSent:\n if entry.id <= 0:\n continue\n if entry.pos == entry.pred_pos and entry.parent_id == entry.pred_parent_id and entry.pred_relation == entry.relation:\n poslasCount += 1\n if entry.pos == entry.pred_pos:\n posCount += 1\n if entry.parent_id == entry.pred_parent_id and entry.pred_relation == entry.relation:\n lasCount += 1\n if entry.parent_id == entry.pred_parent_id:\n uasCount += 1\n count += 1\n \n print(\"---\\nLAS accuracy:\\t%.2f\" % (lasCount * 100 / count))\n print(\"UAS accuracy:\\t%.2f\" % (uasCount * 100 / count))\n print(\"POS accuracy:\\t%.2f\" % (posCount * 100 / count))\n print(\"POS&LAS:\\t%.2f\" % (poslasCount * 100 / count))\n \n score = poslasCount * 100 / count\n if score >= highestScore:\n 
parser.Save(os.path.join(args.output, os.path.basename(args.model)))\n                    highestScore = score\n                    eId = epoch + 1\n                \n                print(\"Highest POS&LAS: %.2f at epoch %d\" % (highestScore, eId))\n\n","repo_name":"npedrazzini/OldSlavNet","sub_path":"scripts/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"74548715981","text":"#Jacob Pawlak\n#February 23rd, 2017\n#Detailed Differences\n#https://open.kattis.com/problems/detaileddifferences\n\ndef main():\n\n\tnum_times = int(input())\n\tfor i in range(0, num_times):\n\t\tfirst = input()\n\t\tsecond = input()\n\t\tdiff = \"\"\n\t\tfor j in range(0, len(first)):\n\t\t\tif(first[j] == second[j]):\n\t\t\t\tdiff += \".\"\n\t\t\telse:\n\t\t\t\tdiff += \"*\"\n\t\tprint(first)\n\t\tprint(second)\n\t\tprint(diff)\n\t\tprint()\n\nmain()\n","repo_name":"JacobPawlak/KattisSolutions","sub_path":"detaileddifferences.py","file_name":"detaileddifferences.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"40635025480","text":"print('CO2 AGENT MASS CALCULATION ACCORDING TO VdS 2093, CEA 4007, APSAD R13\\n\\nv1.0 04/22 - Manesco\\n\\nInstructions:\\n- Separate the values with spaces;\\n- Use \".\" for decimal places.\\n')\r\noutro_trecho = True\r\n\r\nvol_total = 0.00\r\nsup_total = 0.00\r\nmas_total = 0.00\r\n\r\nkb = 1.50\r\n\r\nwhile outro_trecho:\r\n    l, a, c = input('Enter the Section Dimensions: (L A C): ').split()\r\n\r\n    l = float(l)\r\n    a = float(a)\r\n    c = float(c)\r\n\r\n    vol = a * l * c ## SECTION VOLUME CALCULATION\r\n    sup = 2.00 * ((c * l) + (c * a) + (l * a)) ## SECTION SURFACE AREA CALCULATION\r\n    mas = kb * ((0.75 * vol) + (0.20 * sup)) ## CO2 MASS CALCULATION\r\n\r\n    vol_total = vol_total + vol\r\n    sup_total = sup_total + sup\r\n    mas_total = mas_total + mas\r\n\r\n    opcao = input('Are There More Sections? 
(s/n): ')\r\n\r\n    if opcao != 's':\r\n        outro_trecho = False\r\n\r\nprint('\\nTotal Volume = %.3f' % vol_total, 'm³')\r\nprint('Total Surface Area = %.3f' % sup_total, 'm²')\r\nprint('Total Mass = %.3f' % mas_total, 'kg')\r\nprint('Considering factor Kb = 1.50\\n')\r\n\r\nif mas_total <= 10:\r\n    print('Use a 10kg CO₂ Cylinder\\n')\r\nelif mas_total <= 20:\r\n    print('Use a 20kg CO₂ Cylinder\\n')\r\nelse:\r\n    print('Use a 30kg CO₂ Cylinder\\n')\r\n\r\ninput('Press \"ENTER\" to exit...')","repo_name":"lucasmanesco/Calculo_CO2","sub_path":"Calc_Massa_CO2.py","file_name":"Calc_Massa_CO2.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9913229504","text":"import numpy as np\nimport cv2\n\narrow=np.zeros((500,500,3), np.uint8)\narrow[arrow<30]=254\npoints=np.array([[100,30], [200,30], [200,20], [225,50], [200,80], [200,70], [100,70]], np.int32)\ncv2.fillConvexPoly(arrow, points, (0,0,255), 4, 0)\nhsv=cv2.cvtColor(arrow, cv2.COLOR_BGR2HSV)\nlower = (0,0,0)\nupper = (0,0,255)\nmask= cv2.inRange(arrow, lower, upper)\nimage, contours, hierarchy= cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncv2.drawContours(arrow, contours, -1,(0,255,0),3)\ncentres=[]\nfor i in range(len(contours)):\n    moments=cv2.moments(contours[i])\n    centres.append((int(moments['m10']/moments['m00']), int(moments['m01']/moments['m00'])))\n    cv2.circle(arrow, centres[i], 3, (0,0,0), -1)#\nprint(len(contours))\nif int(moments['m10']/moments['m00'])> 225:\n    print(\"left\")\nelse:\n    print(\"right\")\ncv2.imshow(\"Image\", arrow)\ncv2.imshow(\"hsv\", hsv)\ncv2.waitKey(0)\n","repo_name":"ruchikachavhan/IRC","sub_path":"arrow.py","file_name":"arrow.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4126805558","text":"import logging\nfrom unittest import mock\nfrom unittest.mock import call\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.signing import Signer\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.test import RequestFactory\n\nfrom braces.views import LoginRequiredMixin\nfrom django.test import override_settings\nfrom model_mommy import mommy\nfrom notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, \\\n    ActionContextQuerySet\nfrom notifications.views import LogListView, LogCountView, ReadLogUpdateView, \\\n    LogQuestionnairesListView, LogInformationUpdateCreateView, \\\n    LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView\nfrom qcat.tests import TestCase\n\n\nclass LogListViewTest(TestCase):\n\n    def setUp(self):\n        self.view = LogListView()\n        self.url_path = reverse('notification_partial_list')\n        self.request = RequestFactory().get(self.url_path)\n        self.user = {}\n        self.request.user = self.user\n        self.view_instance = self.setup_view(\n            view=self.view, request=self.request\n        )\n        member_add_log = mommy.make(\n            _model=Log,\n            id=8,\n            action=settings.NOTIFICATIONS_ADD_MEMBER\n        )\n        self.change_log = mommy.make(\n            _model=Log,\n            id=42,\n            action=settings.NOTIFICATIONS_CHANGE_STATUS\n        )\n        mommy.make(_model=StatusUpdate, log=self.change_log)\n        mommy.make(_model=MemberUpdate, log=member_add_log)\n\n    def get_view_with_get_querystring(self, param):\n        request = RequestFactory().get(\n            '{url}?{param}'.format(url=self.url_path, 
param=param)\n )\n request.user = self.user\n return self.setup_view(view=self.view, request=request)\n\n def test_force_login(self):\n self.assertIsInstance(self.view_instance, LoginRequiredMixin)\n\n def test_queryset_method(self):\n self.assertEqual(\n self.view_instance.queryset_method,\n 'user_log_list'\n )\n\n def test_queryset_method_pending(self):\n self.assertEqual(\n self.get_view_with_get_querystring('is_pending').queryset_method,\n 'user_pending_list'\n )\n\n def test_get_paginate_by(self):\n self.assertEqual(\n self.view_instance.get_paginate_by(None),\n settings.NOTIFICATIONS_LIST_PAGINATE_BY\n )\n\n def test_get_paginate_by_teaser(self):\n self.assertEqual(\n self.get_view_with_get_querystring('is_teaser').get_paginate_by(None),\n settings.NOTIFICATIONS_TEASER_PAGINATE_BY\n )\n\n @mock.patch('notifications.views.Log.actions.user_log_list')\n def test_get_queryset(self, mock_actions):\n self.view_instance.get_queryset()\n mock_actions.assert_called_once_with(user={})\n\n @mock.patch('notifications.views.Log.actions.user_pending_list')\n def test_get_queryset_pending(self, mock_actions):\n self.get_view_with_get_querystring('is_pending').get_queryset()\n mock_actions.assert_called_once_with(user={})\n\n @mock.patch.object(LogListView, 'add_user_aware_data')\n def test_get_context_data_logs(self, mock_add_user_aware_data):\n self.view_instance.object_list = 'foo'\n self.view_instance.get_context_data()\n mock_add_user_aware_data.assert_called_once_with('foo')\n\n def _test_add_user_aware_data(self):\n # for faster tests, mock all the elements. elements are created here\n # as this makes the tests more readable.\n pth = 'notifications.views.Log.actions'\n with mock.patch('{}.read_id_list'.format(pth)) as read_id_list:\n read_id_list.return_value = [42]\n with mock.patch('{}.user_pending_list'.format(pth)) as pending:\n pending.values_list.return_value = [8, 42]\n logs = Log.objects.all()\n return list(self.view_instance.add_user_aware_data(logs))\n\n def test_add_user_aware_data_keys(self):\n data_keys = self._test_add_user_aware_data()[0].keys()\n for key in ['id', 'created', 'text', 'is_read', 'is_todo', 'edit_url']:\n self.assertTrue(key in data_keys)\n\n def test_add_user_aware_data_is_read(self):\n data = self._test_add_user_aware_data()\n # logs are ordered by creation date - 42 is the newer one\n self.assertTrue(data[0]['is_read'])\n\n def test_add_user_aware_data_is_not_read(self):\n data = self._test_add_user_aware_data()\n self.assertFalse(data[1]['is_read'])\n\n #def test_add_user_aware_data_is_todo(self):\n # data = self._test_add_user_aware_data()\n # self.assertTrue(data[1]['is_todo'])\n\n def test_add_user_aware_data_is_not_todo(self):\n data = self._test_add_user_aware_data()\n self.assertFalse(data[0]['is_todo'])\n\n @override_settings(NOTIFICATIONS_ACTIONS={'foo': 'bar', 'result': '42'})\n def test_statuses_in_context(self):\n self.view_instance.object_list = []\n context = self.view_instance.get_context_data()\n self.assertDictEqual(\n context['statuses'],\n {'foo': 'bar', 'result': '42'}\n )\n\n @mock.patch('notifications.views.Log.actions.user_log_list')\n def test_status_filter_queryset(self, mock_user_log_list):\n mock_user_log_list.return_value = []\n self.assertEqual(\n [], self.view_instance.get_queryset()\n )\n\n @mock.patch('notifications.views.Log.actions.user_log_list')\n def test_status_filter_queryset_for_status(self, mock_user_log_list):\n mock_user_log_list.return_value = Log.objects.filter()\n view = self.view\n view.get_statuses = 
mock.MagicMock(return_value=[3])\n view_instance = self.setup_view(\n view=view, request=self.request\n )\n self.assertQuerysetEqual(\n view_instance.get_queryset(),\n [self.change_log.id],\n transform=lambda item: item.id\n )\n\n def test_get_status_invalid(self):\n request = RequestFactory().get('{}?statuses=foo'.format(self.url_path))\n view = self.setup_view(self.view, request)\n self.assertEqual(view.get_statuses(), [])\n\n @override_settings(NOTIFICATIONS_ACTIONS={'2': 'bar'})\n def test_get_status_invalid_config(self):\n request = RequestFactory().get('{}?statuses=1'.format(self.url_path))\n view = self.setup_view(self.view, request)\n self.assertEqual(view.get_statuses(), [])\n\n def test_get_status_valid(self):\n request = RequestFactory().get('{}?statuses=1,2,3'.format(self.url_path))\n view = self.setup_view(self.view, request)\n self.assertEqual(view.get_statuses(), [1, 2, 3])\n\n\nclass ReadLogUpdateViewTest(TestCase):\n\n def setUp(self):\n self.view = ReadLogUpdateView()\n self.request = RequestFactory().post(\n reverse('notification_read'),\n data={'user': 123, 'log': 'log', 'checked': 'true'}\n )\n self.user = mock.MagicMock(id=123)\n self.request.user = self.user\n self.view_instance = self.setup_view(view=self.view, request=self.request)\n\n def test_validate_data_all_keys(self):\n self.assertFalse(\n self.view_instance.validate_data()\n )\n\n def test_validate_data_id_type(self):\n self.assertFalse(\n self.view_instance.validate_data(checked='1', log='1', user='foo')\n )\n\n def test_validate_data_invalid_user(self):\n self.assertFalse(\n self.view_instance.validate_data(checked='456', log='1', user='456')\n )\n\n def test_validate_data_valid(self):\n self.assertTrue(\n self.view_instance.validate_data(checked='1', log='1', user='123')\n )\n\n @mock.patch('notifications.views.ReadLog.objects.update_or_create')\n def test_post_valid_checked(self, mock_get_or_create):\n self.view_instance.post(request=self.request)\n mock_get_or_create.assert_called_once_with(\n user_id='123', log_id='log', defaults={'is_read': True}\n )\n\n @mock.patch('notifications.views.ReadLog.objects.update_or_create')\n def test_post_valid_unchecked(self, mock_get_or_create):\n request = RequestFactory().post(\n reverse('notification_read'),\n data={'user': 123, 'log': 'log', 'checked': 'false'}\n )\n self.view_instance.post(request=request)\n mock_get_or_create.assert_called_once_with(\n user_id='123', log_id='log', defaults={'is_read': False}\n )\n\n @mock.patch.object(ReadLogUpdateView, 'validate_data')\n def test_post_invalid(self, mock_validate_data):\n logging.disable(logging.CRITICAL)\n mock_validate_data.return_value = False\n with self.assertRaises(Http404):\n self.view_instance.post(request=self.request)\n\n\nclass LogCountViewTest(TestCase):\n\n def setUp(self):\n super().setUp()\n self.request = RequestFactory().get(reverse('notification_new_count'))\n self.request.user = mommy.make(_model=get_user_model())\n self.view = self.setup_view(view=LogCountView(), request=self.request)\n mommy.make(\n _model=Log,\n catalyst=self.request.user,\n action=settings.NOTIFICATIONS_CHANGE_STATUS,\n _quantity=4\n )\n mommy.make(\n _model=Log,\n catalyst=self.request.user,\n action=settings.NOTIFICATIONS_EDIT_CONTENT,\n _quantity=2\n )\n\n @mock.patch('notifications.views.Log.actions.only_unread_logs')\n def test_get_unread_only(self, mock_only_unread_logs):\n self.view.get(request=self.request)\n mock_only_unread_logs.assert_called_once_with(\n user=self.request.user\n )\n\n def test_log_count(self):\n 
response = self.view.get(request=self.request)\n self.assertEqual(response.content, b'4')\n\n def test_log_count_one_read(self):\n mommy.make(\n _model=ReadLog,\n log=Log.objects.filter(action=settings.NOTIFICATIONS_CHANGE_STATUS).first(),\n user=self.request.user,\n is_read=True\n )\n response = self.view.get(request=self.request)\n self.assertEqual(response.content, b'3')\n\n\nclass LogQuestionnairesListViewTest(TestCase):\n\n def setUp(self):\n super().setUp()\n self.request = RequestFactory().get(reverse('notification_questionnaire_logs'))\n self.request.user = 'foo'\n self.view = self.setup_view(view=LogQuestionnairesListView(), request=self.request)\n\n @mock.patch.object(ActionContextQuerySet, 'user_log_list')\n def test_get_questionnaire_logs(self, mock_user_log_list):\n self.view.get_questionnaire_logs('foo')\n mock_user_log_list.assert_called_once_with(user='foo')\n\n\n @mock.patch.object(LogQuestionnairesListView, 'get_questionnaire_logs')\n def test_get(self, mock_get_questionnaire_logs):\n mock_get_questionnaire_logs.return_value = ['foo_1', 'foo_2', 'bar_3']\n response = self.view.get(self.request)\n self.assertEqual(\n response.content, b'{\"questionnaires\": [\"bar_3\", \"foo_1\", \"foo_2\"]}'\n )\n\n\nclass LogInformationUpdateCreateViewTest(TestCase):\n\n def setUp(self):\n super().setUp()\n self.url = reverse('notification_inform_compiler')\n self.view = LogInformationUpdateCreateView()\n self.request = RequestFactory().get(self.url)\n self.request.user = 'foo'\n self.view = self.setup_view(view=self.view, request=self.request)\n\n def test_get_compiler_query(self):\n questionnaire = mock.MagicMock()\n self.view.get_compiler(questionnaire)\n self.assertEqual(\n questionnaire.method_calls[0],\n call.questionnairemembership_set.get(role='compiler')\n )\n\n def test_get_compiler(self):\n sentinel = mock.sentinel\n questionnaire = mock.MagicMock()\n questionnaire.questionnairemembership_set.get.return_value = sentinel\n self.assertEqual(\n self.view.get_compiler(questionnaire),\n sentinel.user\n )\n\n @mock.patch('notifications.views.query_questionnaire')\n def test_get_questionnaire(self, mock_query_questionnaire):\n one_questionnaire = mock.MagicMock()\n one_questionnaire.first = lambda : 'foo'\n mock_query_questionnaire.return_value = one_questionnaire\n self.assertEqual(\n self.view.get_questionnaire('foo'), 'foo'\n )\n\n @mock.patch('notifications.views.query_questionnaire')\n def test_get_questionnaire_raises(self, mock_query_questionnaire):\n not_exists = mock.MagicMock()\n not_exists.exists = lambda : False\n mock_query_questionnaire.return_value = not_exists\n with self.assertRaises(Http404):\n self.view.get_questionnaire('foo')\n\n @mock.patch('notifications.views.query_questionnaire')\n def test_get_questionnaire_calls_filter(self, mock_query_questionnaire):\n self.view.get_questionnaire('foo')\n mock_query_questionnaire.assert_called_once_with(\n identifier='foo', request=self.request\n )\n\n @override_settings(NOTIFICATIONS_FINISH_EDITING='setting')\n @mock.patch.object(LogInformationUpdateCreateView, 'get_questionnaire')\n @mock.patch.object(LogInformationUpdateCreateView, 'get_compiler')\n def test_post(self, mock_get_compiler, mock_get_questionnaire):\n compiler = mock.MagicMock()\n mock_get_questionnaire.return_value = mock.sentinel.questionnaire\n mock_get_compiler.return_value = compiler\n request = RequestFactory().post(self.url, data={\n 'identifier': 'foo',\n 'message': 'bar'\n })\n with mock.patch('notifications.views.InformationLog') as mock_create:\n 
self.setup_view(view=self.view, request=self.request).post(request)\n mock_create.assert_called_once_with(\n action='setting',\n questionnaire=mock.sentinel.questionnaire,\n receiver=compiler,\n sender='foo'\n )\n\n\nclass LogSubscriptionPreferencesMixinTest(TestCase):\n\n def setUp(self):\n self.url = reverse('notification_preferences')\n self.view = LogSubscriptionPreferencesView()\n self.request = RequestFactory().get(self.url)\n self.user = mommy.make(_model=get_user_model())\n self.obj = self.user.mailpreferences\n self.request.user = self.user\n self.request._messages = mock.MagicMock()\n self.view = self.setup_view(view=self.view, request=self.request)\n self.view.object = self.obj\n\n def test_get_initial(self):\n self.obj.wanted_actions = 'some,thing,yay'\n self.assertEqual(\n ['some', 'thing', 'yay'],\n self.view.get_initial()['wanted_actions']\n )\n\n def test_get_form_valid_changed_language(self):\n self.view.object = mock.MagicMock()\n self.view.object.has_changed_language = False\n form = mock.MagicMock()\n form.changed_data = ['language']\n self.view.form_valid(form)\n self.assertTrue(self.view.object.has_changed_language)\n\n def test_get_form_valid_message(self):\n self.view.form_valid(mock.MagicMock())\n self.assertTrue(self.request._messages.method_calls)\n\n\nclass SignedLogSubscriptionPreferencesViewTest(TestCase):\n\n def setUp(self):\n self.user = mommy.make(_model=get_user_model())\n self.obj = self.user.mailpreferences\n self.view = SignedLogSubscriptionPreferencesView()\n self.request = RequestFactory().get(str(self.obj.get_signed_url()))\n self.request._messages = mock.MagicMock()\n self.view = self.setup_view(view=self.view, request=self.request)\n self.view.object = self.obj\n\n def test_get_success_url_signed(self):\n self.request.user = mock.MagicMock(is_authenticated=lambda: False)\n self.assertEqual(\n self.view.get_success_url(),\n self.obj.get_signed_url()\n )\n\n def test_get_success_url_user(self):\n self.request.user = self.user\n self.assertEqual(\n self.view.get_success_url(),\n reverse('notification_preferences')\n )\n\n def test_get_object_user(self):\n self.request.user = self.user\n self.assertEqual(\n self.view.get_object(),\n self.obj\n )\n\n def test_get_signed_object(self):\n self.request.user = mock.MagicMock(is_authenticated=lambda: False)\n self.view.kwargs['token'] = mock.MagicMock()\n with mock.patch.object(Signer, 'unsign') as mock_unsign:\n mock_unsign.return_value = self.obj.id\n self.assertEqual(\n self.view.get_object(), self.obj\n )\n mock_unsign.assert_called_with(self.view.kwargs['token'])\n\n def test_get_signed_object_404(self):\n self.request.user = mock.MagicMock(is_authenticated=lambda: False)\n self.view.kwargs['token'] = mock.MagicMock()\n with self.assertRaises(Http404):\n self.view.get_object()\n","repo_name":"sotkonstantinidis/testcircle","sub_path":"apps/notifications/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":17136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26669390079","text":"# -*- coding: utf-8 -*-\n\"\"\"WavefrontDirectReporter and WavefrontProxyReporter implementations.\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\n\nimport pyformance.reporters.reporter\n\nimport wavefront_sdk\nfrom wavefront_sdk.entities.histogram import histogram_granularity\n\nfrom . import delta\nfrom . import runtime_metrics\nfrom . 
import wavefront_histogram\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\nclass WavefrontReporter(pyformance.reporters.reporter.Reporter):\n \"\"\"Base reporter for reporting data in Wavefront format.\"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(self, source='wavefront-pyformance', registry=None,\n reporting_interval=60, clock=None, prefix='', tags=None,\n enable_runtime_metrics=False):\n \"\"\"Construct Wavefront Reporter.\"\"\"\n super(WavefrontReporter, self).__init__(\n registry=registry, reporting_interval=reporting_interval,\n clock=clock)\n self.wavefront_client = None\n self.source = source\n self.prefix = prefix\n self.tags = tags or {}\n self.histogram_granularities = set()\n self.enable_runtime_metrics = enable_runtime_metrics\n\n @staticmethod\n def decode_key(key):\n \"\"\"Decode encoded key into original key and dict of tags.\"\"\"\n if '-tags=' in key:\n key_name, tags_json = key.split('-tags=')\n return key_name, json.loads(tags_json)\n return key, None\n\n def report_now(self, registry=None, timestamp=None):\n \"\"\"Collect metrics from registry and report them to Wavefront.\"\"\"\n self._report(registry, timestamp, False)\n\n def _report(self, registry=None, timestamp=None, flush_current_hist=False):\n \"\"\"\n Collect metrics from registry and report them to Wavefront.\n\n With option to include current minute bin of histogram.\n\n :param registry: Registry\n :param timestamp: Timestamp\n :param flush_current_hist: Flush the current minute bin of histogram.\n :return: None\n \"\"\"\n registry = registry or self.registry\n if self.enable_runtime_metrics:\n col = runtime_metrics.RuntimeCollector(registry)\n col.collect()\n metrics = registry.dump_metrics()\n for key in metrics.keys():\n metric_name, metric_tags = self.decode_key(key)\n tags = self.tags\n if metric_tags:\n tags = self.tags.copy()\n tags.update(metric_tags)\n\n wf_hist = wavefront_histogram.get(key, registry)\n if wf_hist is not None:\n distributions = wf_hist.get_distribution()\n if flush_current_hist:\n distributions.extend(\n wf_hist.get_current_minute_distribution())\n for dist in distributions:\n self.wavefront_client.send_distribution(\n name='{}{}'.format(self.prefix, metric_name),\n centroids=dist.centroids,\n histogram_granularities=self.histogram_granularities,\n timestamp=dist.timestamp,\n source=self.source,\n tags=tags)\n continue\n\n is_delta = delta.is_delta_counter(key, registry)\n for value_key in metrics[key].keys():\n if is_delta:\n self.wavefront_client.send_delta_counter(\n name=delta.get_delta_name(self.prefix, metric_name,\n value_key),\n value=metrics[key][value_key], source=self.source,\n tags=tags\n )\n # decrement delta counter\n registry.counter(key).dec(metrics[key][value_key])\n else:\n self.wavefront_client.send_metric(\n name='{}{}.{}'.format(self.prefix, metric_name,\n value_key),\n value=metrics[key][value_key], timestamp=timestamp,\n source=self.source, tags=tags)\n\n def stop(self):\n \"\"\"Stop pyformance and wavefront reporter.\"\"\"\n self._report(registry=self.registry, flush_current_hist=True)\n super(WavefrontReporter, self).stop()\n self.wavefront_client.close()\n\n def report_minute_distribution(self):\n \"\"\"Report distribution using minute granularity.\"\"\"\n self.histogram_granularities.add(histogram_granularity.MINUTE)\n return self\n\n def report_hour_distribution(self):\n \"\"\"Report distribution using hour granularity.\"\"\"\n 
self.histogram_granularities.add(histogram_granularity.HOUR)\n return self\n\n def report_day_distribution(self):\n \"\"\"Report distribution with day granularity.\"\"\"\n self.histogram_granularities.add(histogram_granularity.DAY)\n return self\n\n\nclass WavefrontProxyReporter(WavefrontReporter):\n \"\"\"Requires a host and port to report data to a Wavefront proxy.\"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(self, host, port=2878, distribution_port=None,\n source='wavefront-pyformance', registry=None,\n reporting_interval=60, clock=None, prefix='proxy.',\n tags=None, enable_runtime_metrics=False):\n \"\"\"Run parent __init__ and do proxy reporter specific setup.\"\"\"\n super(WavefrontProxyReporter, self).__init__(\n source=source, registry=registry,\n reporting_interval=reporting_interval, clock=clock, prefix=prefix,\n tags=tags, enable_runtime_metrics=enable_runtime_metrics)\n self.wavefront_client = wavefront_sdk.WavefrontProxyClient(\n host=host, metrics_port=port, distribution_port=distribution_port,\n tracing_port=None)\n\n def report_now(self, registry=None, timestamp=None):\n \"\"\"Collect metrics from registry and report them to Wavefront.\"\"\"\n timestamp = timestamp or int(round(self.clock.time()))\n super(WavefrontProxyReporter, self).report_now(registry, timestamp)\n\n\nclass WavefrontDirectReporter(WavefrontReporter):\n \"\"\"Direct Reporter for sending metrics using direct ingestion.\n\n This reporter requires a server and a token to report data\n directly to a Wavefront server.\n \"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(self, server, token, source='wavefront-pyformance',\n registry=None, reporting_interval=60, clock=None,\n prefix='direct.', tags=None, enable_runtime_metrics=False):\n \"\"\"Run parent __init__ and do direct reporter specific setup.\"\"\"\n super(WavefrontDirectReporter, self).__init__(\n source=source, registry=registry,\n reporting_interval=reporting_interval, clock=clock, prefix=prefix,\n tags=tags, enable_runtime_metrics=enable_runtime_metrics)\n self.server = self._validate_url(server)\n self.token = token\n self.batch_size = 10000\n self.wavefront_client = wavefront_sdk.WavefrontDirectClient(\n self.server, token, batch_size=self.batch_size,\n flush_interval_seconds=reporting_interval)\n\n @staticmethod\n def _validate_url(server): # pylint: disable=no-self-use\n \"\"\"Validate URL of server.\"\"\"\n parsed_url = urlparse(server)\n if not all((parsed_url.scheme, parsed_url.netloc)):\n raise ValueError('invalid server url')\n return server\n\n def report_now(self, registry=None, timestamp=None):\n \"\"\"Collect metrics from registry and report them to Wavefront.\"\"\"\n super(WavefrontDirectReporter, self).report_now(registry, timestamp)\n self.wavefront_client.flush_now()\n","repo_name":"mitul01/robotshop","sub_path":"src/payment/.venv/Lib/site-packages/wavefront_pyformance/wavefront_reporter.py","file_name":"wavefront_reporter.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"16236357039","text":"from openpyxl import load_workbook, Workbook\r\nimport numpy\r\n\r\n\r\ndef writeExcel(mech_all,WORKCENTRE):\r\n wb = Workbook()\r\n sheet = wb.active\r\n sheet.title = \"Reached\"\r\n name = \"\"\r\n wb.create_sheet(\"not_reached\")\r\n wb.create_sheet(\"not_available\")\r\n sheet2 = wb['not_reached']\r\n sheet3 = wb['not_available']\r\n sheet.cell(1, 1).value = \"Station\"\r\n sheet.cell(1, 2).value = 
\"Mech_Class\"\r\n sheet.cell(1, 3).value = \"Destination\"\r\n sheet.cell(1, 4).value = \"Distance (ft)\"\r\n\r\n sheet2.cell(1, 1).value = \"Station\"\r\n sheet2.cell(1, 2).value = \"Mech_Class\"\r\n sheet2.cell(1, 3).value = \"not_reached\"\r\n\r\n sheet3.cell(1, 1).value = \"Mech_Class\"\r\n sheet3.cell(1, 2).value = \"Bin location\"\r\n\r\n i = 1\r\n j = 1\r\n name = name+\"_\"+ WORKCENTRE[0].replace(\"16WS\",\"\")\r\n for mech_class in mech_all:\r\n i = i+1\r\n \r\n for station in mech_class:\r\n for row in station.data_reached:\r\n sheet.cell(i, j).value = station.station_name\r\n sheet.cell(i, j+1).value = station.mech_class_name\r\n sheet.cell(i, j+2).value = row[0]\r\n sheet.cell(i, j+3).value = row[1]\r\n i = i+1\r\n i = 1\r\n for mech_class in mech_all:\r\n i = i+1\r\n\r\n for station in mech_class:\r\n for row in station.data_not_reached:\r\n sheet2.cell(i, j).value = station.station_name\r\n sheet2.cell(i, j+1).value = station.mech_class_name\r\n sheet2.cell(i, j+2).value = row\r\n i = i+1\r\n\r\n i = 2\r\n\r\n for mech_class in mech_all:\r\n # i=i+1\r\n for station in mech_class:\r\n for row in station.not_available:\r\n sheet3.cell(i, j).value = station.mech_class_name\r\n sheet3.cell(i, j+1).value = row\r\n\r\n i = i+1\r\n\r\n wb.save(f\"Report1/Report1_data{name}.xlsx\")\r\n","repo_name":"naveenasokan/Factory-Layout-Optimzation-with-Data-Structure-Algorithms","sub_path":"report1datadump.py","file_name":"report1datadump.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22342253449","text":"import tkinter as tk\nfrom chessGame import chessGame\nimport dataForChessGame as data\nfrom dataForChessGame import gameScale\n\n\nclass initialize(object):\n def __init__(self):\n self.root = tk.Tk()\n self.canvas = tk.Canvas(self.root, width=gameScale, height=int(gameScale + gameScale / 2), highlightthickness=0,\n background=data.brown)\n\n # Changing the startFEN changes the starting position\n self.c = chessGame(white=data.white, green=data.green, gameScale=data.gameScale, canvas=self.canvas,\n root=self.root,\n FEN=data.startFEN)\n\n def main(self):\n self.root.title(\"chessGame\")\n self.root.configure(background='Black')\n self.root.geometry(f'{str(gameScale)}x{str(int(gameScale + gameScale / 2))}')\n\n self.c.drawBoard()\n self.c.processStartFENString()\n\n self.canvas.pack()\n self.root.mainloop()\n\n\ni = initialize()\ni.main()\n","repo_name":"Fletcher555/Chess-Project","sub_path":"initializeGame.py","file_name":"initializeGame.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10614111777","text":"import serial\nfrom queue import Queue\nfrom threading import Thread, RLock, Event\nfrom time import sleep, time\nfrom struct import pack, unpack\n\nclass MSG(bytearray):\n SIZE = 6 #*8 bit\n def __init__(self, source=None, msgType=None, id=None, data=None):\n if source == None:\n source = MSG.SIZE\n super().__init__(source)\n if msgType != None:\n self[0] = int.from_bytes(msgType, 'big')\n if id != None:\n self[1] = int.from_bytes(id, 'big')\n if data != None:\n self.dataEncode(data);\n def type(self):\n return int(self[0]).to_bytes(1, 'big')\n def id(self):\n return int(self[1]).to_bytes(1, 'big')\n def data(self):\n return self[2:]\n def dataAsInt(self):\n return unpack('l', value)\n else:\n print(type(value))\n raise Exception('TypeError')\n\n class TYPE:\n HEARTBEAT = 
b'\\x00'\n        SET = b'\\x01'\n        GET = b'\\x02'\n    class INFO:\n        STATUS = b'\\x00'\n        ERROR = b'\\x01'\n        SUCCESS = b'\\x02'\n        ACK = b'\\x03'\n        SETPOINT = b'\\x04'\n        POSITION = b'\\x05'\n\n    class MOTOR:\n        BASE = b'\\x00'\n        SHOULDER = b'\\x01'\n        ELBOW = b'\\x02'\n        WRIST = b'\\x03'\n        SUCTION = b'\\x04'\n\n    nonLatchedStatusFlag = {\n        0: 'NO FAULT',\n        1: 'POWER',\n        (1 << 2):'OPENY',\n        (1 << 3):'OPENX',\n        (1 << 4):'WD',\n        (1 << 5):'CPFAIL',\n        (1 << 6):'TW'\n    }\n\n    latchedStatusFlag = {\n        0: 'NO FAULT',\n        1: 'POWER',\n        (1 << 3):'OVCXNB',\n        (1 << 4):'OVCXNT',\n        (1 << 5):'OVCXPB',\n        (1 << 6):'OVCXPT',\n        (1 << 10):'TSD',\n        (1 << 11):'OVCYNB',\n        (1 << 12):'OVCYNT',\n        (1 << 13):'OVCYPB',\n        (1 << 14):'OVCYPT'\n    }\n\nclass MCU:\n    def __init__(self, port):\n        self.port = port\n        self.Serial = serial.Serial()\n        self.lock = RLock()\n        self.__open_serial()\n        self.active = Event()\n        self.error = Event()\n\n        # Start\n        #self.heartbeat()\n\n    def heartbeat(self):\n        def run(self):\n            i = 0\n            while i < 5:\n                i = i+1\n                msg = MSG(msgType=MSG.TYPE.HEARTBEAT)\n                resp = self.__send(msg)\n                if resp != None and resp.type() == MSG.TYPE.HEARTBEAT:\n                    self.active.set()\n                    i = 0\n                else:\n                    self.active.clear()\n                    if resp is None or resp.id() != MSG.INFO.SUCCESS:\n                        self.error.set()\n                    else:\n                        self.error.clear()\n                sleep(5)\n            print('5 heartbeats missed: resetting')\n            self.reset()\n            self.heartbeat()\n\n        t = Thread(target=run, args=(self,))\n        t.daemon = True\n        t.start()\n\n    def __open_serial(self, baudrate=115200, timeout=0, write_timeout=0):\n        self.lock.acquire()\n        if not self.Serial.is_open:\n            self.Serial.port = self.port\n            self.Serial.baudrate = baudrate\n            self.Serial.timeout = timeout\n            self.Serial.write_timeout = write_timeout\n            self.Serial.open()\n            sleep(2)\n        self.lock.release()\n\n    def __close_serial(self):\n        self.lock.acquire()\n        if self.Serial.is_open:\n            self.Serial.close()\n        self.lock.release()\n\n    def reset(self):\n        self.__close_serial()\n        sleep(0.1)\n        self.__open_serial()\n\n    def __send(self, msg, timeout=2):\n        assert type(msg) == MSG\n        with self.lock:\n            #print(\"Sending: {}\".format(msg))\n            self.Serial.write(msg)\n            start_time = time()\n            while self.Serial.in_waiting < MSG.SIZE:\n                if time()-start_time > timeout:\n                    print(\"Response timed out\")\n                    print(\"Buffer: {}\".format(self.Serial.read(size=self.Serial.in_waiting)))\n                    return None #timeout condition\n                sleep(0.1)\n            resp = self.Serial.read(size=self.Serial.in_waiting)\n            resp = MSG(resp)\n            print(\"Received: {}\".format(resp))\n            return resp\n\n    def set(self, id, value):\n        msg = MSG(msgType=MSG.TYPE.SET, id=id, data=value)\n        #print(msg)\n        resp = self.__send(msg)\n        assert resp.type() == MSG.TYPE.SET\n        return resp\n\n    def get(self, id, type):\n        msg = MSG(msgType=MSG.TYPE.GET, id=id, data=type)\n        #print(msg)\n        resp = self.__send(msg)\n        assert resp.type() == MSG.TYPE.GET\n        return resp\n\n    def status(self, id):\n        msg = MSG(msgType=MSG.TYPE.GET, id=id, data=MSG.INFO.STATUS)\n        resp = self.__send(msg)\n        if resp != None:\n            latched = int.from_bytes(resp.data()[0:2], 'big')\n            nonLatched = int.from_bytes(resp.data()[2:4], 'big')\n            latched = MSG.latchedStatusFlag[latched]\n            nonLatched = MSG.nonLatchedStatusFlag[nonLatched]\n            errorFlag = resp.type()\n            return errorFlag, latched, nonLatched\n\nif __name__ == '__main__':\n    arduino = MCU(\"/dev/ttyAMA0\")\n    arduino.set(MSG.MOTOR.SHOULDER, 90)\n    
exit()\n","repo_name":"francescov1/scrabble-control","sub_path":"motorControl.py","file_name":"motorControl.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3067174062","text":"dp = [0] * 9999999\nwhile True:\n\tnum = int(input())\n\tif num == 0:\n\t\tbreak\n\ti = 1\n\tn = 0\n\twhile True:\n\t\tif i == 1:\n\t\t\tdp[1] = 1\n\t\telse:\n\t\t\tdp[i] = n + 1\n\t\t\tfor j in range(pre, i):\n\t\t\t\tdp[j] = n\n\t\tif num <= i:\n\t\t\tbreak\n\t\tn += 1\n\t\tpre = i\n\t\ti = dp[n] + i\n\t\n\tif num == i:\n\t\tprint(n + 1)\n\telse:\n\t\tprint(n)","repo_name":"SoleMin/Algorithmic_Problems","sub_path":"110607/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71573171662","text":"#!/usr/bin/env python\n__author__ = 'Sergei F. Kliver'\nimport argparse\nimport multiprocessing as mp\nfrom collections import OrderedDict\n\nimport pandas as pd\n\nfrom RouToolPa.Parsers.Sequence import CollectionSequence\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-i\", \"--input_file_list\", action=\"store\", dest=\"input_file_list\", required=True,\n type=lambda s: s.split(\",\"),\n help=\"Comma-separated list of files with different assemblies\")\nparser.add_argument(\"-l\", \"--labels_list\", action=\"store\", dest=\"labels_list\",\n type=lambda s: s.split(\",\"),\n help=\"Comma-separated list of assembly labels. Should have same length as list of \"\n \"input files with assemblies. Default - not set, assemblies will be named like A1, A2, ../ \")\nparser.add_argument(\"-e\", \"--thresholds\", action=\"store\", dest=\"thresholds\", default=[0, 100, 250, 500, 1000],\n type=lambda s: list(map(int, s.split(\",\"))),\n help=\"Comma-separated list of thresholds for N50 calculations. 
\"\n \"Default: 0,100,250,500,1000\")\nparser.add_argument(\"-o\", \"--output_prefix\", action=\"store\", dest=\"output_prefix\", required=True,\n help=\"Prefix of output files\")\nparser.add_argument(\"-f\", \"--format\", action=\"store\", dest=\"format\", default=\"fasta\",\n help=\"Format of input files\")\n\nargs = parser.parse_args()\n\nif args.labels_list is not None:\n if len(args.labels_list) != len(args.input_file_list):\n raise ValueError(\"Length of labels list is not equal to number of files with assemblies\")\n\nassemblies_dict = OrderedDict()\nstats_dict = OrderedDict({\"N50\": pd.DataFrame(),\n \"L50\": pd.DataFrame(),\n \"Ns\": pd.DataFrame(),\n \"Total length\": pd.DataFrame(),\n \"Total scaffolds\": pd.DataFrame()})\nthresholds_stats = OrderedDict([(threshold, pd.DataFrame()) for threshold in args.thresholds])\n\nfor i in range(0, len(args.input_file_list)):\n assembly_label = args.labels_list[i] if args.labels_list else \"A%i\" % (i + 1)\n assembly_output_prefix = \"%s.%s\" % (args.output_prefix, assembly_label)\n assemblies_dict[assembly_label] = CollectionSequence(in_file=args.input_file_list[i], parsing_mode=\"parse\").get_stats_and_features(thresholds_list=args.thresholds, count_gaps=True)\n assemblies_dict[assembly_label].to_csv(\"%s.tsv\" % assembly_output_prefix, sep=\"\\t\")\n print(assemblies_dict[assembly_label])\n for stat_entry in stats_dict:\n stats_dict[stat_entry][assembly_label] = assemblies_dict[assembly_label].loc[stat_entry]\n\n for threshold in thresholds_stats:\n thresholds_stats[threshold][assembly_label] = assemblies_dict[assembly_label][threshold]\n\nfor stat_entry in stats_dict:\n stats_dict[stat_entry].to_csv(\"%s.%s.tsv\" % (args.output_prefix, stat_entry.replace(\" \", \"_\")), sep=\"\\t\")\n\nfor threshold in thresholds_stats:\n thresholds_stats[threshold].to_csv(\"%s.%i.tsv\" % (args.output_prefix, threshold), sep=\"\\t\")\n","repo_name":"mahajrod/MAVR","sub_path":"scripts/assembly/get_stats_from_assemblies.py","file_name":"get_stats_from_assemblies.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"41251543829","text":"emails = [\"test.email+alex@leetcode.com\",\"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]\nemail = set()\nfor i in emails:\n\tuser = i.split(\"@\")[0]\n\tdomain = i.split(\"@\")[1]\n\tuser = user.split(\"+\")[0]\n\tuser = user.replace(\".\", \"\")\n\temail.add(user + \"@\" + domain)\n\nprint(len(email))","repo_name":"jason-little/python","sub_path":"emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21092390615","text":"from unittest import TestCase\n\nfrom src.model.ktssValidation import KTSSValidator\nfrom src.model.tests.factories import (\n InvalidParserFactory,\n ParserFactory,\n ParserFactoryKTSSValidatorAnnotate,\n ParserFactoryKTSSValidatorAnnotateNested,\n ParserFactoryKTSSValidatorDistances,\n)\n\n\nclass TestKTSSValidator(TestCase):\n def setUp(self) -> None:\n alphabet = {\"a\", \"b\"}\n states = {\"\", \"a\", \"b\", \"aa\", \"bb\", \"ab\", \"ba\"}\n transitions = {\n \"\": {\"a\": \"a\", \"b\": \"b\"},\n \"a\": {\"a\": \"aa\", \"b\": \"ab\"},\n \"b\": {\"a\": \"ba\", \"b\": \"bb\"},\n \"aa\": {\"a\": \"aaa\", \"b\": \"aab\"},\n \"ab\": {\"a\": \"aba\", \"b\": \"abb\"},\n \"ba\": {\"a\": \"baa\", \"b\": \"bab\"},\n \"bb\": {\"a\": \"bba\", \"b\": 
\"bbb\"},\n }\n probabilities = {\n \"\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"a\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"b\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"aa\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"ab\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"ba\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"bb\": {\"a\": 1 / 2, \"b\": 1 / 2},\n }\n initial_state = \"\"\n final_states = {\"bb\", \"aa\"}\n\n model = {\n \"alphabet\": alphabet,\n \"states\": states,\n \"transitions\": transitions,\n \"initial_state\": initial_state,\n \"final_states\": final_states,\n \"probabilities\": probabilities,\n }\n\n self.ktss_validator = KTSSValidator(model)\n\n return super().setUp()\n\n def test__set_mappings_valid(self):\n parser = ParserFactory()\n\n is_valid = False\n try:\n self.ktss_validator._set_mappings(parser)\n is_valid = True\n except NotImplementedError:\n pass\n\n self.assertTrue(is_valid)\n\n def test__set_mappings_invalid(self):\n is_invalid = False\n try:\n self.ktss_validator._set_mappings(InvalidParserFactory)\n except NotImplementedError:\n is_invalid = True\n\n self.assertTrue(is_invalid)\n\n def test__string_distances_match(self):\n string1 = \"AAA\"\n string2 = \"AAA\"\n difference = 0\n\n result = self.ktss_validator._string_distances(string1, string2)\n\n self.assertEqual(result, difference)\n\n def test__string_distances_not_match(self):\n string1 = \"AAA\"\n string2 = \"AAB\"\n difference = 1\n\n result = self.ktss_validator._string_distances(string1, string2)\n\n self.assertEqual(result, difference)\n\n def test__add_symbol_true(self):\n symbol = \"a\"\n mapping = {\"a\": \"s\"}\n symbols = []\n result = [\"s\"]\n\n KTSSValidator._add_symbol(symbol, mapping, symbols)\n\n self.assertEqual(symbols, result)\n\n def test__add_symbol_false(self):\n symbol = \"v\"\n mapping = {\"a\": \"s\"}\n symbols = []\n result = []\n\n KTSSValidator._add_symbol(symbol, mapping, symbols)\n\n self.assertEqual(symbols, result)\n\n def test__is_nested_true(self):\n mapping = {\"a\": {}}\n\n result = KTSSValidator._is_nested(mapping)\n\n self.assertTrue(result)\n\n def test__is_nested_false(self):\n mapping = {}\n\n result = KTSSValidator._is_nested(mapping)\n\n self.assertFalse(result)\n\n def test__filter_possibles(self):\n state = \"\"\n symbols = [\"a\", \"b\", \"c\", \"d\"]\n possible_symbols = [\"a\", \"b\"]\n\n result = self.ktss_validator._filter_possibles(state, symbols)\n\n self.assertEqual(result, possible_symbols)\n\n def test__filter_possibles_empty(self):\n state = \"\"\n symbols = [\"c\", \"d\"]\n possible_symbols = []\n\n result = self.ktss_validator._filter_possibles(state, symbols)\n\n self.assertEqual(result, possible_symbols)\n\n\nclass TestKTSSValidatorAnnotate(TestCase):\n def setUp(self) -> None:\n alphabet = {\"a\", \"b\", \"d\"}\n states = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"}\n transitions = {\n \"1\": {\"a\": \"2\", \"b\": \"3\"},\n \"2\": {\"a\": \"4\", \"b\": \"5\", \"d\": \"6\", \"l\": \"15\"},\n \"3\": {\"a\": \"7\", \"b\": \"8\", \"d\": \"9\"},\n \"4\": {\"z\": \"10\", \"x\": \"11\"},\n \"6\": {\"z\": \"12\"},\n \"9\": {\"z\": \"13\", \"x\": \"14\"},\n \"15\": {\"z\": \"16\"},\n }\n probabilities = {\n \"1\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"2\": {\"a\": 1 / 4, \"b\": 1 / 4, \"d\": 1 / 4, \"l\": 1 / 4},\n \"3\": {\"a\": 1 / 3, \"b\": 1 / 3, \"d\": 1 / 3},\n \"4\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"6\": {\"z\": 1},\n \"9\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"15\": {\"z\": 1},\n }\n initial_state = \"1\"\n final_states = {}\n model = {\n \"alphabet\": alphabet,\n \"states\": states,\n 
\"transitions\": transitions,\n \"initial_state\": initial_state,\n \"final_states\": final_states,\n \"probabilities\": probabilities,\n }\n self.ktss_validator = KTSSValidator(model)\n self.ktss_validator.parser = ParserFactoryKTSSValidatorAnnotate\n return super().setUp()\n\n def test_annotate_sequence_empty(self):\n sequence = \"\"\n annotated = \"\"\n\n result = self.ktss_validator.annotate_sequence(sequence)\n\n self.assertEqual(result, annotated)\n\n def test_2(self):\n sequence = \"A\"\n annotated = \"a\"\n\n result = self.ktss_validator.annotate_sequence(sequence)\n\n self.assertEqual(result, annotated)\n\n def test_3(self):\n sequence = \"AAA\"\n annotated1 = \"aaz\"\n annotated2 = \"alz\"\n\n results = [self.ktss_validator.annotate_sequence(sequence) for _ in range(1000)]\n\n self.assertIn(annotated1, results)\n self.assertNotIn(annotated2, results)\n\n def test_4(self):\n sequence = \"AG\"\n annotated = \"ad\"\n\n result = self.ktss_validator.annotate_sequence(sequence)\n\n self.assertEqual(result, annotated)\n\n def test_5(self):\n sequence = \"AA\"\n annotated_mutation = \"al\"\n annotated_not_muutation = \"aa\"\n\n results = [self.ktss_validator.annotate_sequence(sequence) for _ in range(1000)]\n\n self.assertNotIn(annotated_mutation, results)\n self.assertIn(annotated_not_muutation, results)\n\n def test_6(self):\n sequence = \"AAA\"\n annotated_mutation = \"alz\"\n annotated_not_muutation = \"aaz\"\n\n results = [self.ktss_validator.annotate_sequence(sequence) for _ in range(1000)]\n\n self.assertNotIn(annotated_mutation, results)\n self.assertIn(annotated_not_muutation, results)\n\n def test_9(self):\n sequence = \"A\"\n annotated1 = \"a\"\n annotated2 = \"b\"\n\n results = [self.ktss_validator.annotate_sequence(sequence) for _ in range(1000)]\n\n self.assertIn(annotated1, results)\n self.assertNotIn(annotated2, results)\n\n\nclass TestKTSSValidatorAnnotateNested(TestCase):\n def setUp(self) -> None:\n alphabet = {\"a\", \"b\", \"d\"}\n states = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"}\n transitions = {\n \"1\": {\"a\": \"2\", \"b\": \"3\"},\n \"2\": {\"a\": \"4\", \"b\": \"5\", \"d\": \"6\", \"l\": \"15\"},\n \"3\": {\"a\": \"7\", \"b\": \"8\", \"d\": \"9\"},\n \"4\": {\"z\": \"10\", \"x\": \"11\"},\n \"6\": {\"z\": \"12\"},\n \"9\": {\"z\": \"13\", \"x\": \"14\"},\n \"15\": {\"z\": \"16\"},\n }\n probabilities = {\n \"1\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"2\": {\"a\": 1 / 4, \"b\": 1 / 4, \"d\": 1 / 4, \"l\": 1 / 4},\n \"3\": {\"a\": 1 / 3, \"b\": 1 / 3, \"d\": 1 / 3},\n \"4\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"6\": {\"z\": 1},\n \"9\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"15\": {\"z\": 1},\n }\n initial_state = \"1\"\n final_states = {}\n model = {\n \"alphabet\": alphabet,\n \"states\": states,\n \"transitions\": transitions,\n \"initial_state\": initial_state,\n \"final_states\": final_states,\n \"probabilities\": probabilities,\n }\n self.ktss_validator = KTSSValidator(model)\n self.ktss_validator.parser = ParserFactoryKTSSValidatorAnnotateNested\n return super().setUp()\n\n def test_7(self):\n sequence = \"AA\"\n annotated1 = \"aa\"\n annotated2 = \"al\"\n\n results = [self.ktss_validator.annotate_sequence(sequence) for _ in range(1000)]\n\n self.assertIn(annotated1, results)\n self.assertNotIn(annotated2, results)\n\n\nclass TestKTSSValidatorDistances(TestCase):\n def setUp(self) -> None:\n alphabet = {\"a\", \"b\", \"d\", \"z\", \"x\"}\n states = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"}\n transitions = {\n \"1\": {\"a\": \"2\", \"b\": \"3\"},\n 
\"2\": {\"a\": \"4\", \"b\": \"5\", \"d\": \"6\", \"l\": \"15\"},\n \"3\": {\"a\": \"7\", \"b\": \"8\", \"d\": \"9\"},\n \"4\": {\"z\": \"10\", \"x\": \"11\"},\n \"6\": {\"z\": \"12\"},\n \"9\": {\"z\": \"13\", \"x\": \"14\"},\n \"15\": {\"z\": \"16\"},\n }\n probabilities = {\n \"1\": {\"a\": 1 / 2, \"b\": 1 / 2},\n \"2\": {\"a\": 1 / 4, \"b\": 1 / 4, \"d\": 1 / 4, \"l\": 1 / 4},\n \"3\": {\"a\": 1 / 3, \"b\": 1 / 3, \"d\": 1 / 3},\n \"4\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"6\": {\"z\": 1},\n \"9\": {\"z\": 1 / 2, \"x\": 1 / 2},\n \"15\": {\"z\": 1},\n }\n initial_state = \"1\"\n final_states = {}\n model = {\n \"alphabet\": alphabet,\n \"states\": states,\n \"transitions\": transitions,\n \"initial_state\": initial_state,\n \"final_states\": final_states,\n \"probabilities\": probabilities,\n }\n self.ktss_validator = KTSSValidator(model)\n self.ktss_validator.parser = ParserFactoryKTSSValidatorDistances\n return super().setUp()\n\n def test_generate_distances(self):\n sequences = [(\"AAA\", \"aaz\")]\n annotated1 = {\"aaz-aaz\": 0, \"error\": 0}\n annotated2 = {\"aaz-alz\": 1, \"error\": 1 / 3}\n\n results = [\n self.ktss_validator.generate_distances(sequences) for _ in range(1000)\n ]\n\n self.assertIn(annotated1, results)\n self.assertNotIn(annotated2, results)\n\n def test_generate_distances_empty(self):\n sequences = []\n empty = {}\n\n result = self.ktss_validator.generate_distances(sequences)\n\n self.assertEqual(result, empty)\n\n def test__get_possible_symbols(self):\n symbol = \"A\"\n current_state = \"2\"\n possible_symbols = [\"a\", \"l\"]\n\n result = self.ktss_validator._get_possible_symbols(current_state, symbol)\n\n self.assertEqual(result, possible_symbols)\n\n def test__get_possible_symbols_not_present(self):\n symbol = \"K\"\n current_state = \"2\"\n possible_symbols = [\"a\", \"b\", \"d\", \"l\"]\n\n result = self.ktss_validator._get_possible_symbols(current_state, symbol)\n\n self.assertEqual(result, possible_symbols)\n\n def test__get_possible_symbols_bad_state(self):\n symbol = \"A\"\n current_state = \"-1\"\n result = self.ktss_validator._get_possible_symbols(current_state, symbol)\n\n self.assertFalse(result)\n","repo_name":"agranadosb/TFG","sub_path":"src/model/tests/test_ktss_validation.py","file_name":"test_ktss_validation.py","file_ext":"py","file_size_in_byte":11129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5935475391","text":"def pattern_count(pattern, text):\r\n count = 0\r\n for i in range(len(text)-len(pattern)+1):\r\n if text[i:i+len(pattern)] == pattern:\r\n count = count+1\r\n return count\r\n\r\n\r\ndef faster_symbol_array(genome, symbol):\r\n array = {}\r\n n = len(genome)\r\n extended_genome = genome + genome[0:n//2]\r\n array[0] = pattern_count(symbol, genome[0:n//2])\r\n for i in range(1, n):\r\n array[i] = array[i-1]\r\n if extended_genome[i-1] == symbol:\r\n array[i] = array[i] - 1\r\n if extended_genome[(n//2) + i - 1] == symbol:\r\n array[i] = array[i] + 1\r\n return array\r\n\r\n\r\ngenome = \"AAAAGGGG\"\r\nsymbol = \"A\"\r\nprint(faster_symbol_array(genome, symbol))\r\n\r\n","repo_name":"Marzan1/Biology-Meets-Programming","sub_path":"faster-symbol_array.py","file_name":"faster-symbol_array.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72904933903","text":"import logging\nfrom pathlib import Path\nfrom typing import Tuple\n\nfrom slither.core.declarations import 
Contract\nfrom slither.tools.properties.addresses.address import Addresses\nfrom slither.tools.properties.utils import write_file\n\nlogger = logging.getLogger(\"Slither\")\n\n\ndef generate_solidity_properties(\n contract: Contract, type_property: str, solidity_properties: str, output_dir: Path\n) -> Path:\n\n solidity_import = 'import \"./interfaces.sol\";\\n'\n solidity_import += f'import \"../{contract.source_mapping.filename.short}\";'\n\n test_contract_name = f\"Properties{contract.name}{type_property}\"\n\n solidity_content = (\n f\"{solidity_import}\\ncontract {test_contract_name} is CryticInterface,{contract.name}\"\n )\n solidity_content += f\"{{\\n\\n{solidity_properties}\\n}}\\n\"\n\n filename = f\"{test_contract_name}.sol\"\n write_file(output_dir, filename, solidity_content)\n\n return Path(filename)\n\n\ndef generate_test_contract(\n contract: Contract,\n type_property: str,\n output_dir: Path,\n property_file: Path,\n initialization_recommendation: str,\n) -> Tuple[str, str]:\n test_contract_name = f\"Test{contract.name}{type_property}\"\n properties_name = f\"Properties{contract.name}{type_property}\"\n\n content = \"\"\n content += f'import \"./{property_file}\";\\n'\n content += f\"contract {test_contract_name} is {properties_name} {{\\n\"\n content += \"\\tconstructor() public{\\n\"\n content += \"\\t\\t// Existing addresses:\\n\"\n content += \"\\t\\t// - crytic_owner: If the contract has an owner, it must be crytic_owner\\n\"\n content += \"\\t\\t// - crytic_user: Legitimate user\\n\"\n content += \"\\t\\t// - crytic_attacker: Attacker\\n\"\n content += \"\\t\\t// \\n\"\n content += initialization_recommendation\n content += \"\\t\\t// \\n\"\n content += \"\\t\\t// \\n\"\n content += \"\\t\\t// Update the following if totalSupply and balanceOf are external functions or state variables:\\n\\n\"\n content += \"\\t\\tinitialTotalSupply = totalSupply();\\n\"\n content += \"\\t\\tinitialBalance_owner = balanceOf(crytic_owner);\\n\"\n content += \"\\t\\tinitialBalance_user = balanceOf(crytic_user);\\n\"\n content += \"\\t\\tinitialBalance_attacker = balanceOf(crytic_attacker);\\n\"\n\n content += \"\\t}\\n}\\n\"\n\n filename = f\"{test_contract_name}.sol\"\n write_file(output_dir, filename, content, allow_overwrite=False)\n\n return filename, test_contract_name\n\n\ndef generate_solidity_interface(output_dir: Path, addresses: Addresses):\n content = f\"\"\"\ncontract CryticInterface{{\n address internal crytic_owner = address({addresses.owner});\n address internal crytic_user = address({addresses.user});\n address internal crytic_attacker = address({addresses.attacker});\n uint internal initialTotalSupply;\n uint internal initialBalance_owner;\n uint internal initialBalance_user;\n uint internal initialBalance_attacker;\n}}\"\"\"\n\n # Static file, we discard if it exists as it should never change\n write_file(output_dir, \"interfaces.sol\", content, discard_if_exist=True)\n","repo_name":"crytic/slither","sub_path":"slither/tools/properties/solidity/generate_properties.py","file_name":"generate_properties.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":4676,"dataset":"github-code","pt":"47"} +{"seq_id":"41021683325","text":"import os\nimport torch\nimport torch.utils.data as data\nfrom torch.utils.data import DataLoader\nfrom torch.nn import functional as F\nimport numpy as np\nfrom PIL import Image\nimport glob\nimport json\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nfrom transformations_torch 
import *\nfrom model import HorizonNet\n \n\ndef warp(src_transformer, target_transformer, target_bon, H, W, ceiling_z): \n N = target_bon.shape[0] # (N, 2, 1024)\n device = target_bon.device\n \n theta = ((torch.arange(W).expand(N, W).to(device) / W)) * 2 * np.pi # (N, 1024)\n target_3d_coord = []\n \n z_bons = [ceiling_z, torch.zeros_like(ceiling_z).to(device) - 1.] # (N, 1)\n for i, z_bon in enumerate(z_bons):\n y_start = (H // 2) * i\n y_end = y_start + (H // 2)\n \n y_grid, x_grid = torch.meshgrid(torch.arange(y_start, y_end), torch.arange(W)) \n y_grid, x_grid = y_grid.expand(N, H//2, W), x_grid.expand(N, H//2, W) # (N, 256, 1024)\n x_grid = x_grid.to(device)\n y_grid = y_grid.to(device)\n \n boundary_phi = (0.5 - (target_bon[:, i] / H)) * np.pi # (N, 1024)\n boundary_dist = torch.abs(z_bon / torch.tan(boundary_phi)) # (N, 1024)\n\n boundary_x = boundary_dist * torch.sin(theta) # (N, 1024)\n boundary_y = boundary_dist * torch.cos(theta + np.pi) # (N, 1024)\n boundary_R = torch.abs(z_bon / torch.sin(boundary_phi)) # (N, 1024)\n\n assert not torch.any(torch.isnan(boundary_dist))\n\n all_phi = (0.5 - (y_grid / H)) * np.pi # (N, 256, 1024)\n all_dist = torch.min(torch.abs(z_bon[:, :, None] / torch.tan(all_phi)), boundary_dist[:, None, :]) # (N, 256, 1024)\n all_x = all_dist * torch.sin(theta[:, None, :]) # (N, 256, 1024)\n all_y = all_dist * torch.cos(theta[:, None, :] + np.pi) # (N, 256, 1024) \n all_z = torch.max(torch.min(boundary_R[:, None, :] * torch.sin(all_phi), ceiling_z[:, :, None]), torch.Tensor([-1.]).to(device))\n\n target_3d_coord.append(torch.stack([all_x, all_y, all_z], dim=-1).view(N, -1, 3)) # (N, 256*1024, 3)\n \n\n target_3d_coord = torch.cat(target_3d_coord, dim=1) # (N, 512*1024, 3)\n global_2d_coord = target_transformer.to_global(target_3d_coord[:, :, :2]) # (N, 512*1024, 2)\n\n src_2d_coord = src_transformer.apply_inverse(global_2d_coord) # (N, 512*1024, 2)\n src_3d_coord = torch.cat([src_2d_coord, target_3d_coord[:, :, 2:]], dim=-1).view(-1, 3) # (N*512*1024, 3)\n\n src_pix_coord = TransformationSpherical.cartesian_to_pixel(src_3d_coord, W) # (N*512*1024, 2)\n \n src_x_grid = ((src_pix_coord[:, 0] / W - 0.5) * 2).view(N, H, W) # (N, 512, 1024)\n src_y_grid = ((src_pix_coord[:, 1] / H - 0.5) * 2).view(N, H, W) # (N, 512, 1024)\n \n grid = torch.stack([src_x_grid, src_y_grid], axis=-1) # (N, 512, 1024, 2)\n \n return grid \n\ndef generate_mask(shape, margin):\n N, C, H, W = shape\n return torch.cat([torch.ones(N, C, H - margin, W), torch.zeros(N, C, margin, W)], axis=2)\n\ndef inference_ceiling(y_bon, H, W):\n N, C, _ = y_bon.shape\n local_2d = compute_local(y_bon, H, W, torch.ones(N, 1).to(y_bon.device)) # (N, 2*1024, 2)\n ceil_2d = local_2d[:, :W, :]\n floor_2d = local_2d[:, W:, :]\n \n ceil_dist = torch.norm(ceil_2d, dim=-1)\n floor_dist = torch.norm(floor_2d, dim=-1)\n \n scale = (floor_dist / ceil_dist).mean(dim=-1) \n\n return scale\n\ndef compute_local(src_bon, H, W, ceiling_z):\n N, C, _ = src_bon.shape\n device = src_bon.device\n theta = ((torch.arange(W).expand(N, C, W).to(device) / W)) * 2 * np.pi #(N, 2, 1024)\n \n z_bons = torch.cat([ceiling_z, torch.zeros_like(ceiling_z) - 1.], dim=-1)\n src_phi = (0.5 - (src_bon / H)) * np.pi # (N, 2, 1024)\n src_dist = z_bons[:, :, None] / torch.tan(src_phi) # (N, 2, 1024)\n src_x = src_dist * torch.sin(theta) # (N, 2, 1024)\n src_y = src_dist * torch.cos(theta + np.pi) # (N, 2, 1024)\n src_local_2d = torch.stack([src_x, src_y], axis=-1).view(N, -1, 2) # (N, 2*1024, 2)\n \n return src_local_2d\n\ndef 
compute_global(src_bon, src_transformer, H, W, ceiling_z):\n src_local_2d = compute_local(src_bon, H, W, ceiling_z) # (N, 2*1024, 2)\n src_global_2d = src_transformer.to_global(src_local_2d) # (N, 2*1024, 2)\n \n return src_global_2d\n\n\n","repo_name":"joshua049/Stereo-360-Layout","sub_path":"DLVR.py","file_name":"DLVR.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"11785238096","text":"from mongoengine import *\nfrom zatiq_food_items import Zatiq_Food_Items\nfrom zatiq_businesses import Zatiq_Businesses\nimport random\n\nclass ZatiqGuestsClient(object):\n def get_guest_food_by_button(self, button):\n if button == 'promotions':\n try:\n food_items = Zatiq_Food_Items.objects.order_by('discount_price')\n except Exception as e:\n return(\"Error \\n %s\" % (e))\n\n if len(food_items) > 0:\n return(self.generate_food_items_dict(food_items))\n else:\n return([])\n\n elif button == 'top_picks':\n try:\n food_items = Zatiq_Food_Items.objects.order_by('-views')\n except Exception as e:\n return(\"Error \\n %s\" % (e))\n\n if len(food_items) > 0:\n food_items_dict = self.generate_food_items_dict(food_items)\n return(food_items_dict)\n else:\n return([])\n\n elif button == 'newest':\n try:\n food_items = Zatiq_Food_Items.objects.order_by('-date_created')\n except Exception as e:\n return(\"Error \\n %s\" % (e)) \n\n if len(food_items) > 0:\n food_items_dict = self.generate_food_items_dict(food_items)\n return(food_items_dict)\n else:\n return([])\n\n elif button == 'surprise_me':\n try:\n food_items = Zatiq_Food_Items.objects.order_by('views')\n except Exception as e:\n return(\"Error \\n %s\" % (e))\n\n if len(food_items) > 0:\n food_items_dict = self.generate_food_items_dict(food_items)\n return(food_items_dict) \n else:\n return([])\n\n else:\n return('Category not found')\n \n\n def generate_food_items_dict(self, food_items):\n food_items_list = []\n for food_item in range(len(food_items)):\n try:\n Zatiq_Food_Items.objects(id=food_items[food_item].id).modify(inc__views=1)\n except Exception as e:\n print(\"Error \\n %s\" % (e))\n food_item_id = food_items[food_item].id\n restaurant_id = food_items[food_item].restaurant_id.id\n restaurant_info = self.get_restaurant_info(restaurant_id)\n item_name = food_items[food_item].item_name\n overview = food_items[food_item].overview\n image = \"http://167.99.177.29:5000/image/\"+str(food_items[food_item].image)\n item_price = food_items[food_item].item_price\n is_beverage = food_items[food_item].is_beverage\n meal_types = self.generate_meals_dict(food_items[food_item].meal_type)\n image_aspect_ratio = food_items[food_item].image_aspect_ratio\n tags = self.generate_tags_dict(food_items[food_item].tags, is_beverage)\n meats = self.generate_meats_dict(food_items[food_item].tags.meat)\n seafoods = self.generate_seafoods_dict(food_items[food_item].tags.seafood)\n calories = food_items[food_item].calories\n food_item_info = {'food_item_id': str(food_item_id), 'restaurant_id': str(restaurant_id), 'restaurant_info': restaurant_info, 'item_name': item_name, 'meal_type': meal_types, 'item_price': str(item_price), 'overview': overview, 'image': {'base64': image, 'image_aspect_ratio': image_aspect_ratio}, 'tags': tags, 'meat': meats, 'seafood': seafoods, 'calories': calories}\n food_items_list.append(food_item_info)\n if len(food_items_list) > 5:\n food_items_list = random.sample(food_items_list, 5)\n return(food_items_list)\n\n def get_restaurant_info(self, 
restaurant_id):\n try:\n zatiq_business = Zatiq_Businesses.objects(id=restaurant_id)\n except Exception as e:\n return(\"Error \\n %s\" % (e))\n\n if len(zatiq_business) > 0:\n restaurant_id = zatiq_business[0].id\n email = zatiq_business[0].business_email\n name = zatiq_business[0].business_name\n website = zatiq_business[0].website\n hours = self.generate_business_hours(zatiq_business[0].hours)\n number = zatiq_business[0].number\n features = {'delivery': zatiq_business[0].delivery, 'takeout': zatiq_business[0].takeout, 'reservation': zatiq_business[0].reservation, 'patio': zatiq_business[0].patio, 'wheelchair_accessible': zatiq_business[0].wheelchair_accessible, 'parking': zatiq_business[0].parking, 'buffet': zatiq_business[0].buffet, 'family_friendly': zatiq_business[0].family_friendly, 'pescetarian_friendly': zatiq_business[0].pescetarian_friendly, 'wifi': zatiq_business[0].wifi}\n image = {'base64': \"http://167.99.177.29:5000/image/\"+str(zatiq_business[0].image), 'image_aspect_ratio': zatiq_business[0].image_aspect_ratio}\n \n address = zatiq_business[0].address\n restaurant_info = {'restaurant_id': str(restaurant_id), 'email': email, 'name': name, 'website': website, 'hours': hours, 'number': number, 'features': features, 'image': image, 'address': address}\n return(restaurant_info)\n else:\n return('Could not find a restaurant with that id')\n\n def generate_business_hours(self, business):\n hours_dict = {'start': {\n 'monday': business.monday_start,\n 'tuesday': business.tuesday_start,\n 'wednesday': business.wednesday_start,\n 'thursday': business.thursday_start,\n 'friday': business.friday_start,\n 'saturday': business.saturday_start,\n 'sunday': business.sunday_start\n }, 'end': {\n 'monday': business.monday_end,\n 'tuesday': business.tuesday_end,\n 'wednesday': business.wednesday_end,\n 'thursday': business.thursday_end,\n 'friday': business.friday_end,\n 'saturday': business.saturday_end,\n 'sunday': business.sunday_end\n }}\n return(hours_dict)\n\n def generate_tags_dict(self, tags, is_beverage):\n tags_dict = {'indian': tags.indian, 'greek': tags.greek, 'chinese': tags.chinese, 'japanese': tags.japanese, 'korean': tags.korean, 'sushi': tags.sushi, 'dessert': tags.dessert, 'burger': tags.burger,\n 'pizza': tags.pizza, 'fast_food': tags.fast_food, 'halal': tags.halal, 'caribbean': tags.caribbean, 'mexican': tags.mexican, 'spicy': tags.spicy, 'fine_food': tags.fine_food, 'kosher': tags.kosher,\n 'healthy': tags.healthy, 'vegan': tags.vegan, 'vegetarian': tags.vegetarian, 'gluten_free': tags.gluten_free, 'italian': tags.italian, 'middle_eastern': tags.middle_eastern, 'snack': tags.snack, 'thai': tags.thai,\n 'canadian': tags.canadian, 'vietnamese': tags.vietnamese, 'has_soybeans': tags.has_soybeans, 'has_eggs': tags.has_eggs, 'jain': tags.jain, 'has_wheat': tags.has_wheat, 'has_treenuts': tags.has_treenuts, 'has_peanuts': tags.has_peanuts, 'lactose_free': tags.lactose_free, 'is_beverage': is_beverage}\n return(tags_dict)\n\n def generate_meals_dict(self, meal_types):\n meals_dict = {'breakfast': meal_types.breakfast, 'lunch': meal_types.lunch, 'dinner': meal_types.dinner, 'brunch': meal_types.brunch}\n return(meals_dict)\n\n def generate_meats_dict(self, meats):\n meats_dict = {'bear': meats.bear, 'beef': meats.beef, 'buffalo': meats.buffalo, 'calf': meats.calf, 'caribou': meats.caribou, 'goat': meats.goat, 'ham': meats.ham, 'horse': meats.horse, 'kangaroo': meats.kangaroo, 'lamb': meats.lamb,\n 'moose': meats.moose, 'mutton': meats.mutton, 'opossum': meats.opossum, 'pork': 
meats.pork, 'bacon': meats.bacon, 'rabbit': meats.rabbit, 'snake': meats.snake, 'squirrel': meats.squirrel, 'turtle': meats.turtle, 'veal': meats.veal,\n 'chicken': meats.chicken, 'hen': meats.hen, 'duck': meats.duck, 'goose': meats.goose, 'ostrich': meats.ostrich, 'quail': meats.quail, 'turkey': meats.turkey}\n return(meats_dict)\n\n def generate_seafoods_dict(self, sea):\n seafoods_dict = {'clam': sea.clam, 'pangasius': sea.pangasius, 'cod': sea.cod, 'crab': sea.crab, 'catfish': sea.catfish, 'alaska_pollack': sea.alaska_pollack, 'tilapia': sea.tilapia, 'salmon': sea.salmon, 'tuna': sea.tuna, 'shrimp': sea.shrimp,\n 'lobster': sea.lobster, 'eel': sea.eel, 'trout': sea.trout, 'pike': sea.pike, 'shark': sea.shark}\n return(seafoods_dict) ","repo_name":"hasanjafri/Zatiq-Middleware","sub_path":"src/zatiq_guests_client.py","file_name":"zatiq_guests_client.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26907842503","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExpiringToken',\n fields=[\n ('key', models.CharField(max_length=40, serialize=False, primary_key=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('expiry_time', models.DateTimeField(default=datetime.datetime.now)),\n ('user', models.OneToOneField(related_name='auth_token', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n","repo_name":"SaurabhGoyal/ExpenseManager_API","sub_path":"apps/account/migrations/0002_expiringtoken.py","file_name":"0002_expiringtoken.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1167582288","text":"from django import forms\nfrom .models import *\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nchoices = (('Accessory&Clothing', 'Accessory&Clothing'),\n ('Food Items', 'Food Items'),\n ('Toys', 'Toys'),\n ('Medicines', 'Medicines'),\n ('Animal Furniture', 'Animal Furniture'),)\nanimal_choices = (('Dog', 'Dog'),\n ('Cat', 'Cat'),\n ('Rabbit', 'Rabbit'),\n ('Guinea Pig', 'Guinea Pig'),\n ('Hamster', 'Hamster'),\n ('Turtle', 'Turtle'),\n ('Parrot', 'Parrot'),\n ('Lizard', 'Lizard'),\n ('Fish', 'Fish'),)\nch = (('M', 'Male'),\n ('F', 'Female'),)\nch1=(('Yes','Yes'),(\"No\",\"No\"),)\n\ncolorchoice=(('brown', 'Brown'),\n ('golden', 'Golden'),\n ('apricot', 'Apricot'),\n ('chestnut','Chestnut'),\n ('sable','Sable'),\n ('cream','Cream'),\n ('tan','Tan'),\n ('fuschia','Fuschia'),\n ('merle','Merle'),\n ('chocolate','Chocolate'),\n ('brindle','Brindle'),\n)\nchoices12 = (('Hairless', 'Hairless'), ('Short', 'Short'), ('Medium','Medium'), ('Long', 'Long'), ('Wire', 'Wire'), ('Curly', 'Curly'))\n\n\nclass UserRegisterForm(UserCreationForm):\n email = forms.EmailField()\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'username',\n 'email', 'password1', 'password2']\n\n\nclass UserUpdateForm(forms.ModelForm):\n email = forms.EmailField()\n\n class Meta:\n model = User\n fields = ['username', 'email']\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n\n class Meta:\n model = Profile\n 
fields = ['photo', 'bio', 'phone',\n 'numberofchildren', 'animalpreferences']\n\n\nclass AddAnimal(forms.Form):\n animal_type = forms.ChoiceField(\n choices=animal_choices, label='Type of Animal')\n animal_breed = forms.CharField(\n required=True, min_length=3, strip=True, label='And Breed')\n avglife = forms.IntegerField(min_value=1, label=\"Average Life\")\n color = forms.CharField(required=True, min_length=3,\n strip=True, label='Colors Available')\n temperament = forms.CharField(\n required=True, min_length=3, strip=True, label='General Temperament')\n\n\n\nclass AddMultipleImages(forms.ModelForm):\n photo1 = forms.ImageField(label='Image') \n class Meta:\n model = pictures\n fields = ['photo1']\n\n\nclass AddItem(forms.Form):\n name = forms.CharField(required=True, min_length=3,\n strip=True, label='Name of Item')\n type = forms.ChoiceField(choices=choices, label='Type of Item')\n description = forms.CharField(\n required=True, min_length=3, strip=True, label='Description')\n cost = forms.FloatField(min_value=1.0, label='Cost of Item')\n rating = forms.FloatField(min_value=1.0, label='Rating')\n brand = forms.CharField(required=True, min_length=3,\n strip=True, label='Brand')\n animal_types = forms.ChoiceField(\n choices=animal_choices, label='For Animal')\n animal_breeds = forms.CharField(\n required=True, min_length=3, strip=True, label='And Breed')\n\n\nclass AddLocation(forms.Form):\n housenumber = forms.CharField(\n required=True, min_length=2, strip=True, label=\"House Name\")\n street = forms.CharField(required=True, min_length=2,\n strip=True, label=\"Street\")\n pincode = forms.CharField(\n required=True, min_length=5, max_length=6, strip=True, label=\"Pincode\")\n\n\nclass AddPet(forms.Form):\n pet_name = forms.CharField(\n required=True, min_length=2, strip=True, label=\"Name of Pet\")\n age = forms.IntegerField(min_value=0, label=\"Age of Pet\")\n gender = forms.ChoiceField(choices=ch, label='Gender')\n remarks = forms.CharField(strip=True, label=\"Remarks(if any)\")\n disease = forms.CharField(\n strip=True, label=\"Diseases(if any): [Separate using ,]\")\n animal_types = forms.CharField(\n required=True, min_length=3, strip=True, label='Animal')\n animal_breeds = forms.CharField(\n required=True, min_length=3, strip=True, label='Breed')\n height = forms.FloatField(min_value=0.1, label=\"Height\")\n weight = forms.FloatField(min_value=0.1, label=\"Weight\")\n coatlength = forms.ChoiceField(choices=choices12, label='Coat Length')\n color = forms.CharField(strip=True, required=True,\n min_length=3, label=\"Color of the Pet\")\n spayneuter = forms.ChoiceField(choices=ch1,label=\"Is it Spayed/Neutered?\")\n photo = forms.ImageField()\n\n\nclass AddBrand(forms.ModelForm):\n email = forms.EmailField()\n rating = forms.FloatField(min_value=1.0)\n contact = forms.CharField(\n required=True, strip=True, min_length=10, max_length=13)\n\n class Meta:\n model = brand\n fields = ['brand_name', 'rating', 'email', 'contact', ]\n\n \n \n\nclass adoptform(forms.Form):\n animal_breed=forms.CharField(strip=True, max_length=100, required = False)\n color = forms.ChoiceField(choices=colorchoice, label=\"Color\")\n coatlength = forms.ChoiceField(choices=choices12, label=\"Coat Length\") \n gender = forms.ChoiceField(choices=ch, label=\"Gender\")","repo_name":"DhruvNair/Barkery","sub_path":"aio/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"32856460165","text":"import logging\nimport os\nimport platform\nfrom contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout\nfrom functools import wraps\nfrom importlib.util import find_spec\nfrom io import StringIO\nfrom pathlib import Path\nfrom typing import Iterator, List\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom jsonargparse import ArgumentParser\nfrom jsonargparse._optionals import (\n docstring_parser_support,\n fsspec_support,\n jsonschema_support,\n set_docstring_parse_options,\n url_support,\n)\n\nif docstring_parser_support:\n from docstring_parser import DocstringStyle\n\n set_docstring_parse_options(style=DocstringStyle.GOOGLE)\n\n\nis_cpython = platform.python_implementation() == \"CPython\"\nis_posix = os.name == \"posix\"\n\nskip_if_not_cpython = pytest.mark.skipif(\n not is_cpython,\n reason=\"only supported in CPython\",\n)\n\nskip_if_not_posix = pytest.mark.skipif(\n not is_posix,\n reason=\"only supported in posix systems\",\n)\n\n\nskip_if_jsonschema_unavailable = pytest.mark.skipif(\n not jsonschema_support,\n reason=\"jsonschema package is required\",\n)\n\nskip_if_fsspec_unavailable = pytest.mark.skipif(\n not fsspec_support,\n reason=\"fsspec package is required\",\n)\n\nskip_if_docstring_parser_unavailable = pytest.mark.skipif(\n not docstring_parser_support,\n reason=\"docstring-parser package is required\",\n)\n\nskip_if_requests_unavailable = pytest.mark.skipif(\n not url_support,\n reason=\"requests package is required\",\n)\n\nresponses_available = bool(find_spec(\"responses\"))\n\nskip_if_responses_unavailable = pytest.mark.skipif(\n not responses_available,\n reason=\"responses package is required\",\n)\n\nif responses_available:\n import responses\n\n responses_activate = responses.activate\nelse:\n\n def nothing_decorator(func):\n return func\n\n responses_activate = nothing_decorator\n\n\n@pytest.fixture\ndef parser() -> ArgumentParser:\n return ArgumentParser(exit_on_error=False)\n\n\n@pytest.fixture\ndef subparser() -> ArgumentParser:\n return ArgumentParser(exit_on_error=False)\n\n\n@pytest.fixture\ndef example_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"app\", exit_on_error=False)\n group_1 = parser.add_argument_group(\"Group 1\", name=\"group1\")\n group_1.add_argument(\"--bool\", type=bool, default=True)\n group_2 = parser.add_argument_group(\"Group 2\")\n group_2.add_argument(\"--nums.val1\", type=int, default=1)\n group_2.add_argument(\"--nums.val2\", type=float, default=2.0)\n return parser\n\n\n@pytest.fixture\ndef tmp_cwd(tmpdir) -> Iterator[Path]:\n with tmpdir.as_cwd():\n yield Path(tmpdir)\n\n\n@pytest.fixture\ndef file_r(tmp_cwd) -> Iterator[str]:\n filename = \"file_r\"\n Path(filename).touch()\n yield filename\n\n\n@pytest.fixture\ndef logger() -> logging.Logger:\n logger = logging.getLogger(__name__)\n logger.level = logging.DEBUG\n logger.parent = None\n logger.handlers = [logging.StreamHandler()]\n return logger\n\n\n@contextmanager\ndef capture_logs(logger: logging.Logger) -> Iterator[StringIO]:\n with ExitStack() as stack:\n captured = StringIO()\n for handler in logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n stack.enter_context(patch.object(handler, \"stream\", captured))\n yield captured\n\n\n@contextmanager\ndef source_unavailable():\n with patch(\"inspect.getsource\", side_effect=OSError(\"could not get source code\")):\n yield\n\n\ndef get_parser_help(parser: ArgumentParser) -> str:\n out = StringIO()\n with patch.dict(os.environ, 
{\"COLUMNS\": \"200\"}):\n parser.print_help(out)\n return out.getvalue()\n\n\ndef get_parse_args_stdout(parser: ArgumentParser, args: List[str]) -> str:\n out = StringIO()\n with redirect_stdout(out), pytest.raises(SystemExit):\n parser.parse_args(args)\n return out.getvalue()\n\n\ndef get_parse_args_stderr(parser: ArgumentParser, args: List[str]) -> str:\n err = StringIO()\n with patch.object(parser, \"exit_on_error\", return_value=True):\n with redirect_stderr(err), pytest.raises(SystemExit):\n parser.parse_args(args)\n return err.getvalue()\n\n\nclass BaseClass:\n def __init__(self):\n pass\n\n\ndef wrap_fn(fn):\n @wraps(fn)\n def wrapped_fn(*args, **kwargs):\n return fn(*args, **kwargs)\n\n return wrapped_fn\n","repo_name":"omni-us/jsonargparse","sub_path":"jsonargparse_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"47"} +{"seq_id":"1253473843","text":"import ast\nfrom collections import deque\nt = int(input())\n\nfor _ in range(t):\n cmds = list(input())\n n = int(input())\n a = input()\n a = deque(map(int, ast.literal_eval(a)))\n\n isReverse = False\n isError = False\n for cmd in cmds:\n if cmd == \"R\":\n if isReverse == True:\n isReverse = False\n else:\n isReverse = True\n else:\n if len(a) == 0:\n print('error')\n isError = True\n break\n else:\n if isReverse:\n a.pop()\n else:\n a.popleft()\n if not isError:\n if isReverse:\n a = list(reversed(list(a)))\n\n print(\"[\" + ','.join(map(str, a)) + \"]\")\n\n","repo_name":"minyoung62/Algorithm-Study","sub_path":"백준/Gold/5430. AC/AC.py","file_name":"AC.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25135974412","text":"n=int(input())\na=list(map(int,input().split()))\ns=sum(a)\nans=n-1\nif s%n!=0:\n print(-1)\nelse:\n flg=1\n for i in range(n-1):\n if sum(a[:i+1])/(i+1)==s//n:\n ans-=1\n for j in range(i+1):\n a[j]=s//n\n else:\n flg+=1\n print(ans)","repo_name":"tttnishimo/procon-archive","sub_path":"atcoder.jp/abc027/abc027_b/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11849666285","text":"import numpy as np\nimport pandas as pd\n\nfrom pmsys_pipeline.preprocessing_module import TSTrainValSplit\n\ninput_ts = pd.DataFrame(\n {\n \"feature_a\": np.arange(1, 20),\n \"feature_b\": np.arange(1, 20),\n \"feature_c\": np.arange(1, 20),\n }\n)\ny_ts = np.array(input_ts[\"feature_a\"]).reshape(-1, 1)\n\noutput_first_train = [\n pd.DataFrame(\n np.array(\n [\n [1, 1, 1],\n [2, 2, 2],\n [3, 3, 3],\n [4, 4, 4],\n [5, 5, 5],\n [6, 6, 6],\n [7, 7, 7],\n ]\n ),\n columns=[\"feature_a\", \"feature_b\", \"feature_c\"],\n ),\n pd.DataFrame(\n np.array(\n [\n [6, 6, 6],\n [7, 7, 7],\n [8, 8, 8],\n [9, 9, 9],\n [10, 10, 10],\n [11, 11, 11],\n [12, 12, 12],\n ]\n ),\n columns=[\"feature_a\", \"feature_b\", \"feature_c\"],\n ),\n]\n\noutput_first_val = [pd.DataFrame([[8], [9], [10]]), pd.DataFrame([[13], [14], [15]])]\n\noutput_second_train = [\n pd.DataFrame(\n np.array(\n [\n [1, 1, 1],\n [2, 2, 2],\n [3, 3, 3],\n [4, 4, 4],\n [5, 5, 5],\n [6, 6, 6],\n [7, 7, 7],\n [8, 8, 8],\n [9, 9, 9],\n ]\n ),\n columns=[\"feature_a\", \"feature_b\", \"feature_c\"],\n ),\n]\n\noutput_second_val = [pd.DataFrame([[10]])]\n\n\ndef test_train_val_split():\n first_split = 
TSTrainValSplit(window_size=10, lag=5, validation_size=3)\n    train, val = first_split.fit_transform(input_ts, y_ts)\n    assert [\n        pd.testing.assert_frame_equal(left, right)\n        for left, right in zip(train, output_first_train)\n    ]\n    assert [\n        pd.testing.assert_frame_equal(left, right)\n        for left, right in zip(val, output_first_val)\n    ]\n    second_split = TSTrainValSplit(window_size=10, lag=10, validation_size=1)\n    train, val = second_split.fit_transform(input_ts, y_ts)\n    assert [\n        pd.testing.assert_frame_equal(left, right)\n        for left, right in zip(train, output_second_train)\n    ]\n    assert [\n        pd.testing.assert_frame_equal(left, right)\n        for left, right in zip(val, output_second_val)\n    ]\n","repo_name":"matthiasboeker/imputation-pipeline","sub_path":"tests/test_train_val_split.py","file_name":"test_train_val_split.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26869819352","text":"class Solution:\n    def plusOne(self, digits: List[int]) -> List[int]:\n        idx = 0\n        while True:\n            idx += 1\n            if idx > len(digits):\n                digits = [0] + digits\n            digits[-idx] += 1\n            if digits[-idx] < 10:\n                return digits\n            else:\n                digits[-idx] = 0","repo_name":"RotBeer/leethub","sub_path":"0066-plus-one/0066-plus-one.py","file_name":"0066-plus-one.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38680614049","text":"from flask import Flask, render_template, redirect, request\nfrom modules.songs_backend import get_auth_params, initialize_state, request_token, get_liked_songs, refresh_token, invalid_state\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n\"\"\"\nExample route to retrieve data.\nIn this example, upon a button click, a user's liked songs are retrieved.\n\"\"\"\n@app.route('/likedsongs')\ndef liked_songs():\n    liked_songs = get_liked_songs()\n    return render_template('redirect.html')\n\n\"\"\"\nBase this route on the callback URI.\nAfter step 1 is completed, we are redirected here.\nCheck state variable matches existing, only complete step 2 if so.\n\"\"\"\n#Example is localhost:PORT/callback\n@app.route('/callback')\ndef callback():\n    if invalid_state(request.args.get('state')):\n        redirect(\"/\")\n    code = request.args.get('code')\n    request_token(code)\n    return render_template('redirect.html')\n\n\"\"\"\nIn example, clicking authorize button redirects here\nIf token exists, refresh token.\nIf not, complete step 1.\n\"\"\"\n@app.route('/authorize', methods=['GET'])\ndef authorize():\n    if refresh_token():\n        return render_template('redirect.html')\n    else:\n        initialize_state()\n        params = get_auth_params()\n        authorization_url = 'https://accounts.spotify.com/authorize'\n        return redirect(authorization_url + '?' + '&'.join([f'{k}={v}' for k, v in params.items()]))\n\nif __name__ == '__main__':\n    app.run(host='localhost', port=8080)","repo_name":"sartain/spotify-auth-example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"28234424072","text":"## the containers form an n x n array\n#empty container = 0; if a chemical is inside, 1~9 depending on its type \n#condition 1: the containers holding chemicals form a rectangle, with no empty container inside (no 0 inside the matrix )\n#condition 2: the width and height always differ \n#condition 3: between matrices there are empty containers (0xn or nxo matrices). 
(diagonals may or may not be present)\n# print in increasing order of n*m; when equal, in increasing order of the row count (n) \n\n# import sys\n\n# sys.stdin = open('input.txt','r')\n\nT = int(input())\nfor t in range(1,2+1) :\n    n = int(input())\n\n    grid = [list(map(int,input().split())) for _ in range(n)]\n    #list to hold the results\n    res = []\n\n    #iterate over the grid\n    for r in range(n) :\n        for c in range(n) :\n            #if there is a value, use it as the initial x, y \n            if grid[r][c] :\n                x = r\n                y = c \n                \n                #initial values for the row length and column length\n                len_r = 0\n                len_c = 0\n                #walk along the column: while in range and a value exists, increase y \n                while 0<=y bool:\n        \"\"\"Returns true if this StopTime is a flag stop\"\"\"\n        return self.trip.route.route_type == \"2\" and (\n            self.pickup_type == \"3\" or self.drop_off_type == \"3\"\n        )\n\n    def is_early_departure(self) -> bool:\n        \"\"\"Returns true if this StopTime is an early departure stop\"\"\"\n        return (\n            self.trip.route.route_type == \"2\"\n            and self.timepoint == \"0\"\n            and not self.is_destination()\n        )\n\n    def is_active(self, date: datetime) -> bool:\n        \"\"\"Returns true if this StopTime is active on the given date and time\"\"\"\n\n        return self.trip.calendar.operates_on_date(date) and self.departure_seconds > (\n            get_current_time().timestamp() - get_date().timestamp()\n        )\n\n    def is_destination(self) -> bool:\n        \"\"\"Returns true if this StopTime is the last stop in the trip\"\"\"\n        return self.stop_sequence == max(\n            st.stop_sequence for st in self.trip.stop_times\n        )\n\n    def as_html(self) -> str:\n        \"\"\"Returns a StopTime obj as an html row\"\"\"\n\n        trip_name = self.trip.trip_short_name or self.trip_id\n\n        flag_stop = (\n            \"
\"\n f\"{trip_name}\"\n \"Flag stop.
\"\n if self.is_flag_stop()\n else \"\"\n )\n\n early_departure = (\n \"
\"\n f\"{trip_name}\"\n \"Early departure stop.
\"\n if self.is_early_departure()\n else \"\"\n )\n\n return (\n f\"\"\" \"\"\"\n f\"\"\"{self.trip.route.route_short_name or self.trip.route.route_long_name}\"\"\"\n f\"\"\"{flag_stop or early_departure or trip_name}\"\"\"\n f\"\"\"{self.destination_label}\"\"\"\n f\"\"\"{format_time(self.departure_time)}\"\"\"\n f\"\"\"{self.stop.platform_name or \"\"}\"\"\"\n )\n","repo_name":"tandy-c/mbta_mapper","sub_path":"gtfs_orms/stop_time.py","file_name":"stop_time.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"27694360839","text":"f = open(\"day2input.txt\", 'r')\nspread = []\nfor line in f:\n\tl = line.strip().split()\n\tfor x in range(len(l)):\n\t\tl[x] = int(l[x])\n\tspread.append(l)\n\nsum = 0\nfor l in spread:\n\tfor i in range(len(l)):\n\t\tfor j in range(i+1, len(l)):\n\t\t\tif l[i] % l[j] == 0:\n\t\t\t\tsum += l[i]/l[j]\n\t\t\tif l[j] % l[i] == 0:\n\t\t\t\tsum += l[j]/l[i]\n\nprint(sum)","repo_name":"NathanielLovin/AdventOfCode2017","sub_path":"day2b.py","file_name":"day2b.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12439144879","text":"name = \"e-mail_1\"\nversion = \"2016-03-15T1401Z\"\n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ndef main():\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = \"We are watching you.\"\n message[\"From\"] = \"thecolonel@localhost\"\n message[\"To\"] = \"mulder@fbi.g0v\"\n \n text = \"The Event is at hand.\"\n html = \"\"\"\\\n \n \n \n
<p>
\n The Event is at hand.\n
</p>
\n \n \n \"\"\"\n \n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n \n message.attach(part1)\n message.attach(part2)\n \n try:\n server = smtplib.SMTP(\"localhost\")\n server.sendmail(\n message[\"From\"],\n message[\"To\"],\n message.as_string()\n )\n server.quit()\n except smtplib.SMTPException:\n print(\"e-mail send error\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wdbm/goutong","sub_path":"e-mail_1.py","file_name":"e-mail_1.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72599192462","text":"# Importando o módulo forms\nfrom django import forms\n\n# importando o modelo Topic que definimos e o modelo Entry\nfrom .models import Topic, Entry\n\n\n# Definimos a classe TopicForms quer herda de forms.modelForm\nclass TopicForm(forms.ModelForm):\n # A classe aninhada Meta, diz ao django para em qual modelo\n #o formulario deve se basear e quais seus campos\n class Meta:\n # O modelo se baseará em Topic (Template)\n model = Topic\n # Incluindo o campo Text\n fields = ['text']\n # Especifica ao django para não gerar um rótulo para o campo\n labels = {'text': ''}\n\n# Essa classe herda de ModelForm\nclass EntryForm(forms.ModelForm):\n # Classe Meta aninhada que lista o modelo no qual ela está baseada\n #e o campo a ser incluído no formulário\n class Meta:\n model = Entry\n fields = ['text']\n # Rótulo vazio para text\n labels = {'text': ''}\n # Widget é um elemento de formulário HTML, com ele podemos \n #sobrescrever atributos default de um elemento.\n # Nesse caso estamos definindo que text area terá 80 colunas \n #ao invés das 40 padrão.\n widgets = {'text': forms.Textarea(attrs={'cols':80})}\n","repo_name":"fabioTowers/learning_log","sub_path":"learning_logs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16307980832","text":"PIECES = 7\nNUM_DICES = 4\nDICE = [0, 1, 0, 1]\n\nSTART_BOXES = 4\nMIDDLE_BOXES = 8\nEND_BOXES = 2\nBOXES = START_BOXES + MIDDLE_BOXES + END_BOXES + 2\n\nWHITE = 1\nBLACK = -1\n\nROSETTA = [4, 8, 14]\n\nUPDATE_VIEW_SCORE = 10000\n","repo_name":"augustocolo/gameOfUr","sub_path":"constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12734288928","text":"from flask import jsonify\nfrom flask import Flask,request\nimport pyotp\nimport pyqrcode\nfrom io import BytesIO\nfrom asyncio import streams\nimport os\n\napp = Flask(__name__)\nsecret = pyotp.random_base32()\nname = 'al1eum'\nissuer_name = 'al1enum.local'\nuri = pyotp.totp.TOTP(secret).provisioning_uri(name, issuer_name)\nprint (uri)\ntotp = pyotp.TOTP(secret)\n\n@app.route('/', methods=['GET'])\ndef qrcode():\n\n url = pyqrcode.create(uri)\n stream = BytesIO()\n url.svg(stream, scale=5)\n return stream.getvalue(),200,{\n 'Content-Type': 'image/svg+xml',\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '0' \n }\n\n@app.route('/verify',methods=['GET'])\ndef otp():\n\n code = totp.now()\n if totp.verify(code):\n return \"Verified\"\n \nif __name__ == \"__main__\":\n app.secret_key = os.urandom(12)\n 
app.run(debug=False,host='0.0.0.0',port=5000)\n","repo_name":"AL1ENUM/Miscellaneous-Scripts","sub_path":"OTP.py","file_name":"OTP.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6939377948","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', type=str, required=True)\nparser.add_argument('--output', type=str, required=True)\nargs = parser.parse_args()\n\nINPUT_FILE = args.input\nOUTPUT_FILE = args.output\n\nPROBLEM_NUMBERS = list(map(str, range(1001, 8000)))\n\nwith open(INPUT_FILE, \"r\") as f:\n lines = f.readlines()\n # remove \"\\n\" from each line\n lines = [line[:-1] for line in lines]\n\n# keep only lines after \"\\section{CONTENTS}\"\nlines = lines[lines.index(\"\\section{CONTENTS}\") + 1:]\n# keep only lines after the first uppercase section\nlines = lines[min(i for i, line in enumerate(lines) if (line.startswith(r'\\section{') and line[8:].isupper())):]\n# keep only lines before \"\\section{INDEX TO PROBLEMS}\" or \"\\section{Index to Problems}\"\nif \"\\section{INDEX TO PROBLEMS}\" in lines:\n lines = lines[:lines.index(\"\\section{INDEX TO PROBLEMS}\")]\nif \"\\section{Index to Problems}\" in lines:\n lines = lines[:lines.index(\"\\section{Index to Problems}\")]\n\n# replace \"\\section{Solution:}\" with \"Solution:\"\nlines = [line.replace(r\"\\section{Solution:}\", \"Solution:\") for line in lines]\nlines = [line.replace(r\"\\section{Solution}\", \"Solution:\") for line in lines]\nlines = [line.replace(r\"\\section{Solution: \\\\ Solution:}\", \"Solution:\") for line in lines]\n\n# remove all \"\\section{...}\" that contain only uppercase letters -- those correspond to headings\nlines = [line for line in lines if not (line.startswith(r'\\section{') and line[8:].isupper())]\n\n# if a \"\\section{...}\" contains only a number, replace the whole line with \"Problem:\"\nlines = [line if not (line.startswith(r\"\\section{\") and line[9].isdigit()) else \"Problem:\" for line in lines]\n\n# remove all other section environments\nassert sum(line.startswith(r\"\\section{\") for line in lines) == sum(line.find(r\"\\section{\") != -1 for line in lines) # max one section per line\n#assert all(line.endswith(\"}\") for line in lines if line.startswith(r\"\\section{\"))\nlines = [line[9:-1] if line.startswith(r\"\\section{\") and line.endswith(\"}\") else line for line in lines]\nlines = [line[9:] if line.startswith(r\"\\section{\") else line for line in lines]\n# same for subsections\nlines = [line[12:-1] if line.startswith(r\"\\subsection{\") and line.endswith(\"}\") else line for line in lines]\n\n# remove all \"\\title{...}\" and \"\\author{...}\" environments\nfor i in range(len(lines)):\n if lines[i].startswith(r\"\\title{\") or lines[i].startswith(r\"\\author{\"):\n assert lines[i + 2].startswith(\"}\")\n lines[i] = \"\"\n lines[i + 2] = \"\"\n\n# remove all problem sources\nPROBLEM_SOURCES = [\n \"(Wisconsin)\",\n \"(UC,Berkeley)\",\n \"(UC,BBerkeley)\",\n \"(Columbia)\",\n \"(Coulumbia)\",\n \"(Princeton)\",\n \"(Chicago)\",\n \"(CUSPEA)\",\n \"(SUNY,Buffalo)\",\n \"(MIT)\",\n \"(CCT)\",\n \"(Buffalo)\",\n]\nnew_lines = []\nfor line in lines:\n mod_line = line.strip() \\\n .replace(\"$\", \"\") \\\n .replace(\" \", \"\") \\\n .replace(r\"\\operatorname\", \"\") \\\n .replace(r\"\\text\", \"\") \\\n .replace(\"{\", \"\") \\\n .replace(\"}\", \"\")\n if mod_line in PROBLEM_SOURCES:\n continue\n new_lines.append(line)\nlines = 
new_lines\n\n# count lines that start with \"![](https://cdn.mathpix.com\"\nprint(\"Number of image links:\", len([line for line in lines if line.startswith(\"![](https://cdn.mathpix.com\")]))\n\n# sometimes the problem number is in $$ ... $$, so we need to fix that\nlines = \"\\n\".join(lines)\nfor x in PROBLEM_NUMBERS:\n lines = lines.replace(f\"$$\\n{x}\\n$$\\n\", f\"{x}\\n\")\nlines = lines.split(\"\\n\")\n\n# remove \"\\begin{abstract}\" and \"\\end{abstract}\"\nlines = [line for line in lines if not line.startswith(r\"\\begin{abstract}\") and not line.startswith(r\"\\end{abstract}\")]\n\n# manual fixes\nlines = \"\\n\".join(lines)\nlines = lines.replace(\"$$\\n\\\\begin{gathered}\\n1001 \\\\\\\\\", \"1001\\n$$\\n\\\\begin{gathered}\")\nif \"optics\" in INPUT_FILE:\n lines = lines.replace(\"1084\", \"1034\")\n lines = lines.replace(\"2081\", \"2031\")\n lines = lines.replace(\"2083\", \"2033\")\n lines = lines.replace(\"A glass cube\", \"Problem:\\n\\nA glass cube\")\n lines = lines.replace(\"A rainbow\", \"Problem:\\n\\nA rainbow\")\nlines = lines.replace(\"Solutlon\", \"Solution\")\nlines = lines.replace(\"Solntion\", \"Solution\")\nlines = lines.replace(\"Alternative Solution:\", \"Alternative solution:\") # hack so that it's not confused with \"Solution:\"\nlines = lines.replace(\"〉\", \">\")\nlines = lines.replace(\"〈\", \"<\")\nlines = lines.replace(\"ẹ\", \"e\")\nlines = lines.replace(\"Fis.\", \"Fig.\")\nlines = lines.replace(\"Pig.\", \"Fig.\")\nlines = lines.split(\"\\n\")\n\n# split every \"Solution:\" into a separate line\nlines = \"\\n\".join(lines)\nlines = lines.replace(\"Solution: \", \"Solution:\\n\")\nlines = lines.replace(\" Solution:\", \"\\nSolution:\")\nlines = lines.split(\"\\n\")\n\n# replace all occurrences of a problem number with \"Problem (number):\"\nnew_lines = []\nfor line in lines:\n if not any(x in line for x in PROBLEM_NUMBERS) or \\\n line.startswith(\"![](https://cdn.mathpix.com\") or \\\n any(f\"Problem {x}\" in line for x in PROBLEM_NUMBERS) or \\\n any(f\"problem {x}\" in line for x in PROBLEM_NUMBERS) or \\\n any(f\"Problems {x}\" in line for x in PROBLEM_NUMBERS):\n new_lines.append(line)\n continue\n #assert line[-4:] in PROBLEM_NUMBERS, line\n if not line[-4:] in PROBLEM_NUMBERS:\n new_lines.append(line)\n continue\n new_lines.append(line[:-4])\n new_lines.append(f\"Problem:[{line[-4:]}]\")\nlines = new_lines\n\nprint(\"Number of solutions:\", len([line for line in lines if line.startswith(\"Solution:\")]))\nprint(\"Number of problems without number:\", len([line for line in lines if line == \"Problem:\"]))\nprint(\"Number of problems with number:\", len([line for line in lines if line.startswith(\"Problem:[\")]))\nprint(\"Total number of problems:\", len([line for line in lines if line.startswith(\"Problem:\")]))\n\nwith open(OUTPUT_FILE, \"w\") as f:\n f.write(\"\\n\".join(lines))","repo_name":"techthiyanes/DUCK-datasets","sub_path":"old_data/physics_books/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"33055269910","text":"from time import time\nimport json\nimport pandas as pd\nimport numpy\n\ndef create_all_labels_dict_file_from_csv(paths_to_datasets, path_to_all_labels_file):\n clean_g = pd.read_csv(paths_to_datasets[0])\n malw_g = pd.read_csv(paths_to_datasets[1])\n pua_g = pd.read_csv(paths_to_datasets[2])\n\n clean_g_list = clean_g.iloc[:, -1].tolist()\n malw_g_list = malw_g.iloc[:, 
-1].tolist()\n pua_g_list = pua_g.iloc[:, -1].tolist()\n\n del clean_g\n del malw_g\n del pua_g\n\n dict_ = {} # 0 - clean, 1 - pua, 2 - malw\n\n # we need a good FPP\n\n for item in clean_g_list:\n if item not in dict_:\n dict_[item] = 0\n\n for item in pua_g_list:\n if item not in dict_:\n dict_[item] = 1\n else:\n dict_[item] = 1\n\n for item in malw_g_list:\n if item not in dict_:\n dict_[item] = 2\n\n with open(path_to_all_labels_file, 'w') as file:\n file.write(json.dumps(dict_))\n\ndef X_Y_from_embeddings(all_labels_file, embeddings_file):\n with open(all_labels_file, 'r') as file:\n all_labels = json.loads(file.read())\n\n with open(embeddings_file, 'r') as file:\n embeddings = json.loads(file.read())\n\n x, y = [], []\n for key in embeddings:\n md5 = key.split('/')[-1].split('.')[0]\n x.append(embeddings[key])\n y.append(all_labels[md5])\n\n X = numpy.array([numpy.array(xi) for xi in x])\n Y = numpy.array([numpy.array(xi) for xi in y])\n return X, Y","repo_name":"Grigorii-P/KL_graph_embeddings","sub_path":".ipynb_checkpoints/label_utils-checkpoint.py","file_name":"label_utils-checkpoint.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2414854441","text":"\"\"\"\nThe TMP102 is a two-wire, serial output temperature\nsensor available in a tiny SOT563 package. Requiring\nno external components, the TMP102 is capable of\nreading temperatures to a resolution of 0.0625°C.\n\nhttps://www.sparkfun.com/datasheets/Sensors/Temperature/tmp102.pdf\n\n\"\"\"\nimport esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome.components import i2c, sensor\nfrom esphome.const import (\n DEVICE_CLASS_TEMPERATURE,\n STATE_CLASS_MEASUREMENT,\n UNIT_CELSIUS,\n)\n\nCODEOWNERS = [\"@timsavage\"]\nDEPENDENCIES = [\"i2c\"]\n\ntmp102_ns = cg.esphome_ns.namespace(\"tmp102\")\nTMP102Component = tmp102_ns.class_(\n \"TMP102Component\", cg.PollingComponent, i2c.I2CDevice, sensor.Sensor\n)\n\nCONFIG_SCHEMA = (\n sensor.sensor_schema(\n TMP102Component,\n unit_of_measurement=UNIT_CELSIUS,\n accuracy_decimals=1,\n device_class=DEVICE_CLASS_TEMPERATURE,\n state_class=STATE_CLASS_MEASUREMENT,\n )\n .extend(cv.polling_component_schema(\"60s\"))\n .extend(i2c.i2c_device_schema(0x48))\n)\n\n\nasync def to_code(config):\n var = await sensor.new_sensor(config)\n await cg.register_component(var, config)\n await i2c.register_i2c_device(var, config)\n","repo_name":"esphome/esphome","sub_path":"esphome/components/tmp102/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":6791,"dataset":"github-code","pt":"47"} +{"seq_id":"73251432782","text":"#import the dependencies\nimport numpy as np \n\nimport datetime as dt\nfrom dateutil.relativedelta import relativedelta\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n#################################################\n\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n\n#################################################\n# Flask 
Setup\n#################################################\n\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n# Create a home page route\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to the Hawaii Climate Database:
<br/><br/>\"\n        f\"/api/v1.0/precipitation<br/>Dictionary of date and precipitation<br/><br/>\"\n        f\"/api/v1.0/h_stations<br/>List of the weather stations<br/><br/>\"\n        f\"/api/v1.0/tobs<br/>Dictionary of date and tobs of the most active station\\\n            for the last year of the data<br/><br/>\"\n        f\"/api/v1.0/<start_date><br/>Min, max, avg tobs for all dates greater than\\\n            and equal to the start date.<br/>\\\n            Please add a date between 2010-01-01 & 2017-08-23 after the '/'<br/><br/>\"\n        f\"/api/v1.0/<start_date>/<end_date><br/>Min, max, avg tobs for dates\\\n            between the start and end date inclusive.<br/>\\\n            Please add a date between 2010-01-01 & 2017-08-23 after the '/'\\\n            after 'v1.0'.<br/><br/>\"\n    )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    # Create our session (link) from Python to the DB\n    #session = Session(engine)\n\n    # Query the date and prcp for the last 12 months\n    results = session.query(Measurement.date,Measurement.prcp).\\\n        filter(Measurement.date>='2016-08-23').group_by(Measurement.date).\\\n        order_by(Measurement.date).all()\n\n    session.close()\n\n    #create a dictionary using date as key and prcp as the value\n    precipitation_list = []\n    for date, prcp in results:\n        precipitation_dict = {}\n        precipitation_dict['date'] = date\n        precipitation_dict['prcp'] = prcp\n        precipitation_list.append(precipitation_dict)\n\n    #Return the JSON representation of your dictionary\n    return jsonify(precipitation_list)\n\n\n@app.route(\"/api/v1.0/h_stations\")\ndef stations ():\n    # Create our session (link) from Python to the DB\n    #session = Session(engine)\n\n    # Query the station name\n    result = session.query(Station.station).all()\n\n    session.close()\n\n    #create a list of stations\n    station_list = []\n    for row in result:\n        station_dict = {}\n        station_dict['station'] = row.station\n        station_list.append(station_dict)\n\n    #Return a JSON list of stations from the dataset\n    return jsonify(station_list)\n    \n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n    # Create our session (link) from Python to the DB\n    # session = Session(engine)\n\n\n\n    #Query the dates and temperature observations of the most active station\n    Result = session.query(Measurement.date,Measurement.tobs).\\\n        filter(Measurement.date>='2016-08-23').filter(Station.station == Measurement.station).\\\n        filter(Station.station == 'USC00519281').all()\n\n    session.close()\n\n    #Return a JSON list of temperature observations (TOBS)\n    tobs_list = []\n    for date, tobs in Result:\n        tobs_dict = {}\n        tobs_dict['date'] = date\n        tobs_dict['tobs'] = tobs \n        tobs_list.append(tobs_dict)\n\n    #Return a JSON list of tobs from the dataset\n    return jsonify(tobs_list)\n\n\n@app.route(\"/api/v1.0/<start_date>\")\ndef start_date(start_date):\n    # Create our session (link) from Python to the DB\n    #session = Session(engine)\n\n    start_date = session.query(func.min(Measurement.date)).scalar()\n\n    results = session.query(func.min(Measurement.tobs), \n                            func.max(Measurement.tobs), \n                            func.avg(Measurement.tobs)).\\\n        filter(Measurement.date >= start_date).all()\n\n    session.close()\n\n    # Extract the temperature values from the results\n    tmin, tmax, tavg = results[0]\n\n    # Create a dictionary with the temperature data\n    temperature_data = {\n        \"Start Date\": start_date,\n        \"TMIN\": tmin,\n        \"TMAX\": tmax,\n        \"TAVG\": tavg\n    }\n\n    return jsonify(temperature_data)\n\n@app.route(\"/api/v1.0/<start_date>/<end_date>\")\ndef StartDateEndDate(start_date,end_date):\n    # Create our session (link) from Python to the DB\n    #session = Session(engine)\n\n    Resultss = session.query(Measurement.date, \n                             func.min(Measurement.tobs), \n                             func.max(Measurement.tobs),\n                             func.avg(Measurement.tobs)).\\\n        filter(Measurement.date >= start_date).\\\n        filter(Measurement.date<=end_date).\\\n        group_by(Measurement.date).all()\n\n    session.close()\n\n    #Return JSON list of max, min, avg tobs\n    startend_list = []\n    for date,Tmin,Tmax,Tavg in Resultss:\n        startend_dict = {}\n        startend_dict['Date'] = date\n        startend_dict['TMIN'] = Tmin\n        startend_dict['TMAX'] = Tmax\n        startend_dict['TAVG'] = Tavg\n        startend_list.append(startend_dict)\n\n    return jsonify(startend_list)\n\nif __name__ == '__main__':\n    
app.run(debug=True)\n","repo_name":"Lex1996/sqlalchemy-challenge","sub_path":"SURFSUP/app_final.py","file_name":"app_final.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22142658074","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.messages import get_messages\nfrom .models import Profile\n\n# Create your tests here.\n\n\nclass ProfileTests(TestCase):\n def test_profile_normal_access(self):\n \"\"\"\n Once a user is logged in, the profile page should be accessible\n \"\"\"\n user = User.objects.create_user(\"Howard\", \"howard@gmail.com\")\n self.client.force_login(user=user)\n response = self.client.get(reverse(\"accounts:profile\"))\n self.assertEqual(response.status_code, 200)\n\n def test_post_profile(self):\n \"\"\"\n A user goes to the profile page and is able to update the user\n information\n \"\"\"\n user = User.objects.create_user(\"Howard\", \"howard@gmail.com\")\n self.client.force_login(user=user)\n response = self.client.post(\n reverse(\"accounts:profile\"),\n data={\n \"email\": \"Hao@gmail.com\",\n \"profilename\": \"Howard\",\n \"accessible\": \"True\",\n \"family_friendly\": \"False\",\n \"transaction_not_required\": \"False\",\n },\n )\n self.assertEqual(response.status_code, 302)\n self.assertEqual(User.objects.all()[0].email, \"Hao@gmail.com\")\n self.assertEqual(Profile.objects.all()[0].accessible, True)\n self.assertEqual(Profile.objects.all()[0].family_friendly, False)\n self.assertEqual(Profile.objects.all()[0].transaction_not_required, False)\n messages = [m.message for m in get_messages(response.wsgi_request)]\n self.assertIn(messages[0], \"Your account has been updated!\")\n","repo_name":"jonrblue/Team2","sub_path":"teamtwo/accounts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22350525142","text":"# -*- coding: utf-8 -*-\n\nfrom copy import copy\nfrom reportlab.platypus import (SimpleDocTemplate, Paragraph, Spacer, Table,\n TableStyle)\n#from reportlab.lib import colors\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.units import inch\nfrom .personal import address_paragraph\n\nPAGE_HEIGHT = 792.0\nPAGE_WIDTH = 612.0\nstylesheet = getSampleStyleSheet()\n\nTABLE_STYLE = TableStyle([\n ('ALIGN', (2,0), (-1,-1), 'RIGHT'),\n ('VALIGN', (0,0), (-1,-1), 'TOP'),\n ])\n #('LINEABOVE', (0,0), (-1,0), 2, colors.green),\n #('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),\n #('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),\n\nclass Layout(object):\n\n def __init__(self, title):\n self.title = title\n\n def myFirstPage(self, canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Bold',16)\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-108, self.title)\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Invoice page %d\" % (doc.page,))\n canvas.restoreState()\n\n def myLaterPages(self, canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Invoice page %d\" % (doc.page,))\n canvas.restoreState()\n\ndef format_invoice(title, entries):\n doc = SimpleDocTemplate('invoice.pdf', pagesize=letter)\n style = stylesheet[\"Normal\"]\n times = copy(style)\n times.fontName = 'Times-Roman'\n 
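# Mutate only the copied style; the shared stylesheet's base 'Normal' style stays untouched.\n 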
times.fontSize = 12\n times.leading = 14\n\n story = [Spacer(1,0.75*inch), Paragraph(address_paragraph, times)]\n\n entries = sorted(entries, key=lambda entry: entry.start)\n data = [[u'', u'', u'Time', u'Rate', u'Total']]\n\n for i, entry in enumerate(entries):\n d0 = entry.start.date()\n d1 = entry.end.date()\n if d0 == d1:\n dates = '{:%B %d}'.format(d0).replace(u' 0', u' ')\n else:\n dates = u'{:%b %d} – {:%b %d}'.format(d0, d1).replace(u' 0', u' ')\n\n h, m = divmod(entry.minutes, 60)\n elapsed = u'{}:{:02}'.format(int(h), int(m))\n\n p = Paragraph(entry.description, style)\n #story.append(p)\n #story.append(Spacer(1,0.2*inch))\n\n dollar = u'' if i else u'$'\n data.append([dates, p, elapsed,\n u'{} {:,}'.format(dollar, entry.rate),\n u'{} {:,}'.format(dollar, entry.amount)])\n\n total_amount = sum(entry.amount for entry in entries)\n total_elapsed = sum(entry.minutes for entry in entries)\n\n data.append([u'', u'Total ' + '.' * 80,\n u'{}:{:02}'.format(*divmod(total_elapsed, 60)),\n u'', u'$ {:,}'.format(total_amount)])\n\n table = Table(data, colWidths=[84.0, 260.0, 36.0, 30.0, 40.0],\n style=TABLE_STYLE)\n story.append(table)\n\n layout = Layout(title)\n doc.build(\n story,\n onFirstPage=layout.myFirstPage,\n onLaterPages=layout.myLaterPages,\n )\n","repo_name":"brandon-rhodes/org-mode-invoicer","sub_path":"orginvoicer/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"4601074073","text":"import matplotlib.pyplot as plot\n\nuserInput = \"\"\nx = [10,20,30,40,50,60,70,80,90,100]\ny = [10,78,32.66,45,20,10,54,23,34,0]\n\nu = [10,20,30,40,50,60,70,80,90,100]\nv = [99,34,56,23,67,34,46,86,0,99]\n\ndef setInputLable(quary):\n global userInput\n userInput = quary\n print(userInput)\n\ndef drawBasicGraph(xList,yList, xLable, yLable , title):\n plot.plot(xList, yList, color=\"red\")\n plot.title(title)\n plot.xlabel(xLable)\n plot.ylabel(yLable)\n avg = 0;\n for i in yList:\n avg = avg + i\n avg = avg / len(yList)\n avgs = []\n for i in yList:\n avgs.append(avg)\n plot.plot(xList, avgs, color = \"red\", linestyle = \"dotted\")\n plot.gca().legend((\"Elon Musk\", \"Elon's Average\")) # loc=\"upper left\")\n plot.show()\n\ndef compareBasicGraphs(xList0,yList0, xLable, yLable, title,xList1,yList1):\n plot.plot(xList0, yList0, color=\"red\")#, lable=\"elonMusk\")\n plot.title(title)\n plot.xlabel(xLable)\n plot.ylabel(yLable)\n avg = 0;\n for i in yList0:\n avg = avg + i\n avg = avg / len(yList0)\n avgs = []\n for i in yList0:\n avgs.append(avg)\n plot.plot(xList0, avgs, color = \"red\", linestyle = \"dotted\")#, lable=\"elonMusk avg\")\n\n plot.plot(xList1, yList1, color=\"blue\")#, lable=userInputLable)\n avg = 0;\n for i in yList1:\n avg = avg + i\n avg = avg / len(yList1)\n avgs = []\n for i in yList1:\n avgs.append(avg)\n plot.plot(xList1, avgs, color = \"blue\", linestyle = \"dotted\")#, lable = userInputLable + \" avg\")\n\n plot.gca().legend((\"Elon Musk\",\"Elon's Average\",userInput,userInput + \"'s Average\"))#loc=\"upper left\")\n plot.show()\n\n\n\n\n#drawBasicGraph(x,y,\"x\",\"y\",\"simple graph\")\n#compareBasicGraphs(x,y,\"x\",\"y\",\"simple graph\",u,v)","repo_name":"lopyxel/DVPROJ21","sub_path":"DVProj4/Graphic.py","file_name":"Graphic.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72643748621","text":"#!/usr/bin/python3\n\"\"\"\nFile 
Storage Unittest Module\n\"\"\"\nimport unittest\nfrom models.engine.file_storage import FileStorage\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nfrom models import storage\nimport os\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\"File Storage Class Tests\"\"\"\n __dict = {'BaseModel': BaseModel(),\n 'User': User(),\n 'Place': Place(),\n 'State': State(),\n 'City': City(),\n 'Amenity': Amenity(),\n 'Review': Review()}\n\n def setUp(self):\n \"\"\"Sets up method\"\"\"\n pass\n\n def tearDown(self):\n \"\"\"Tears down methods\"\"\"\n FileStorage._FileStorage__objects = {}\n try:\n os.remove(FileStorage._FileStorage__file_path)\n except IOError:\n pass\n\n def test_FileStorage_empty(self):\n \"\"\"File Storage type class test\"\"\"\n self.assertEqual(FileStorage, type(FileStorage()))\n self.assertEqual(FileStorage, type(storage))\n self.assertEqual(storage.all(), {})\n\n def test_FileStorage_init_none(self):\n \"\"\"File Storage none test\"\"\"\n with self.assertRaises(TypeError):\n FileStorage(None)\n\n def test_FileStorage_class_instances(self):\n \"\"\"File Storage none test\"\"\"\n self.assertEqual(dict, type(FileStorage._FileStorage__objects))\n self.assertEqual(str, type(FileStorage._FileStorage__file_path))\n\n def test_FileStorage_all_class_new(self):\n \"\"\"File Storage none test\"\"\"\n for v in TestFileStorage.__dict.values():\n storage.new(v)\n for k, v in TestFileStorage.__dict.items():\n self.assertIn(k + \".\" + v.id, storage.all().keys())\n self.assertIn(v, storage.all().values())\n\n def test_FileStorage_all_class_save(self):\n \"\"\"File Storage save test\"\"\"\n for k, v in TestFileStorage.__dict.items():\n k = v\n storage.save()\n for v in TestFileStorage.__dict.values():\n storage.new(v)\n storage.save()\n\n def test_FileStorage_new_none(self):\n \"\"\"File Storage new none test\"\"\"\n with self.assertRaises(AttributeError):\n storage.new(None)\n\n def test_FileStorage_reload_none(self):\n \"\"\"File Storage reload none test\"\"\"\n with self.assertRaises(TypeError):\n storage.reload(None)\n\n def test_FileStorage_reload(self):\n \"\"\"File Storage reload test\"\"\"\n for k, v in TestFileStorage.__dict.items():\n k = v\n storage.save()\n storage.reload()\n objects = storage.all()\n for k in objects.keys():\n obj = objects[k]\n self.assertTrue(issubclass(type(obj), BaseModel))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"campopinillos/AirBnB_clone","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20940669242","text":"def solution(m, n, puddles):\n\n school = [[0]*(m+1) for _ in range(n+1)]\n school[1][1] = 1\n for i in range(1, n+1):\n for j in range(1, m+1):\n # 초기 설정값 (1,1) 이거나 puddles에 있는 값이면 pass\n if [i,j] == [1, 1] or [j,i] in puddles:\n continue\n else:\n school[i][j] = (school[i-1][j] + school[i][j-1])\n\n return school[-1][-1] % 1000000007","repo_name":"rkdms0116/programmers","sub_path":"DP/등굣길.py","file_name":"등굣길.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23309943379","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 
2018/6/11 10:28\n# @Author : xingyuezhiji\n# @Email : zhong180@126.com\n# @File : login_tiantancha.py\n# @Software: PyCharm Community Edition\n#coding=utf-8\n#!/usr/bin/env python\n# encoding = utf-8\n\nfrom urllib.request import urlretrieve\nimport requests\nfrom bs4 import BeautifulSoup\nfrom os import remove\nimport http.cookiejar as cookielib\nfrom PIL import Image\nimport json\nimport re\n\n\nclass tiantancha(object):\n url = 'https://www.tianyancha.com/cd/login.json'\n\n # login_form_data = {'autoLogin':'true','cdpassword':\"25990dae6bb33bcc8bc5ef0c3bd7cce1\",'loginway':\"PL\",'mobile':\"15622890079\"}\n login_form_data ={\"mobile\": \"15622890079\", \"cdpassword\": \"25990dae6bb33bcc8bc5ef0c3bd7cce1\", \"loginway\": \"PL\"}\n headers = {'Host': 'www.tianyancha.com',\n 'Referer': 'https://www.tianyancha.com/',\n\n 'Content-Length': '105',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Origin': 'https://www.tianyancha.com',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Encoding': 'gzip, deflate, br'}\n headers1 = {}\n # headers1['Referer'] = 'https://www.tianyancha.com/usercenter/modifyInfo'\n # headers1['Accept']='text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n # headers1['Accept-Encoding']='gzip, deflate, sdch'\n\n session = None\n # print(headers)\n\n @classmethod\n def load_cookie(cls):\n \"\"\"\n 首先加载cookie\n :return:\n \"\"\"\n cls.session = requests.session()\n cls.session.cookies = cookielib.LWPCookieJar(filename='Cookies_tyc')\n try:\n cls.session.cookies.load(ignore_discard=True)\n except BaseException as e:\n print(e)\n cls.login_form_data['mobile'] = input('请输入账号:')\n cls.login_form_data['cdpassword'] = input('请输入md5加密的密码:')\n cls.login_form_data['loginway'] = \"PL\"\n cls.login_form_data['autoLogin'] = True\n\n @classmethod\n\n def is_login(cls):\n \"\"\"\n 通过访问个人账户来判断是否已经登录\n :return:\n \"\"\"\n url = \"https://www.tianyancha.com/usercenter/modifyInfo\"\n # print(url)\n login_code = cls.session.get(url, headers=cls.headers1,\n allow_redirects=False).status_code\n\n # print('111hhh')\n # print(\"is login\")\n if login_code == 200:\n print(\"登录成功~\")\n return True\n else:\n return False\n\n @classmethod\n\n def login(cls):\n cls.session = requests.session()\n cls.session.cookies = cookielib.LWPCookieJar(filename='Cookies_tyc')\n\n cls.session.post(cls.url, data=cls.login_form_data, headers=cls.headers)\n # 保存登录cookie\n cls.session.cookies.save()\n # 判断是否登录成功\n if not cls.is_login():\n print(\"登录失败,请重新尝试~\")\n\n @classmethod\n def tyc_score(cls,keyword):\n headers = {'User-Agent': 'User-Agent:Mozilla/5.0'}\n # name = ''\n url = 'https://www.tianyancha.com/search?key={}'.format(keyword)\n response = requests.get(url, headers=headers)\n\n try:\n url_data = response.text.encode(response.encoding)\n except:\n url_data = response.text\n\n # print(html_data)\n soup = BeautifulSoup(url_data, 'lxml', from_encoding='utf-8')\n\n contents = soup.find_all(class_='in-block vertical-middle float-right search-right-center')\n\n score = []\n for content in contents[0:5]:\n content = content.get_text()[:-1]\n # print(content)\n try:\n score.append(int(content))\n except:\n score.append(60)\n return score\n\n \n @classmethod\n def main(cls,keyword):\n url = \"https://www.tianyancha.com/usercenter/modifyInfo\"\n tiantancha.load_cookie()\n\n login_code 
= cls.session.get(url,\n allow_redirects=True).status_code\n requests.request(method='GET', url=url)\n # 判断是否登录\n if not tiantancha.is_login():\n tiantancha.login()\n score=0\n # result_dict = tiantancha.search(keyword)\n else:\n score = tiantancha.tyc_score(keyword)\n\n print(score)\n return score\n\n\nif __name__ == '__main__':\n tiantancha.main('孟筱茜')\n\n","repo_name":"xingyuezhiji/scrapy_stock","sub_path":"login_tianyancha.py","file_name":"login_tianyancha.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73716460330","text":"import requests\n\n#!!! This giant block of imports should be something simpler, such as:\n# from great_exepectations.helpers.expectation_creation import *\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import ColumnMapExpectation\nfrom great_expectations.expectations.metrics import (\n ColumnMapMetricProvider,\n column_condition_partial,\n)\n\n\n# This class defines a Metric to support your Expectation\n# For most Expectations, the main business logic for calculation will live here.\n# To learn about the relationship between Metrics and Expectations, please visit\n# https://docs.greatexpectations.io/en/latest/reference/core_concepts.html#expectations-and-metrics.\nclass ColumnValuesValidWikipediaArticles(ColumnMapMetricProvider):\n # This is the id string that will be used to reference your metric.\n # Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/metrics.html#metrics\n # for information on how to choose an id string for your Metric.\n condition_metric_name = \"column_values.valid_wikipedia_articles\"\n\n # This method defines the business logic for evaluating your metric when using a PandasExecutionEngine\n\n @column_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column, **kwargs):\n def is_valid_wikipedia_article(title):\n url = \"https://en.wikipedia.org/wiki/\" + title\n try:\n r = requests.head(url)\n # print(r.status_code)\n if r.status_code == 200:\n return True\n except requests.ConnectionError:\n print(\"failed to connect\")\n return False\n\n return column.apply(lambda x: is_valid_wikipedia_article(x))\n\n\n# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n# @column_condition_partial(engine=SqlAlchemyExecutionEngine)\n# def _sqlalchemy(cls, column, _dialect, **kwargs):\n# return column.in_([3])\n\n# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine\n# @column_condition_partial(engine=SparkDFExecutionEngine)\n# def _spark(cls, column, **kwargs):\n# return column.isin([3])\n\n\n# This class defines the Expectation itself\n# The main business logic for calculation lives here.\nclass ExpectColumnValuesToBeValidWikipediaArticles(ColumnMapExpectation):\n \"\"\"Expect column values to be valid Wikipedia article titles/slugs.\n\n It simply plugs the column value into the Wikipedia URL and checks whether the HTTP status \\\n code is 200. This Expectation can be used as a template for other (or a more generic) \"does this website \\\n exist\" type checks, e.g. 
for things like user handles, dictionary entries, etc.\n \"\"\"\n\n # These examples will be shown in the public gallery, and also executed as unit tests for your Expectation\n examples = [\n {\n \"data\": {\n \"a\": [\"Super_Bowl\", \"Tom_Brady\", \"Kansas_City_Chiefs\"],\n \"b\": [\"peytonman\", \"theweekeeend\", \"Super_Bowl\"],\n },\n \"tests\": [\n {\n \"title\": \"positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\"column\": \"a\"},\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\"column\": \"b\"},\n \"out\": {\n \"success\": False,\n \"unexpected_index_list\": [0, 1],\n \"unexpected_list\": [\"peytonman\", \"theweekeeend\"],\n },\n },\n ],\n }\n ]\n\n # This dictionary contains metadata for display in the public gallery\n library_metadata = {\n \"maturity\": \"experimental\", # \"experimental\", \"beta\", or \"production\"\n \"tags\": [\"experimental\"], # Tags for this Expectation in the gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@annaliuu\",\n \"@wangzhongyi0510\",\n ],\n \"requirements\": [\"requests\"],\n }\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n map_metric = \"column_values.valid_wikipedia_articles\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n # Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys\n # for more information about domain and success keys, and other arguments to Expectations\n success_keys = (\"mostly\",)\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {}\n\n # This method defines a question Renderer\n # For more info on Renderers, see\n # https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.html\n #!!! This example renderer should render RenderedStringTemplateContent, not just a string\n\n\n# @classmethod\n# @renderer(renderer_type=\"renderer.question\")\n# def _question_renderer(\n# cls, configuration, result=None, runtime_configuration=None\n# ):\n# column = configuration.kwargs.get(\"column\")\n# mostly = configuration.kwargs.get(\"mostly\")\n\n# return f'Do at least {mostly * 100}% of values in column \"{column}\" equal 3?'\n\n# This method defines an answer Renderer\n#!!! 
This example renderer should render RenderedStringTemplateContent, not just a string\n# @classmethod\n# @renderer(renderer_type=\"renderer.answer\")\n# def _answer_renderer(\n# cls, configuration=None, result=None, runtime_configuration=None\n# ):\n# column = result.expectation_config.kwargs.get(\"column\")\n# mostly = result.expectation_config.kwargs.get(\"mostly\")\n# regex = result.expectation_config.kwargs.get(\"regex\")\n# if result.success:\n# return f'At least {mostly * 100}% of values in column \"{column}\" equal 3.'\n# else:\n# return f'Less than {mostly * 100}% of values in column \"{column}\" equal 3.'\n\n# This method defines a prescriptive Renderer\n# @classmethod\n# @renderer(renderer_type=\"renderer.prescriptive\")\n# @render_evaluation_parameter_string\n# def _prescriptive_renderer(\n# cls,\n# configuration=None,\n# result=None,\n# runtime_configuration=None,\n# **kwargs,\n# ):\n#!!! This example renderer should be shorter\n# runtime_configuration = runtime_configuration or {}\n# include_column_name = False if runtime_configuration.get(\"include_column_name\") is False else True\n# styling = runtime_configuration.get(\"styling\")\n# params = substitute_none_for_missing(\n# configuration.kwargs,\n# [\"column\", \"regex\", \"mostly\", \"row_condition\", \"condition_parser\"],\n# )\n\n# template_str = \"values must be equal to 3\"\n# if params[\"mostly\"] is not None:\n# params[\"mostly_pct\"] = num_to_str(\n# params[\"mostly\"] * 100, precision=15, no_scientific=True\n# )\n# # params[\"mostly_pct\"] = \"{:.14f}\".format(params[\"mostly\"]*100).rstrip(\"0\").rstrip(\".\")\n# template_str += \", at least $mostly_pct % of the time.\"\n# else:\n# template_str += \".\"\n\n# if include_column_name:\n# template_str = \"$column \" + template_str\n\n# if params[\"row_condition\"] is not None:\n# (\n# conditional_template_str,\n# conditional_params,\n# ) = parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n# template_str = conditional_template_str + \", then \" + template_str\n# params.update(conditional_params)\n\n# return [\n# RenderedStringTemplateContent(\n# **{\n# \"content_block_type\": \"string_template\",\n# \"string_template\": {\n# \"template\": template_str,\n# \"params\": params,\n# \"styling\": styling,\n# },\n# }\n# )\n# ]\n\nif __name__ == \"__main__\":\n ExpectColumnValuesToBeValidWikipediaArticles().print_diagnostic_checklist()\n","repo_name":"great-expectations/great_expectations","sub_path":"contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_wikipedia_articles.py","file_name":"expect_column_values_to_be_valid_wikipedia_articles.py","file_ext":"py","file_size_in_byte":8832,"program_lang":"python","lang":"en","doc_type":"code","stars":8970,"dataset":"github-code","pt":"55"} +{"seq_id":"3677954952","text":"import json, math, os\nfrom flask import current_app, session\nfrom PIL import Image\nfrom flask.ext.login import login_user,logout_user,login_required,current_user\nfrom app.exceptions import JsonOutputException, FormValidateError\nfrom app.decorators import api_login_required, permission_required\nfrom app.models import Attachment, Exam, User, Message, Question\nfrom app.utils import upload, pagination, image_save\nfrom flask import request, g\nfrom .forms import SmsForm, PaperUploadForm, RegisterInfoForm\nfrom werkzeug.datastructures import MultiDict\nfrom app.const import EXAM_STATUS, PAPER_TYPE_ORDER\nfrom . 
import api_blueprint\nfrom app.models import Region, School, ExamLog\nfrom app.sms import SmsServer\nfrom app.utils import render_api\n\n@api_blueprint.route('/province')\ndef province():\n title = request.args.get('title', '')\n provinces = Region.get_province(title)\n return {\n 'code': 0,\n 'data': provinces\n }\n\n@api_blueprint.route('/city')\ndef city():\n pro_id = request.args.get('pro_id')\n title = request.args.get('title', '')\n if not pro_id:\n raise JsonOutputException('need pro_id')\n cities = Region.get_city(pro_id, title)\n return {\n 'code': 0,\n 'data': cities\n }\n\n@api_blueprint.route('/area')\ndef area():\n city_id = request.args.get('city_id')\n title = request.args.get('title', '')\n if not city_id:\n raise JsonOutputException('need city_id')\n areas = Region.get_area(city_id, title)\n return {\n 'code': 0,\n 'data': areas\n }\n\n@api_blueprint.route('/school')\ndef school():\n ctid = request.args.get('ctid')\n title = request.args.get('title', '')\n if not ctid:\n raise JsonOutputException('need ctid')\n schools = School.get_schools_by_ctid(ctid, title)\n return {\n 'code': 0,\n 'data': schools\n }\n\n@api_blueprint.route('/sms')\ndef send_msg():\n form = SmsForm(request.args)\n if not form.validate():\n raise FormValidateError(form.errors)\n phone = form.phone.data\n sms = SmsServer()\n success, code = sms.generate_code(phone)\n if not success:\n return {\n 'code': 15,\n 'msg': '请求太频繁,请稍后重试'\n }\n return sms.send_code(code, phone)\n\n\n@api_blueprint.route('/uploads', methods=['POST'])\n@api_login_required\ndef upload_attachment():\n file = request.files.get('file')\n thumb = bool(request.args.get('thumb', False))\n if not file:\n raise JsonOutputException('请选择要上传的文件')\n file_type = request.form.get('type', '')\n data = upload(file, thumb)\n if data.get('status') is True:\n attachment = Attachment(\n name=data.get('original', ''),\n url=data.get('url', ''),\n user_id=g.user.id,\n file_type=file_type)\n attachment.save()\n #attachment.id\n return {\n 'code': 0,\n 'data': [attachment.url]\n }\n raise JsonOutputException('上传失败')\n\n@api_blueprint.route('/cropper', methods=['POST'])\n@api_login_required\ndef cropper_image():\n file_url = request.json.get('file_url')\n box = request.json.get('box')\n if not file_url:\n raise JsonOutputException('请传入图片')\n if not box or len(box) != 5:\n raise JsonOutputException('切割参数错误')\n app_path = current_app.config['APP_PATH']\n file_path = '{}{}'.format(app_path, file_url)\n if not os.path.isfile(file_path):\n raise JsonOutputException('图片不存在')\n origin_image = Image.open(file_path)\n degree= -box[-1]\n box = box[0:4]\n dest_image = origin_image.rotate(degree, expand=1).crop(box)\n return image_save(dest_image)\n\n#上传试卷\n@api_blueprint.route('/paper/upload', methods=['POST'])\n@api_login_required\n@permission_required('UPLOAD_PERMISSION')\ndef paper_upload():\n data = MultiDict(mapping=request.json)\n form = PaperUploadForm(data)\n if not form.validate():\n raise FormValidateError(form.errors)\n # todo 插入数据库\n attachments = request.json.get('attachments', [])\n exam = Exam(name=form.name.data, section=form.section.data, subject=form.subject.data, paper_types=form.paper_types.data, \\\n province_id=form.province_id.data, city_id=form.city_id.data, area_id=form.area_id.data,\\\n school_id=form.school_id.data,\n exam_date=form.exam_date.data,\n year=form.year.data,\n grade=form.grade.data,\n is_fast=form.is_fast.data,\n state=0,\n attachments=attachments,\n upload_user=g.user.id,\n order=PAPER_TYPE_ORDER[form.paper_types.data])\n 
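# Persist the new exam and record an audit-log entry for the review workflow.\n 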
result = exam.save()\n ExamLog.log(exam.id, g.user.id, EXAM_STATUS['未审核'], 'UPLOAD')\n if result.id is not None:\n return render_api({})\n raise JsonOutputException('添加失败')\n\n# Paper list\n@api_blueprint.route('/paper/upload', methods=['GET'])\n@api_login_required\n@permission_required('UPLOAD_PERMISSION')\ndef get_exams():\n data = Exam.get_exams(g.user.id)\n return render_api(data)\n\n# Paper proofreading list\n@api_blueprint.route('/paper/check/list', methods=['GET'])\n@api_login_required\ndef get_check_exams():\n data = Exam.list_all_exams()\n return render_api(data)\n\n# Paper details\n@api_blueprint.route('/paper/preview/<paper_id>')\n# @api_login_required\ndef get_paper_preview(paper_id):\n paper = Exam.query.get_or_404(paper_id)\n query = Question.query.\\\n filter_by(exam_id=paper.id).\\\n order_by(Question.quest_no)\n questions = [q.to_dict() for q in query.all()]\n data = paper.to_dict()\n data['questions'] = questions\n return render_api(data)\n\n# View a single uploaded paper\n@api_blueprint.route('/paper/upload/<id>', methods=['GET'])\n@api_login_required\n@permission_required('UPLOAD_PERMISSION')\ndef get_exam(id):\n data = Exam.get_exam(id)\n if data is not None:\n return {\n 'code': 0,\n 'data': data\n }\n else:\n raise JsonOutputException('没有数据')\n\n# Update a paper\n@api_blueprint.route('/paper/upload/<id>', methods=['PUT'])\n@api_login_required\n@permission_required('UPLOAD_PERMISSION')\ndef update_exam(id):\n data = MultiDict(mapping=request.json)\n form = PaperUploadForm(data)\n if not form.validate():\n raise FormValidateError(form.errors)\n attachments = request.json.get('attachments', [])\n exam = Exam.query.get(int(id))\n exam.name = form.name.data\n exam.section = form.section.data\n exam.subject = form.subject.data\n exam.paper_types = form.paper_types.data\n exam.province_id = form.province_id.data\n exam.city_id = form.city_id.data\n exam.area_id = form.area_id.data\n exam.school_id = form.school_id.data\n exam.year = form.year.data\n exam.grade = form.grade.data\n exam.attachments = attachments\n exam.state = EXAM_STATUS['未审核']\n exam.save()\n return render_api({})\n\n# Delete a paper\n@api_blueprint.route('/paper/upload/<id>', methods=['DELETE'])\n@api_login_required\n@permission_required('UPLOAD_PERMISSION')\ndef delexam(id):\n exam = Exam.query.get(int(id))\n if exam is None or exam.state > EXAM_STATUS['未审核']:\n raise JsonOutputException('删除失败')\n else:\n exam.state = EXAM_STATUS['已删除']\n exam.save()\n return {\n 'code': 0,\n 'data': ''\n }\n\n# Get the current user's profile\n@api_blueprint.route('/user/info', methods=['GET'])\n@api_login_required\ndef user_info():\n data = g.user.to_dict()\n return render_api(data)\n\n# Update the current user's profile\n@api_blueprint.route('/user/info', methods=['PUT'])\n@api_login_required\ndef user_info_update():\n password = request.json.get('password')\n repassword = request.json.get('rePassword')\n valid_code = request.json.get('validCode')\n phone = request.json.get('phone')\n email = request.json.get('email')\n province_id = request.json.get('province_id', 0)\n city_id = request.json.get('city_id', 0)\n area_id = request.json.get('area_id', 0)\n school_id = request.json.get('school_id', 0)\n grade_id = request.json.get('grade_id', 0)\n\n user = User.query.filter_by(phone=phone).first()\n if user is not None and user.id != g.user.id:\n raise JsonOutputException('该手机号已被使用')\n if not valid_code:\n raise JsonOutputException('请输入验证码')\n sms = SmsServer()\n # Verify the SMS code\n if not sms.check_code(valid_code, phone):\n raise JsonOutputException('验证码错误')\n if password:\n if password != repassword:\n raise JsonOutputException('两次输入密码不一致')\n g.user.password = password\n 
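# All checks passed; apply the remaining profile fields to the current user.\n 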
g.user.phone = phone\n g.user.email = email\n g.user.province_id = province_id\n g.user.city_id = city_id\n g.user.area_id = area_id\n g.user.school_id = school_id\n g.user.grade_id = grade_id\n g.user.save()\n return render_api({})\n\n# Score history\n@api_blueprint.route('/user/score')\n@api_login_required\ndef user_score():\n data = pagination(g.user.scores)\n return render_api(data)\n\n@api_blueprint.route('/user/works')\n@api_login_required\ndef user_work():\n data = g.user.get_admin_summary()\n return render_api(data)\n\n# Message history\n@api_blueprint.route('/user/message')\n@api_login_required\ndef user_message():\n data = pagination(g.user.messages)\n message_ids = [str(item['id']) for item in data['items']]\n Message.set_is_read(message_ids)\n return render_api(data)\n\n\n@api_blueprint.route('/paper/search', methods=['GET'])\ndef q_search():\n import http.client\n import json\n q = request.args.get(\"q\",\"\")\n subject_id = request.args.get('subject_id', '')\n qtype_id = request.args.get('qtype_id', '')\n pageIndex = request.args.get('pageIndex', 0)\n\n size = 15\n _from = int(pageIndex) * size\n\n connection = http.client.HTTPConnection('search.i3ke.com', 80, timeout=10)\n headers = {'Content-type': 'application/json'}\n param = {\"subject_id\": subject_id, \"qtype_id\": qtype_id, \"mlt\": {\"fields\": \"qtxt\", \"like\": \"%\"+q}, \"allFields\": [\"qtxt\"], \"highlightedFields\": [\"qtxt\"],\n \"from\": _from, \"size\": size, \"sort\": {\"_score\": \"desc\"}}\n params = json.dumps(param)\n\n connection.request('POST', '/sq-apps/api/_search', params, headers)\n\n response = connection.getresponse()\n jsonStr = response.read().decode()\n jsonResult = json.loads(jsonStr)\n if jsonResult['code'] != 0:\n raise JsonOutputException('参数错误')\n\n res = {\n 'items': jsonResult['datas'],\n 'pageIndex': pageIndex,\n 'pageSize': jsonResult['size'],\n 'totalCount': jsonResult['total'],\n 'totalPage': math.ceil(jsonResult['total']/jsonResult['size'])\n }\n return render_api(res)\n\n@api_blueprint.route('/word')\ndef render_word():\n from flask.helpers import send_file\n return send_file('/Users/chenke/dev/python/information/app/static/uploads/20170208/1486552110.3344362016.docx', mimetype=\"application/msword\", as_attachment=True)\n\n@api_blueprint.route('/login/', methods=['POST'])\ndef login():\n if g.user is not None and g.user.is_authenticated():\n return g.user.to_dict()\n user_name = request.json.get('user_name')\n password = request.json.get('password')\n user = User.query.filter_by(name=user_name).first()\n if user is None:\n user = User.query.filter_by(phone=user_name).first()\n if user is None:\n raise JsonOutputException('用户不存在')\n if user.verify_password(password):\n login_user(user)\n return user.to_dict()\n raise JsonOutputException('密码错误')\n\n@api_blueprint.route('/paper/attachment/upload/<id>')\n@login_required\n@permission_required('FAST_PERMISSION')\ndef upload_paper_attachment(id):\n paper = Exam.query.get_or_404(id)\n if paper.is_fast != 1:\n raise JsonOutputException('操作失败')\n success = paper.push_attachments()\n if success:\n paper.is_fast = 2\n paper.save()\n return render_api({})\n raise JsonOutputException('上传失败')\n\n@api_blueprint.route(\"/logout/\")\n@api_login_required\ndef logout():\n logout_user()\n return {}\n\n@api_blueprint.route('/is_login')\n@api_login_required\ndef is_login():\n return {\n \"code\": 200\n }\n\n@api_blueprint.route('/register/', methods=['POST'])\ndef api_register():\n phone 
= request.json.get('phone')\n valid_code = str(request.json.get('valid_code', ''))\n visit_code = str(request.json.get('visit_code', ''))\n if not phone or not valid_code or not visit_code:\n raise JsonOutputException('参数错误')\n if len(phone) != 11:\n raise JsonOutputException('手机号格式错误') \n user = User.query.filter_by(phone=phone).first()\n if user is not None:\n raise JsonOutputException('该手机号已经注册过')\n sms = SmsServer()\n if not sms.check_code(valid_code, phone):\n raise JsonOutputException('验证码错误')\n if not (len(visit_code)==4 and sum([int(i) for i in visit_code])==16):\n raise JsonOutputException('邀请码错误')\n session['phone'] = phone\n return render_api({})\n\n@api_blueprint.route('/register/info/', methods=['POST'])\ndef api_register_info():\n if not session.get('phone'):\n raise JsonOutputException('请从注册页进入')\n data = MultiDict(mapping=request.json)\n form = RegisterInfoForm(data)\n if form.validate():\n user = User.query.filter_by(phone=form.phone.data).first()\n if user is not None:\n raise JsonOutputException('该手机号已经注册过')\n user = User.query.filter_by(name=form.user_name.data).first()\n if user is not None:\n raise JsonOutputException('该用户名已被使用')\n user = None\n if session.get('openid'):\n user = User.query.filter_by(openid=session.get('openid')).first()\n if user:\n user.name = form.user_name.data\n user.phone = form.phone.data\n email=form.email.data,\n user.password=form.password.data\n user.school_id=form.school_id.data\n user.city_id=form.city_id.data\n user.grade_id=form.grade_id.data\n user.province_id=form.province_id.data\n user.area_id=form.area_id.data\n else:\n user = User(name=form.user_name.data,\n phone=form.phone.data,\n email=form.email.data,\n password=form.password.data,\n school_id=form.school_id.data,\n city_id=form.city_id.data,\n grade_id=form.grade_id.data,\n province_id=form.province_id.data,\n area_id=form.area_id.data)\n user.save()\n login_user(user)\n return render_api({})\n raise FormValidateError(form.errors)\n ","repo_name":"Syaoran0223/information","sub_path":"app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"32629765960","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport logging\nimport xml.etree.ElementTree as ET\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom urllib.error import URLError\n\nimport dask.array as da\nimport numpy as np\nimport xarray as xr\nfrom bioio_base import constants, dimensions, exceptions, io, reader, transforms, types\nfrom dask import delayed\nfrom fsspec.implementations.local import LocalFileSystem\nfrom fsspec.spec import AbstractFileSystem\nfrom ome_types import OME, from_xml\nfrom pydantic import ValidationError\nfrom tifffile.tifffile import TiffFile, TiffFileError, TiffTags, imread\nfrom xmlschema import XMLSchemaValidationError\nfrom xmlschema.exceptions import XMLSchemaValueError\n\nfrom .utils import (\n clean_ome_xml_for_known_issues,\n get_coords_from_ome,\n get_dims_from_ome,\n physical_pixel_sizes,\n)\n\n###############################################################################\n\nlog = logging.getLogger(__name__)\n\n###############################################################################\n\n\nclass Reader(reader.Reader):\n \"\"\"\n Wraps the tifffile and ome-types APIs to provide the same BioIO Reader Plugin\n for volumetric OME-TIFF images.\n Parameters\n ----------\n image: types.PathLike\n Path to image file to construct 
Reader for.\n chunk_dims: List[str]\n Which dimensions to create chunks for.\n Default: DEFAULT_CHUNK_DIMS\n Note: Dimensions.SpatialY, Dimensions.SpatialX, and DimensionNames.Samples,\n will always be added to the list if not present during dask array\n construction.\n clean_metadata: bool\n Should the OME XML metadata found in the file be cleaned for known\n AICSImageIO 3.x and earlier created errors.\n Default: True (Clean the metadata for known errors)\n fs_kwargs: Dict[str, Any]\n Any specific keyword arguments to pass down to the fsspec created filesystem.\n Default: {}\n Notes\n -----\n If the OME metadata in your file isn't OME schema compilant or does not validate\n this will fail to read your file and raise an exception.\n If the OME metadata in your file doesn't use the latest OME schema (2016-06),\n this reader will make a request to the referenced remote OME schema to validate.\n \"\"\"\n\n _xarray_dask_data: Optional[\"xr.DataArray\"] = None\n _xarray_data: Optional[\"xr.DataArray\"] = None\n _mosaic_xarray_dask_data: Optional[\"xr.DataArray\"] = None\n _mosaic_xarray_data: Optional[\"xr.DataArray\"] = None\n _dims: Optional[dimensions.Dimensions] = None\n _metadata: Optional[Any] = None\n _scenes: Optional[Tuple[str, ...]] = None\n _current_scene_index: int = 0\n # Do not provide default value because\n # they may not need to be used by your reader (i.e. input param is an array)\n _fs: \"AbstractFileSystem\"\n _path: str\n\n @staticmethod\n def _get_ome(ome_xml: str, clean_metadata: bool = True) -> OME:\n # To clean or not to clean, that is the question\n if clean_metadata:\n ome_xml = clean_ome_xml_for_known_issues(ome_xml)\n\n return from_xml(ome_xml, parser=\"lxml\")\n\n @staticmethod\n def _is_supported_image(\n fs: AbstractFileSystem, path: str, clean_metadata: bool = True, **kwargs: Any\n ) -> bool:\n try:\n with fs.open(path) as open_resource:\n with TiffFile(open_resource) as tiff:\n # Get first page description (aka the description tag in general)\n # after Tifffile version 2023.3.15 mmstack images read all scenes\n # into tiff.pages[0]\n xml = tiff.pages[0].description\n ome = Reader._get_ome(xml, clean_metadata)\n\n # Handle no images in metadata\n # this commonly means it is a \"BinaryData\" OME file\n # i.e. a non-main OME-TIFF from MicroManager or similar\n # in this case, because it's not the main file we want to just role\n # back to TiffReader\n if ome.binary_only:\n return False\n\n return True\n\n # tifffile exceptions\n except (TiffFileError, TypeError):\n return False\n\n # xml parse errors\n except ET.ParseError as e:\n log.debug(f\"Failed to parse XML for the provided file. Error: {e}\")\n return False\n\n # invalid OME XMl\n except (XMLSchemaValueError, XMLSchemaValidationError, ValidationError) as e:\n log.debug(f\"OME XML validation failed. Error: {e}\")\n return False\n\n # cant connect to external schema resource (no internet conection)\n except URLError as e:\n log.debug(\n f\"Could not validate OME XML against referenced schema \"\n f\"(no internet connection). 
\"\n f\"Error: {e}\"\n )\n return False\n\n except Exception as e:\n log.debug(f\"Unhandled exception: {e}\")\n return False\n\n @staticmethod\n def _guess_ome_dim_order(tiff: TiffFile, ome: OME, scene_index: int) -> List[str]:\n \"\"\"\n Guess the dimension order based on OME metadata and actual TIFF data.\n Parameters\n -------\n tiff: TiffFile\n A constructed TIFF object to retrieve data from.\n ome: OME\n A constructed OME object to retrieve data from.\n scene_index: int\n The current operating scene index to pull metadata from.\n Returns\n -------\n dims: List[str]\n Educated guess of the dimension order for the file\n \"\"\"\n dims_from_ome = get_dims_from_ome(ome, scene_index)\n\n # Assumes the dimensions coming from here are align semantically\n # with the dimensions specified in this package. Possible T dimension\n # is not equivalent to T dimension here. However, any dimensions\n # not also found in OME will be omitted.\n dims_from_tiff_axes = list(tiff.series[scene_index].axes)\n\n # Adjust the guess of what the dimensions are based on the combined\n # information from the tiff axes and the OME metadata.\n # Necessary since while OME metadata should be source of truth, it\n # does not provide enough data to guess which dimension is Samples\n # for RGB files\n dims = [dim for dim in dims_from_ome if dim not in dims_from_tiff_axes]\n dims += [dim for dim in dims_from_tiff_axes if dim in dims_from_ome]\n return dims\n\n def __init__(\n self,\n image: types.PathLike,\n chunk_dims: Union[str, List[str]] = dimensions.DEFAULT_CHUNK_DIMS,\n clean_metadata: bool = True,\n fs_kwargs: Dict[str, Any] = {},\n **kwargs: Any,\n ):\n # Expand details of provided image\n self._fs, self._path = io.pathlike_to_fs(\n image,\n enforce_exists=True,\n fs_kwargs=fs_kwargs,\n )\n\n # Store params\n if isinstance(chunk_dims, str):\n chunk_dims = list(chunk_dims)\n\n self.chunk_dims = chunk_dims\n self.clean_metadata = clean_metadata\n\n # Enforce valid image\n if not self._is_supported_image(self._fs, self._path, clean_metadata):\n raise exceptions.UnsupportedFileFormatError(\n self.__class__.__name__, self._path\n )\n\n # Get ome-types object and warn of other behaviors\n with self._fs.open(self._path) as open_resource:\n with TiffFile(open_resource, is_mmstack=False) as tiff:\n # Get and store OME\n self._ome = self._get_ome(\n tiff.pages[0].description, self.clean_metadata\n )\n\n # Get and store scenes\n self._scenes: Tuple[str, ...] = tuple(\n image_meta.id for image_meta in self._ome.images\n )\n\n # Log a warning stating that if this is a MM OME-TIFF, don't read\n # many series\n if tiff.is_micromanager and not isinstance(self._fs, LocalFileSystem):\n log.warning(\n \"**Remote reading** (S3, GCS, HTTPS, etc.) of multi-image \"\n \"(or scene) OME-TIFFs created by MicroManager has limited \"\n \"support with the scene API. \"\n \"It is recommended to use independent AICSImage or Reader \"\n \"objects for each remote file instead of the `set_scene` API. 
\"\n \"Track progress on support here: \"\n \"https://github.com/AllenCellModeling/aicsimageio/issues/196\"\n )\n\n @property\n def scenes(self) -> Optional[Tuple[str, ...]]:\n return self._scenes\n\n @staticmethod\n def _expand_dims_to_match_ome(\n image_data: types.ArrayLike,\n ome: OME,\n dims: List[str],\n scene_index: int,\n ) -> types.ArrayLike:\n # Expand image_data for empty dimensions\n ome_shape = []\n\n # need to correct channel count if this is a RGB image\n n_samples = ome.images[scene_index].pixels.channels[0].samples_per_pixel\n has_multiple_samples = n_samples is not None and n_samples > 1\n for d in dims:\n # SizeC can represent RGB (Samples) data rather\n # than channel data, whether or not this is the case depends\n # on what the SamplesPerPixel are for the channel\n if d == \"C\" and has_multiple_samples:\n count = len(ome.images[scene_index].pixels.channels)\n elif d == \"S\" and has_multiple_samples:\n count = n_samples\n else:\n count = getattr(ome.images[scene_index].pixels, f\"size_{d.lower()}\")\n ome_shape.append(count)\n\n # The file may not have all the data but OME requires certain dimensions\n # expand to fill\n expand_dim_ops: List[Optional[slice]] = []\n for d_size in ome_shape:\n # Add empty dimension where OME requires dimension but no data exists\n if d_size == 1:\n expand_dim_ops.append(None)\n # Add full slice where data exists\n else:\n expand_dim_ops.append(slice(None, None, None))\n\n # Apply operators to dask array\n return image_data[tuple(expand_dim_ops)]\n\n def _general_data_array_constructor(\n self,\n image_data: types.ArrayLike,\n dims: List[str],\n coords: Dict[str, Union[List[Any], types.ArrayLike]],\n tiff_tags: TiffTags,\n ) -> xr.DataArray:\n # Expand the image data to match the OME empty dimensions\n image_data = self._expand_dims_to_match_ome(\n image_data=image_data,\n ome=self._ome,\n dims=dims,\n scene_index=self.current_scene_index,\n )\n\n # Always order array\n if dimensions.DimensionNames.Samples in dims:\n out_order = dimensions.DEFAULT_DIMENSION_ORDER_WITH_SAMPLES\n else:\n out_order = dimensions.DEFAULT_DIMENSION_ORDER\n\n # Transform into order\n image_data = transforms.reshape_data(\n image_data,\n \"\".join(dims),\n out_order,\n )\n\n # Reset dims after transform\n dims = [d for d in out_order]\n\n return xr.DataArray(\n image_data,\n dims=dims,\n coords=coords,\n attrs={\n constants.METADATA_UNPROCESSED: tiff_tags,\n constants.METADATA_PROCESSED: self._ome,\n },\n )\n\n def _read_delayed(self) -> xr.DataArray:\n \"\"\"\n Construct the delayed xarray DataArray object for the image.\n Returns\n -------\n image: xr.DataArray\n The fully constructed and fully delayed image as a DataArray object.\n Metadata is attached in some cases as coords, dims, and attrs contains\n unprocessed tags and processed OME object.\n Raises\n ------\n exceptions.UnsupportedFileFormatError\n The file could not be read or is not supported.\n \"\"\"\n with self._fs.open(self._path) as open_resource:\n with TiffFile(open_resource, is_mmstack=False) as tiff:\n # Get unprocessed metadata from tags\n tiff_tags = self._get_tiff_tags(tiff)\n\n # Unpack coords from OME\n coords = get_coords_from_ome(\n ome=self._ome,\n scene_index=self.current_scene_index,\n )\n\n # Guess the dim order based on metadata and actual tiff data\n dims = Reader._guess_ome_dim_order(\n tiff, self._ome, self.current_scene_index\n )\n\n # Grab the tifffile axes to use for dask array construction\n # If any of the non-\"standard\" dims are present\n # they will be filtered out 
during later reshape data calls\n strictly_read_dims = list(tiff.series[self.current_scene_index].axes)\n\n # Create the delayed dask array\n image_data = self._create_dask_array(tiff, strictly_read_dims)\n\n return self._general_data_array_constructor(\n image_data,\n dims,\n coords,\n tiff_tags,\n )\n\n def _read_immediate(self) -> xr.DataArray:\n \"\"\"\n Construct the in-memory xarray DataArray object for the image.\n Returns\n -------\n image: xr.DataArray\n The fully constructed and fully read into memory image as a DataArray\n object. Metadata is attached in some cases as coords, dims, and attrs\n contains unprocessed tags and processed OME object.\n Raises\n ------\n exceptions.UnsupportedFileFormatError\n The file could not be read or is not supported.\n \"\"\"\n with self._fs.open(self._path) as open_resource:\n with TiffFile(open_resource, is_mmstack=False) as tiff:\n # Get unprocessed metadata from tags\n tiff_tags = self._get_tiff_tags(tiff)\n\n # Unpack coords from OME\n coords = get_coords_from_ome(\n ome=self._ome,\n scene_index=self.current_scene_index,\n )\n\n # Guess the dim order based on metadata and actual tiff data\n dims = Reader._guess_ome_dim_order(\n tiff, self._ome, self.current_scene_index\n )\n\n # Read image into memory\n image_data = tiff.series[self.current_scene_index].asarray()\n\n return self._general_data_array_constructor(\n image_data,\n dims,\n coords,\n tiff_tags,\n )\n\n @property\n def ome_metadata(self) -> OME:\n return self.metadata\n\n @property\n def physical_pixel_sizes(self) -> types.PhysicalPixelSizes:\n \"\"\"\n Returns\n -------\n sizes: PhysicalPixelSizes\n Using available metadata, the floats representing physical pixel sizes for\n dimensions Z, Y, and X.\n Notes\n -----\n We currently do not handle unit attachment to these values. Please see the file\n metadata for unit information.\n \"\"\"\n return physical_pixel_sizes(self.metadata, self.current_scene_index)\n\n def _get_tiff_tags(self, tiff: TiffFile, process: bool = True) -> TiffTags:\n unprocessed_tags = tiff.series[self.current_scene_index].pages[0].tags\n if not process:\n return unprocessed_tags\n\n # Create dict of tag and value\n tags: Dict[int, str] = {}\n for code, tag in unprocessed_tags.items():\n tags[code] = tag.value\n\n return tags\n\n def _create_dask_array(\n self, tiff: TiffFile, selected_scene_dims_list: List[str]\n ) -> da.Array:\n \"\"\"\n Creates a delayed dask array for the file.\n Parameters\n ----------\n tiff: TiffFile\n An open TiffFile for processing.\n selected_scene_dims_list: List[str]\n The dimensions to use for constructing the array with.\n Required for managing chunked vs non-chunked dimensions.\n Returns\n -------\n image_data: da.Array\n The fully constructed and fully delayed image as a Dask Array object.\n \"\"\"\n # Always add the plane dimensions if not present already\n for dim in dimensions.REQUIRED_CHUNK_DIMS:\n if dim not in self.chunk_dims:\n self.chunk_dims.append(dim)\n\n # Safety measure / \"feature\"\n self.chunk_dims = [d.upper() for d in self.chunk_dims]\n\n # Construct delayed dask array\n selected_scene = tiff.series[self.current_scene_index]\n selected_scene_dims = \"\".join(selected_scene_dims_list)\n\n # Raise invalid dims error\n if len(selected_scene.shape) != len(selected_scene_dims):\n raise exceptions.ConflictingArgumentsError(\n f\"Dimension string provided does not match the \"\n f\"number of dimensions found for this scene. 
\"\n f\"This scene shape: {selected_scene.shape}, \"\n f\"Provided dims string: {selected_scene_dims}\"\n )\n\n # Constuct the chunk and non-chunk shapes one dim at a time\n # We also collect the chunk and non-chunk dimension order so that\n # we can swap the dimensions after we block out the array\n non_chunk_dim_order = []\n non_chunk_shape = []\n chunk_dim_order = []\n chunk_shape = []\n for dim, size in zip(selected_scene_dims, selected_scene.shape):\n if dim in self.chunk_dims:\n chunk_dim_order.append(dim)\n chunk_shape.append(size)\n else:\n non_chunk_dim_order.append(dim)\n non_chunk_shape.append(size)\n\n # Fill out the rest of the blocked shape with dimension sizes of 1 to\n # match the length of the sample chunk\n # When dask.block happens it fills the dimensions from inner-most to\n # outer-most with the chunks as long as the dimension is size 1\n blocked_dim_order = non_chunk_dim_order + chunk_dim_order\n blocked_shape = tuple(non_chunk_shape) + ((1,) * len(chunk_shape))\n\n # Construct the transpose indices that will be used to\n # transpose the array prior to pulling the chunk dims\n match_map = {dim: selected_scene_dims.find(dim) for dim in selected_scene_dims}\n transposer = []\n for dim in blocked_dim_order:\n transposer.append(match_map[dim])\n\n # Make ndarray for lazy arrays to fill\n lazy_arrays: np.ndarray = np.ndarray(blocked_shape, dtype=object)\n for np_index, _ in np.ndenumerate(lazy_arrays):\n # All dimensions get their normal index except for chunk dims\n # which get filled with \"full\" slices\n indices_with_slices = np_index[: len(non_chunk_shape)] + (\n (slice(None, None, None),) * len(chunk_shape)\n )\n\n # Fill the numpy array with the delayed arrays\n lazy_arrays[np_index] = da.from_delayed(\n delayed(Reader._get_image_data)(\n fs=self._fs,\n path=self._path,\n scene=self.current_scene_index,\n retrieve_indices=indices_with_slices,\n transpose_indices=transposer,\n ),\n shape=chunk_shape,\n dtype=selected_scene.dtype,\n )\n\n # Convert the numpy array of lazy readers into a dask array\n image_data = da.block(lazy_arrays.tolist())\n\n # Because we have set certain dimensions to be chunked and others not\n # we will need to transpose back to original dimension ordering\n # Example, if the original dimension ordering was \"TZYX\" and we\n # chunked by \"T\", \"Y\", and \"X\"\n # we created an array with dimensions ordering \"ZTYX\"\n transpose_indices = []\n for i, d in enumerate(selected_scene_dims):\n new_index = blocked_dim_order.index(d)\n if new_index != i:\n transpose_indices.append(new_index)\n else:\n transpose_indices.append(i)\n\n # Transpose back to normal\n image_data = da.transpose(image_data, tuple(transpose_indices))\n\n return image_data\n\n @staticmethod\n def _get_image_data(\n fs: AbstractFileSystem,\n path: str,\n scene: int,\n retrieve_indices: Tuple[Union[int, slice]],\n transpose_indices: List[int],\n ) -> np.ndarray:\n \"\"\"\n Open a file for reading, construct a Zarr store, select data, and compute to\n numpy.\n Parameters\n ----------\n fs: AbstractFileSystem\n The file system to use for reading.\n path: str\n The path to file to read.\n scene: int\n The scene index to pull the chunk from.\n retrieve_indices: Tuple[Union[int, slice]]\n The image indices to retrieve.\n transpose_indices: List[int]\n The indices to transpose to prior to requesting data.\n Returns\n -------\n chunk: np.ndarray\n The image chunk as a numpy array.\n \"\"\"\n with fs.open(path) as open_resource:\n with imread(\n open_resource,\n aszarr=True,\n 
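# aszarr=True exposes the TIFF series as a Zarr store so chunks can be read lazily.\n 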
series=scene,\n level=0,\n chunkmode=\"page\",\n is_mmstack=False,\n ) as store:\n arr = da.from_zarr(store)\n arr = arr.transpose(transpose_indices)\n\n # By setting the compute call to always use a \"synchronous\" scheduler,\n # it informs Dask not to look for an existing scheduler / client\n # and instead simply read the data using the current thread / process.\n # In doing so, we shouldn't run into any worker data transfer and\n # handoff _during_ a read.\n return arr[retrieve_indices].compute(scheduler=\"synchronous\")\n","repo_name":"bioio-devs/bioio-ome-tiff","sub_path":"bioio_ome_tiff/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":22442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40394009739","text":"'''\nCalculate the percentage of variance of expression when different \nnumber of genes are grouped together in each subgroup\nwritten by Wenli Wu, last edited Sep 19, 2017\nany questions related to the code, please email:wenliwu2018@u.northwestern.edu\n'''\n\nimport numpy as np\nimport os,sys,glob\nimport pandas as pd\nimport macrogenomics\n\n#### Set up data files ##############\ncpmc = macrogenomics.cp_mc()\nn_quantile_0 = np.exp(np.arange(1.5,7,0.2)).astype(int) # list of number of quantiles\nr_s = [] # percentage of variance\nC0,ld=cpmc.Ld_D()\n\n#### Calculate the percentage of variance under each quantile ####\nfor n_quantile in n_quantile_0:\n Se = np.zeros((4,n_quantile))\n initial = np.zeros((4,n_quantile))\n for m in range(4):\n percentile_norm,Se[m,:] = cpmc.se_experiment(n_quantile,m)\n exp_g=np.exp(percentile_norm[1:])*cpmc.initial_aver\n y_fit=cpmc.se_g(exp_g) # model predicted result using g_function\n y_data = Se.mean(axis=0)[1:]*cpmc.D_fit/(C0*ld)\n r_s.append(cpmc.r_sq(y_data,y_fit))\nr_s = np.array(r_s)\nn_group = np.array(2445/n_quantile_0)\np = np.vstack((n_group,r_s)).transpose()\nindex = n_group<350\nnp.savetxt('percentOfVariance.csv',p[index,:],fmt='%3.2f,%.9f')\n\n ","repo_name":"lauren-wu/macrogenomics","sub_path":"percentVariance.py","file_name":"percentVariance.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"72722067692","text":"from ...MQEstimator.mq_algorithm import MQAlgorithm\nfrom ...MQEstimator.mq_problem import MQProblem\nfrom ...MQEstimator.series.hilbert import HilbertSeries\nfrom ...MQEstimator.series.nmonomial import NMonomialSeries\nfrom ...MQEstimator.mq_helper import nmonomials_up_to_degree\nfrom ...base_algorithm import optimal_parameter\nfrom math import log2\nfrom sage.all import Integer\nfrom sage.rings.all import QQ\nfrom sage.rings.infinity import Infinity\nfrom sage.rings.power_series_ring import PowerSeriesRing\nfrom sage.functions.other import binomial\n\n\nclass Crossbred(MQAlgorithm):\n r\"\"\"\n Construct an instance of crossbred estimator\n\n The Crossbred is an algorithm to solve the MQ problem [JV18]_. This algorithm consists of two steps, named the\n preprocessing step and the linearization step. In the preprocessing step, we find a set $S$ of degree-$D$\n polynomials in the ideal generated by the initial set of polynomials. Every specialization of the first $n-k$\n variables of the polynomials in $S$ results in a set $S'$ of degree-$d$ polynomials in $k$ variables. Finally, in\n the linearization step, a solution to $S'$ is found by direct linearization.\n\n .. 
NOTE::\n\n Our complexity estimates are a generalization over any field of size `q` of the complexity formulas given in\n [Dua20]_, which are given either for `q=2` or generic fields.\n\n INPUT:\n\n - ``problem`` -- MQProblem object including all necessary parameters\n - ``h`` -- external hybridization parameter (default: 0)\n - ``w`` -- linear algebra constant (2 <= w <= 3) (default: 2)\n - ``max_D`` -- upper bound to the parameter D (default: 20)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n - ``complexity_type`` -- complexity type to consider (0: estimate, 1: tilde O complexity, default: 0)\n\n EXAMPLES::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: E\n Crossbred estimator for the MQ problem with 10 variables and 12 polynomials\n \"\"\"\n\n def __init__(self, problem: MQProblem, **kwargs):\n q = problem.order_of_the_field()\n if not isinstance(q, (int, Integer)):\n raise TypeError(\"q must be an integer\")\n\n super(Crossbred, self).__init__(problem, **kwargs)\n self._name = \"Crossbred\"\n self._max_D = kwargs.get('max_D', min(\n 30, min(problem.nvariables(), problem.npolynomials())))\n if not isinstance(self._max_D, (int, Integer)):\n raise TypeError(\"max_D must be an integer\")\n\n n = self.nvariables_reduced()\n self.set_parameter_ranges('k', 1, n)\n self.set_parameter_ranges('D', 2, self._max_D)\n self.set_parameter_ranges('d', 1, n)\n\n @optimal_parameter\n def k(self):\n \"\"\"\n Return the optimal `k`, i.e. no. of variables in the resulting system\n\n EXAMPLES::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: E.k()\n 7\n \"\"\"\n return self._get_optimal_parameter('k')\n\n @optimal_parameter\n def D(self):\n \"\"\"\n Return the optimal `D`, i.e. degree of the initial Macaulay matrix\n\n EXAMPLES::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), max_D = 10)\n sage: E.D()\n 5\n \"\"\"\n return self._get_optimal_parameter('D')\n\n @optimal_parameter\n def d(self):\n \"\"\"\n Return the optimal `d`, i.e. 
degree resulting Macaulay matrix\n\n EXAMPLES::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), max_D = 10)\n sage: E.d()\n 1\n \"\"\"\n return self._get_optimal_parameter('d')\n\n @property\n def max_D(self):\n \"\"\"\n Return the upper bound of the degree of the initial Macaulay matrix\n\n EXAMPLES::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: E.max_D\n 10\n \"\"\"\n return self._max_D\n\n @max_D.setter\n def max_D(self, value: int):\n \"\"\"\n Set new upper bound of the degree of the initial Macaulay matrix\n\n INPUT:\n\n - ``value`` -- integer to be set as the upper bound of the parameter `D`\n \"\"\"\n self.reset()\n min_D = self._parameter_ranges['D']['min']\n self._max_D = value\n self.set_parameter_ranges('D', min_D, value)\n\n def _ncols_in_preprocessing_step(self, k: int, D: int, d: int):\n \"\"\"\n Return the number of columns involve in the preprocessing step\n\n INPUT:\n\n - ``k`` -- no. variables in the resulting system\n - ``D`` -- degree of the initial Macaulay matrix\n - ``d`` -- degree resulting Macaulay matrix\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: E._ncols_in_preprocessing_step(4, 6, 3)\n 297\n \"\"\"\n if d >= D:\n raise ValueError(\"d must be smaller than D\")\n\n n, _, q = self.get_reduced_parameters()\n nms0 = NMonomialSeries(n=k, q=q, max_prec=D + 1)\n nms1 = NMonomialSeries(n=n - k, q=q, max_prec=D + 1)\n\n ncols = 0\n for dk in range(d + 1, D):\n ncols += sum([nms0.nmonomials_of_degree(dk) *\n nms1.nmonomials_of_degree(dp) for dp in range(D - dk)])\n\n return ncols\n\n def _ncols_in_linearization_step(self, k: int, d: int):\n \"\"\"\n Return the number of columns involve in the linearization step\n\n INPUT:\n\n - ``k`` -- no. variables in the resulting system\n - ``d`` -- degree resulting Macaulay matrix\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: E._ncols_in_linearization_step(4, 3)\n 35\n \"\"\"\n return nmonomials_up_to_degree(d, k, q=self.problem.order_of_the_field())\n\n def _admissible_parameter_series(self, k: int):\n \"\"\"\n Return the series $S_k$ of admissible parameters\n\n INPUT:\n\n - ``k`` -- no. 
variables in the resulting system\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), max_D = 2)\n sage: E._admissible_parameter_series(2)\n -1 - 3*x - 3*y - 10*x^2 - 3*x*y + 6*y^2 + O(x, y)^3\n \"\"\"\n n, m, q = self.get_reduced_parameters()\n max_D = self.max_D\n\n R = PowerSeriesRing(QQ, names=['x', 'y'], default_prec=max_D + 1)\n x, y = R.gens()\n\n Hk = HilbertSeries(n=k, degrees=[2] * m, q=q)\n k_y, k_xy = Hk.series(y), Hk.series(x * y)\n\n Hn = HilbertSeries(n=n, degrees=[2] * m, q=q)\n n_x = Hn.series(x)\n\n N = NMonomialSeries(n=n - k, q=q, max_prec=max_D + 1)\n nk_x = N.series_monomials_of_degree()(x)\n\n return (k_xy * nk_x - n_x - k_y) / ((1 - x) * (1 - y))\n\n def _valid_choices(self):\n \"\"\"\n Return a list of admissible parameters `(k, D, d)`\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5))\n sage: [list(x.values()) for x in E._valid_choices()][:5] == [[2, 1, 1], [3, 1, 1], [4, 1, 1], [3, 2, 1], [5, 1, 1]]\n True\n \"\"\"\n\n new_ranges = self._fix_ranges_for_already_set_parameters()\n\n k = 1\n stop = False\n while not stop:\n Sk = self._admissible_parameter_series(k)\n for (monomial, coefficient) in Sk.coefficients().items():\n D, d = monomial.exponents()[0]\n if 0 <= coefficient and d < D and new_ranges['D'][\"min\"] <= D <= new_ranges['D'][\"max\"] and new_ranges['d'][\"min\"] <= d <= new_ranges['d'][\"max\"]:\n yield {'D': D, 'd': d, 'k': k}\n\n k += 1\n if k > new_ranges['k'][\"max\"]:\n stop = True\n\n def _compute_time_complexity(self, parameters: dict):\n \"\"\"\n Return the time complexity of the algorithm for a given set of parameters\n\n INPUT:\n\n - ``parameters`` -- dictionary including the parameters\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), bit_complexities=False)\n sage: E.time_complexity(k=4, D=6, d=4)\n 29.77510134996699\n\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), bit_complexities=False)\n sage: E.time_complexity()\n 19.56992234329735\n \"\"\"\n k = parameters['k']\n D = parameters['D']\n d = parameters['d']\n n, m, q = self.get_reduced_parameters()\n w = self.linear_algebra_constant()\n np = self._ncols_in_preprocessing_step(k=k, D=D, d=d)\n nl = self._ncols_in_linearization_step(k=k, d=d)\n complexity = Infinity\n if np > 1 and log2(np) > 1:\n complexity_wiedemann = 3 * binomial(k + d, d) * binomial(n + 2, 2) * np ** 2\n complexity_gaussian = np ** w\n complexity_prep = min(complexity_gaussian, complexity_wiedemann)\n complexity = log2(complexity_prep + m * q ** (n - k) * nl ** w)\n h = self._h\n return h * log2(q) + complexity\n\n def _compute_memory_complexity(self, parameters: dict):\n \"\"\"\n Return the memory complexity of the algorithm for a given set of parameters\n\n INPUT:\n\n - ``parameters`` -- dictionary including the parameters\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5),bit_complexities=False)\n sage: 
E.memory_complexity(k=4, D=6, d=4)\n 12.892542816648552\n\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), bit_complexities=False)\n sage: E.memory_complexity()\n 19.38013126659691\n \"\"\"\n k = parameters['k']\n D = parameters['D']\n d = parameters['d']\n ncols_pre_step = self._ncols_in_preprocessing_step(k, D, d)\n ncols_lin_step = self._ncols_in_linearization_step(k, d)\n return log2(ncols_pre_step ** 2 + ncols_lin_step ** 2)\n\n def _compute_tilde_o_time_complexity(self, parameters: dict):\n \"\"\"\n Return the Ō time complexity of the algorithm for a given set of parameters\n\n INPUT:\n\n - ``parameters`` -- dictionary including the parameters\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), complexity_type=1)\n sage: E.time_complexity(k=4, D=6, d=4)\n 26.190185554770082\n\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), complexity_type=1)\n sage: E.time_complexity()\n 19.39681379895914\n \"\"\"\n k = parameters['k']\n D = parameters['D']\n d = parameters['d']\n np = self._ncols_in_preprocessing_step(k=k, D=D, d=d)\n nl = self._ncols_in_linearization_step(k=k, d=d)\n n, _, q = self.get_reduced_parameters()\n w = self.linear_algebra_constant()\n h = self._h\n return h * log2(q) + log2(np ** 2 + q ** (n - k) * nl ** w)\n\n def _compute_tilde_o_memory_complexity(self, parameters: dict):\n \"\"\"\n Return the Ō memory complexity of the algorithm for a given set of parameters\n\n INPUT:\n\n - ``parameters`` -- dictionary including the parameters\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), complexity_type=1)\n sage: E.memory_complexity(k=4, D=6, d=4)\n 12.892542816648552\n \"\"\"\n return self._compute_memory_complexity(parameters)\n\n def _find_optimal_tilde_o_parameters(self):\n \"\"\"\n\n Return the optimal parameters to achieve the optimal Ō time complexity.\n\n TESTS::\n\n sage: from cryptographic_estimators.MQEstimator.MQAlgorithms.crossbred import Crossbred\n sage: from cryptographic_estimators.MQEstimator.mq_problem import MQProblem\n sage: E = Crossbred(MQProblem(n=10, m=12, q=5), complexity_type=1)\n sage: E.optimal_parameters()\n {'D': 5, 'd': 1, 'k': 7}\n \"\"\"\n self._find_optimal_parameters()\n","repo_name":"Crypto-TII/CryptographicEstimators","sub_path":"cryptographic_estimators/MQEstimator/MQAlgorithms/crossbred.py","file_name":"crossbred.py","file_ext":"py","file_size_in_byte":14484,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"55"} +{"seq_id":"10281224384","text":"# BOJ 11279\n# Max-heap problem\n# The number of operations N is given, followed by x on each of the next N lines. 
\n# If x is a positive integer -> push it onto the heap\n# If x is 0 -> print the maximum value in the heap and remove it\n\nimport sys\nimport heapq\ninput = sys.stdin.readline\nN = int(input())\ns = []\n\nfor i in range(N) : \n x = int(input())\n if x == 0 : \n print(0) if len(s) == 0 else print(-heapq.heappop(s))\n else : \n heapq.heappush(s, -x)\n \n","repo_name":"Luchs41/BOJ","sub_path":"Unlabeled/11279.py","file_name":"11279.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"55"} +{"seq_id":"34760584844","text":"from structio import Struct, StructIO\r\nimport string as strlib\r\nimport ctypes\r\nimport os\r\nimport sys\r\n\r\n\"\"\"\r\nPackage:\r\ncontains Header and Entry\r\n\r\nHeader:\r\nmajor_version = int #1\r\nminor_version = int #1\r\nmajor_user_version = int #0\r\nminor_user_version = int #0\r\nflags = int #unknown\r\ncreated_date = int #not important\r\nmodified_date = int #not important\r\nindex_major_version = int #7\r\nindex_entry_count = int #number of entries in file\r\nindex_location = int #location of file index\r\nindex_size = int #length of index\r\nhole_index_entry_count = int #number of holes in file\r\nhole_index_location = int #location of hole index\r\nhole_index_size = int #length of hole index\r\nindex_minor_version = int #index version, between 0 and 2\r\nremainder = 32 bytes #what remains of the header\r\n\r\nEntry:\r\ntype: int #entry type\r\ngroup: int #entry group\r\ninstance: int #entry instance\r\nresource: int #entry resource, only exists if the index minor version is 2\r\ncompressed = bool #indicates whether the entry is compressed or not\r\nname = str #name of entry\r\n\"\"\"\r\n\r\nif sys.platform != 'win32':\r\n raise Exception('The dbpf library currently only works in Windows')\r\n \r\nif sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 2):\r\n raise Exception('The dbpf library requires Python 3.2 or higher')\r\n \r\nnamed_types = {0x42434F4E, 0x42484156, 0x4E524546, 0x4F424A44, 0x53545223, 0x54544142, 0x54544173, 0x424D505F, 0x44475250, 0x534C4F54, 0x53505232}\r\nnamed_rcol_types = {0xFB00791E, 0x4D51F042, 0xE519C933, 0xAC4F8687, 0x7BA3838C, 0xC9C81B9B, 0xC9C81BA3, 0xC9C81BA9, 0xC9C81BAD, 0xED534136, 0xFC6EB1F7, 0x49596978, 0x1C4A276C}\r\nnamed_cpf_types = {0x2C1FD8A1, 0x0C1FE246, 0xEBCF3E27}\r\nlua_types = {0x9012468A, 0x9012468B}\r\n\r\nis_64bit = sys.maxsize > 2 ** 32\r\n\r\nif is_64bit:\r\n clib = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),'dbpf64.dll'))\r\nelse:\r\n clib = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),'dbpf32.dll'))\r\n \r\nclib.decompress.restype = ctypes.c_bool\r\n\r\nclass RepeatKeyError(Exception): pass\r\nclass CompressionError(Exception): pass\r\nclass NotSupportedError(Exception): pass\r\n\r\nclass ExtendedStruct(Struct):\r\n def _get_7bstr_len(self, b, start=0):\r\n str_len, int_len = self.unpack_7bint(b, start)\r\n return int_len + str_len\r\n \r\n def unpack_7bstr(self, b, start=0):\r\n str_len, int_len = self.unpack_7bint(b, start)\r\n string = self.unpack_str(b[(start + int_len):(start + int_len + str_len)])\r\n return string, int_len + str_len\r\n \r\n def pack_7bstr(self, string):\r\n b = self.pack_str(string)\r\n return self.pack_7bint(len(b)) + b\r\n \r\nclass MemoryIO(StructIO):\r\n def __init__(self, b=b'', endian='little'):\r\n super().__init__(b)\r\n self._struct = ExtendedStruct(endian)\r\n \r\n def copy(self):\r\n return MemoryIO(self.getvalue(), self._struct.endian)\r\n \r\n def _get_7bstr_len(self):\r\n return 
self._struct._get_7bstr_len(self.getvalue(), start=self.tell())\r\n \r\n def read_7bstr(self):\r\n value, length = self._struct.unpack_7bstr(self.getvalue(), start=self.tell())\r\n self.seek(length, 1)\r\n return value\r\n \r\n def write_7bstr(self, string):\r\n return self.write(self._struct.pack_7bstr(string))\r\n \r\n def append_7bstr(self, string):\r\n return self.append(self._struct.pack_7bstr(string))\r\n \r\n def overwrite_7bstr(self, string):\r\n start = self.tell()\r\n return self.overwrite(start, start + self._get_7bstr_len(), self._struct.pack_7bstr(string))\r\n \r\n def skip_7bstr(self):\r\n return self.seek(self._get_7bstr_len(), 1)\r\n \r\n def delete_7bstr(self):\r\n return self.delete(self._get_7bstr_len())\r\n \r\nclass Header:\r\n def __init__(self):\r\n self.major_version = 1\r\n self.minor_version = 1\r\n self.major_user_version = 0\r\n self.minor_user_version = 0\r\n self.flags = 0\r\n self.created_date = 0\r\n self.modified_date = 0\r\n self.index_major_version = 7\r\n self.index_entry_count = 0\r\n self.index_location = 0\r\n self.index_size = 0\r\n self.hole_index_entry_count = 0\r\n self.hole_index_location = 0\r\n self.hole_index_size = 0\r\n self.index_minor_version = 2\r\n self.remainder = b'\\x00' * 32\r\n \r\n def __str__(self):\r\n display = 'Header:\\n'\r\n display += 'major version: {}\\n'.format(self.major_version)\r\n display += 'minor version: {}\\n'.format(self.minor_version)\r\n display += 'major user version: {}\\n'.format(self.major_user_version)\r\n display += 'minor user version: {}\\n'.format(self.minor_user_version)\r\n display += 'flags: {}\\n'.format(self.flags)\r\n display += 'created date: {}\\n'.format(self.created_date)\r\n display += 'modified date: {}\\n'.format(self.modified_date)\r\n display += 'index major version: {}\\n'.format(self.index_major_version)\r\n display += 'index entry count: {}\\n'.format(self.index_entry_count)\r\n display += 'index location: {}\\n'.format(self.index_location)\r\n display += 'index size: {}\\n'.format(self.index_size)\r\n display += 'hole index entry count: {}\\n'.format(self.hole_index_entry_count)\r\n display += 'hole index location: {}\\n'.format(self.hole_index_location)\r\n display += 'hole index size: {}\\n'.format(self.hole_index_size)\r\n display += 'index minor version: {}'.format(self.index_minor_version)\r\n \r\n return display\r\n \r\n def copy(self):\r\n header_copy = Header()\r\n for key, value in vars(self).items():\r\n setattr(header_copy, key, value)\r\n \r\n return header_copy\r\n \r\nclass Entry(MemoryIO):\r\n def __init__(self, type_id, group_id, instance_id, resource_id=None, name='', content=b'', compressed=False):\r\n super().__init__(content)\r\n self.type = type_id\r\n self.group = group_id\r\n self.instance = instance_id\r\n \r\n if resource_id is not None:\r\n self.resource = resource_id\r\n \r\n self.name = name\r\n self.compressed = compressed\r\n \r\n def __contains__(self, key):\r\n return hasattr(self, key)\r\n \r\n def __str__(self):\r\n if self.name == '':\r\n name_display = ''\r\n else:\r\n name_display = '{}\\n'.format(self.name)\r\n \r\n if hasattr(self, 'resource'):\r\n return name_display + 'Type: 0x{:08X}, Group: 0x{:08X}, Instance: 0x{:08X}, Resource: 0x{:08X}'.format(self.type, self.group, self.instance, self.resource)\r\n else:\r\n return name_display + 'Type: 0x{:08X}, Group: 0x{:08X}, Instance: 0x{:08X}'.format(self.type, self.group, self.instance)\r\n \r\n def copy(self):\r\n if hasattr(self, 'resource'):\r\n return Entry(self.type, self.group, self.instance, 
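            # Descriptive note: self.buffer is the raw (possibly still
            # compressed) payload, and type/group/instance (plus resource,
            # when present) form the TGI(R) key identifying the entry
            # being duplicated in this Entry(...) call.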
self.resource, self.name, self.buffer, self.compressed)\r\n else:\r\n return Entry(self.type, self.group, self.instance, name=self.name, content=self.buffer, compressed=self.compressed)\r\n \r\n #using C++ library from moreawesomethanyou \r\n def compress(self):\r\n if self.compressed or self.type == 0xE86B1EEF:\r\n return self\r\n \r\n else:\r\n src = self.buffer\r\n src_len = len(src)\r\n dst = ctypes.create_string_buffer(src_len)\r\n \r\n dst_len = clib.try_compress(src, src_len, dst)\r\n \r\n if dst_len > 0:\r\n self.write_all(dst.raw[:dst_len])\r\n self.compressed = True\r\n \r\n return self\r\n \r\n #using C++ library from moreawesomethanyou \r\n def decompress(self):\r\n if self.compressed:\r\n src = self.buffer\r\n compressed_size = len(src)\r\n \r\n self.seek(6)\r\n uncompressed_size = self.read_int(3, 'big')\r\n \r\n dst = ctypes.create_string_buffer(uncompressed_size)\r\n success = clib.decompress(src, compressed_size, dst, uncompressed_size, False)\r\n \r\n self.seek(0)\r\n \r\n if success:\r\n self.write_all(dst.raw)\r\n self.compressed = False\r\n \r\n return self\r\n \r\n else:\r\n raise CompressionError('Could not decompress the file')\r\n \r\n else:\r\n return self\r\n \r\n def read_name(self):\r\n try:\r\n if self.type in named_types:\r\n self.name = partial_decompress(self, 64).read().rstrip(b'x\\00').decode('utf-8', errors='ignore')\r\n \r\n elif self.type in named_rcol_types:\r\n file = partial_decompress(self)\r\n location = file.find(b'cSGResource')\r\n \r\n if location != -1:\r\n file.seek(location + 19)\r\n self.name = file.read_7bstr()\r\n \r\n elif self.type in named_cpf_types:\r\n file = partial_decompress(self)\r\n location = file.find(b'\\x18\\xea\\x8b\\x0b\\x04\\x00\\x00\\x00name')\r\n \r\n if location != -1:\r\n file.seek(location + 12)\r\n self.name = file.read_pstr(4)\r\n \r\n elif self.type in lua_types:\r\n file = partial_decompress(self)\r\n file.seek(4)\r\n self.name = file.read_pstr(4) \r\n \r\n else:\r\n self.name = ''\r\n \r\n except:\r\n self.name = ''\r\n \r\n return self.name\r\n \r\n def write_name(self, name):\r\n was_compressed = self.compressed\r\n \r\n if self.type in named_types:\r\n if self.compressed:\r\n self.decompress()\r\n \r\n if len(name) <= 64:\r\n self.seek(0)\r\n self.write_str(name)\r\n self.write_int(0, 64 - len(name))\r\n self.seek(0)\r\n self.name = name\r\n else:\r\n raise ValueError(\"file name '{}' is longer than expected\".format(name))\r\n \r\n elif self.type in named_rcol_types:\r\n if self.compressed:\r\n self.decompress()\r\n \r\n location = self.find(b'cSGResource')\r\n \r\n if location != -1:\r\n self.seek(location + 19)\r\n self.overwrite_7bstr(name)\r\n self.seek(0)\r\n self.name = name\r\n \r\n elif self.type in named_cpf_types:\r\n if self.compressed:\r\n self.decompress()\r\n \r\n location = self.find(b'\\x18\\xea\\x8b\\x0b\\x04\\x00\\x00\\x00name')\r\n \r\n if location != -1:\r\n self.seek(location + 12)\r\n self.overwrite_pstr(name, 4)\r\n self.seek(0)\r\n self.name = name\r\n \r\n elif self.type in lua_types:\r\n if self.compressed:\r\n self.decompress()\r\n \r\n self.seek(4)\r\n self.overwrite_pstr(name, 4)\r\n self.seek(0)\r\n self.name = name\r\n \r\n else:\r\n raise NotSupportedError('naming format 0x{:08X} is not supported'.format(self.type))\r\n \r\n if was_compressed:\r\n self.compress()\r\n \r\nclass Package:\r\n def __init__(self):\r\n self.header = Header()\r\n self.entries = []\r\n \r\n def copy(self):\r\n package_copy = Package()\r\n package_copy.header = self.header.copy() \r\n 
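        # Note on the compressed-entry header that compress()/decompress()
        # above rely on (inferred from this code plus the commonly documented
        # QFS/RefPack layout -- treat the bytes 0-3 field as an assumption):
        # bytes 0-3 compressed size (little-endian), bytes 4-5 the 0x10fb
        # magic, bytes 6-8 the uncompressed size as a big-endian 24-bit
        # integer, e.g. b'*\x00\x00\x00\x10\xfb\x00\x01\x00' describes a
        # 42-byte compressed entry that expands to 256 bytes.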
package_copy.entries = [entry.copy() for entry in self.entries]\r\n \r\n return package_copy\r\n \r\n def unpack(file_path, decompress=False):\r\n with open(file_path, 'rb') as fs:\r\n file = MemoryIO(fs.read())\r\n \r\n self = Package()\r\n \r\n self.file_name = os.path.basename(file_path)\r\n \r\n #read header\r\n file.seek(4)\r\n self.header.major_version = file.read_int(4)\r\n self.header.minor_version = file.read_int(4)\r\n self.header.major_user_version = file.read_int(4)\r\n self.header.minor_user_version = file.read_int(4)\r\n self.header.flags = file.read_int(4)\r\n self.header.created_date = file.read_int(4)\r\n self.header.modified_date = file.read_int(4)\r\n self.header.index_major_version = file.read_int(4)\r\n self.header.index_entry_count = file.read_int(4)\r\n self.header.index_location = file.read_int(4)\r\n self.header.index_size = file.read_int(4)\r\n self.header.hole_index_entry_count = file.read_int(4)\r\n self.header.hole_index_location = file.read_int(4)\r\n self.header.hole_index_size = file.read_int(4)\r\n self.header.index_minor_version = file.read_int(4)\r\n self.header.remainder = file.read(32)\r\n \r\n #read index\r\n self.entries = []\r\n \r\n file.seek(self.header.index_location)\r\n for i in range(self.header.index_entry_count):\r\n #using int.from_bytes and file.read instead of read_int to avoid the function call overhead\r\n type_id = int.from_bytes(file.read(4), 'little')\r\n group_id = int.from_bytes(file.read(4), 'little')\r\n instance_id = int.from_bytes(file.read(4), 'little')\r\n \r\n if self.header.index_minor_version == 2:\r\n resource_id = int.from_bytes(file.read(4), 'little')\r\n else:\r\n resource_id = None\r\n \r\n location = int.from_bytes(file.read(4), 'little')\r\n size = int.from_bytes(file.read(4), 'little')\r\n \r\n position = file.tell()\r\n file.seek(location)\r\n content = file.read(size)\r\n file.seek(position)\r\n \r\n self.entries.append(Entry(type_id, group_id, instance_id, resource_id, content=content))\r\n \r\n #make list of index entries\r\n #the entries list is for checking if the file is compressed later, just for increasing execution speed\r\n #so that we don't need to spend time converting the CLST entries to integers\r\n index_entries = []\r\n \r\n if self.header.index_minor_version == 2:\r\n size = 16\r\n else:\r\n size = 12\r\n \r\n file.seek(self.header.index_location)\r\n for i in range(self.header.index_entry_count):\r\n index_entries.append(file.read(size))\r\n file.seek(8, 1)\r\n \r\n #read CLST\r\n #using a set for speed\r\n clst_entries = set()\r\n results = search(self.entries, 0xE86B1EEF, get_first=True)\r\n \r\n if len(results) > 0:\r\n clst = results[0]\r\n file_size = len(clst)\r\n \r\n if self.header.index_minor_version == 2:\r\n entry_size = 20\r\n tgi_size = 16\r\n else:\r\n entry_size = 16\r\n tgi_size = 12\r\n \r\n clst.seek(0)\r\n for i in range(file_size // entry_size):\r\n entry = clst.read(tgi_size)\r\n \r\n if entry not in clst_entries:\r\n clst_entries.add(entry)\r\n else:\r\n raise RepeatKeyError('Two entries with matching type, group, and instance found')\r\n \r\n clst.seek(4, 1)\r\n \r\n clst.seek(0)\r\n \r\n #check if compressed\r\n for index_entry, entry in zip(index_entries, self.entries):\r\n entry.compressed = False\r\n \r\n if index_entry in clst_entries:\r\n entry.seek(4)\r\n \r\n #entries can be in the CLST file even if they're not compressed\r\n #so a second check for compression would be good\r\n if entry.read(2) == b'\\x10\\xfb':\r\n entry.compressed = True\r\n \r\n entry.seek(0)\r\n 
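            # Recap of the CLST directory parsed above: with index minor
            # version 2 each record is 20 bytes (16-byte TGIR key plus a
            # 4-byte uncompressed size), otherwise 16 bytes (12-byte TGI key
            # plus the size); CLST membership only marks an entry as
            # *possibly* compressed, hence the extra 0x10fb magic check.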
\r\n #decompress entries\r\n if decompress:\r\n for entry in self.entries:\r\n try:\r\n entry.decompress()\r\n except CompressionError:\r\n pass\r\n \r\n #read file names\r\n for entry in self.entries:\r\n try:\r\n #print(entry)\r\n entry.read_name()\r\n except CompressionError:\r\n pass\r\n \r\n return self \r\n \r\n def pack_into(self, file_path, compress=False):\r\n #compress entries\r\n if compress:\r\n compressed_entries = {} #for checking if the a compressed entry with the same TGI already exists\r\n for i, entry in enumerate(self.entries):\r\n if 'resource' in entry:\r\n tgi = (entry.type, entry.group, entry.instance, entry.resource)\r\n else:\r\n tgi = (entry.type, entry.group, entry.instance)\r\n \r\n if tgi in compressed_entries:\r\n i = compressed_entries[tgi]\r\n self.entries[i].decompress()\r\n \r\n else:\r\n entry.compress()\r\n compressed_entries[tgi] = i\r\n \r\n #only check for repeated compressed entries\r\n else:\r\n compressed_entries = set()\r\n for entry in self.entries:\r\n if entry.compressed:\r\n if 'resource' in entry:\r\n tgi = (entry.type, entry.group, entry.instance, entry.resource)\r\n else:\r\n tgi = (entry.type, entry.group, entry.instance)\r\n \r\n if tgi in compressed_entries:\r\n raise RepeatKeyError('Repeat compressed entry found in package')\r\n else:\r\n compressed_entries.add(tgi)\r\n \r\n #use index minor version 2?\r\n if self.header.index_minor_version != 2:\r\n for entry in self.entries:\r\n if 'resource' in entry:\r\n self.header.index_minor_version = 2\r\n break\r\n \r\n file = MemoryIO()\r\n \r\n #write header\r\n file.write(b'DBPF')\r\n file.write_int(self.header.major_version, 4)\r\n file.write_int(self.header.minor_version, 4)\r\n file.write_int(self.header.major_user_version, 4)\r\n file.write_int(self.header.minor_user_version, 4)\r\n file.write_int(self.header.flags, 4)\r\n file.write_int(self.header.created_date, 4)\r\n file.write_int(self.header.modified_date, 4)\r\n file.write_int(self.header.index_major_version, 4)\r\n file.write_int(self.header.index_entry_count, 4)\r\n file.write_int(self.header.index_location, 4)\r\n file.write_int(self.header.index_size, 4)\r\n file.write_int(self.header.hole_index_entry_count, 4)\r\n file.write_int(self.header.hole_index_location, 4)\r\n file.write_int(self.header.hole_index_size, 4)\r\n file.write_int(self.header.index_minor_version, 4)\r\n \r\n file.write(self.header.remainder)\r\n \r\n #make CLST\r\n results = search(self.entries, 0xE86B1EEF, get_first=True)\r\n compressed_files = [entry for entry in self.entries if entry.compressed]\r\n \r\n if len(results) > 0:\r\n self.entries.remove(results[0])\r\n \r\n if len(compressed_files) > 0:\r\n clst = Entry(0xE86B1EEF, 0xE86B1EEF, 0x286B1F03)\r\n \r\n if self.header.index_minor_version == 2:\r\n clst.resource = 0x00000000\r\n \r\n for compressed_file in compressed_files:\r\n clst.write_int(compressed_file.type, 4)\r\n clst.write_int(compressed_file.group, 4)\r\n clst.write_int(compressed_file.instance, 4)\r\n \r\n if self.header.index_minor_version == 2:\r\n if 'resource' in compressed_file:\r\n clst.write_int(compressed_file.resource, 4)\r\n else:\r\n clst.write_int(0, 4)\r\n \r\n #uncompressed size is written in big endian?\r\n compressed_file.seek(6)\r\n uncompressed_size = compressed_file.read_int(3, 'big')\r\n clst.write_int(uncompressed_size, 4)\r\n \r\n self.entries.append(clst)\r\n \r\n #write entries\r\n for entry in self.entries:\r\n #get new location to put in the index later\r\n entry.location = file.tell()\r\n \r\n 
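            # entry.location recorded above (and entry.size just below) feed
            # the index written later: one row per entry holding type, group,
            # instance (plus resource when the index minor version is 2),
            # location and size, all as 4-byte little-endian integers.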
file.write(entry.buffer)\r\n \r\n #get new file size to put in the index later\r\n entry.size = file.tell() - entry.location\r\n \r\n #write index\r\n index_start = file.tell()\r\n \r\n for entry in self.entries:\r\n file.write_int(entry.type, 4)\r\n file.write_int(entry.group, 4)\r\n file.write_int(entry.instance, 4)\r\n \r\n if self.header.index_minor_version == 2:\r\n if 'resource' in entry:\r\n file.write_int(entry.resource, 4)\r\n else:\r\n file.write_int(0, 4)\r\n \r\n file.write_int(entry.location, 4)\r\n file.write_int(entry.size, 4)\r\n \r\n index_end = file.tell()\r\n \r\n file.truncate()\r\n \r\n #update header index info, clear holes index info\r\n file.seek(36)\r\n file.write_int(len(self.entries), 4) #index entry count\r\n file.write_int(index_start, 4) #index location\r\n file.write_int(index_end - index_start, 4) #index size\r\n file.write_int(0, 12) #hole index entries\r\n \r\n with open(file_path, 'wb') as fs:\r\n fs.write(file.buffer)\r\n \r\ndef partial_decompress(entry, size=-1):\r\n if entry.compressed:\r\n src = entry.buffer \r\n compressed_size = len(src)\r\n \r\n entry.seek(6)\r\n uncompressed_size = entry.read_int(3, 'big')\r\n \r\n if size == -1 or size >= uncompressed_size:\r\n size = uncompressed_size\r\n \r\n dst = ctypes.create_string_buffer(size)\r\n success = clib.decompress(src, compressed_size, dst, size, True)\r\n \r\n entry.seek(0)\r\n \r\n if success:\r\n return MemoryIO(dst.raw)\r\n else:\r\n raise CompressionError('Could not decompress the file')\r\n \r\n else:\r\n entry.seek(0)\r\n buffer = entry.read(size)\r\n entry.seek(0)\r\n return MemoryIO(buffer)\r\n \r\ndef search(entries, type_id=-1, group_id=-1, instance_id=-1, resource_id=-1, entry_name='', get_first=False):\r\n entry_name = entry_name.lower()\r\n \r\n results = []\r\n for entry in entries:\r\n if type_id != -1 and type_id != entry.type:\r\n continue\r\n \r\n if group_id != -1 and group_id != entry.group:\r\n continue\r\n \r\n if instance_id != -1 and instance_id != entry.instance:\r\n continue\r\n \r\n if resource_id != -1 and resource_id != entry.resource:\r\n continue\r\n \r\n if entry_name != '' and entry_name not in entry.name.lower():\r\n continue\r\n \r\n results.append(entry)\r\n \r\n if get_first:\r\n return results\r\n \r\n return results\r\n \r\n#for faster searching\r\ndef build_index(entries):\r\n index = {}\r\n index['types'] = {}\r\n index['groups'] = {}\r\n index['instances'] = {}\r\n index['resources'] = {}\r\n index['names index'] = {}\r\n index['names list'] = []\r\n \r\n for c in strlib.printable:\r\n index['names index'][c] = set()\r\n \r\n for i, entry in enumerate(entries):\r\n if entry.type not in index['types']:\r\n index['types'][entry.type] = set()\r\n \r\n index['types'][entry.type].add(i)\r\n \r\n if entry.group not in index['groups']:\r\n index['groups'][entry.group] = set()\r\n \r\n index['groups'][entry.group].add(i)\r\n \r\n if entry.instance not in index['instances']:\r\n index['instances'][entry.instance] = set()\r\n \r\n index['instances'][entry.instance].add(i)\r\n \r\n if 'resource' in entry:\r\n if entry.resource not in index['resources']:\r\n index['resources'][entry.resource] = set()\r\n \r\n index['resources'][entry.resource].add(i)\r\n \r\n name = entry.name.lower()\r\n index['names list'].append(name)\r\n \r\n if name != '':\r\n for char in name:\r\n index['names index'][char].add(i)\r\n \r\n return index\r\n \r\n#faster search\r\ndef index_search(entries, index, type_id=-1, group_id=-1, instance_id=-1, resource_id=-1, entry_name=''):\r\n results 
= []\r\n keys = ['types', 'groups', 'instances', 'resources']\r\n values = [type_id, group_id, instance_id, resource_id]\r\n \r\n for key, value in zip(keys, values):\r\n if value == -1:\r\n pass\r\n elif value in index[key]:\r\n results.append(index[key][value])\r\n else:\r\n return []\r\n \r\n if len(results) > 0:\r\n results = set.intersection(*results)\r\n \r\n if entry_name != '':\r\n entry_name = entry_name.lower()\r\n names_set = (index['names index'][char] for char in entry_name)\r\n \r\n if len(results) > 0:\r\n results = results.intersection(*names_set)\r\n else:\r\n results = set.intersection(*names_set)\r\n \r\n if len(entry_name) > 1:\r\n results = [i for i in results if entry_name in index['names list'][i]]\r\n \r\n return [entries[i] for i in results]\r\n","repo_name":"lingeringwillx/Sims-2-Clean-Up-Script","sub_path":"dbpf/dbpf.py","file_name":"dbpf.py","file_ext":"py","file_size_in_byte":26467,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"17858973210","text":"import numpy as np\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import neighbors\nimport random\n\n\ndef getTextWords(path):\n f = open(path, \"r\", encoding = 'UTF-8')\n txt = f.read().lower()\n \n for ch in '!\"#$%&()*+,-./;:<=>?@[\\\\]^‘_{|}~':\n txt = txt.replace(ch, \" \")\n words = txt.split()\n \n f.close()\n return words\n\n\ndef getLineWords(path):\n f = open(path, \"r\", encoding = 'UTF-8')\n line = f.readline().lower()\n lines = []\n \n while line:\n for ch in '!\"#$%&()*+,-./;:<=>?@[\\\\]^‘_{|}~':\n line = line.replace(ch, \" \")\n words = line.split()\n \n lines.append(words)\n line = f.readline().lower()\n \n f.close()\n return lines\n\n\ndef getFreq(words):\n counts = {}\n \n for word in words:\n counts[word] = counts.get(word, 0) + 1\n \n items = list(counts.items())\n items.sort(key=lambda x:x[1], reverse=True)\n return items,counts\n\n\ndef printFreq(items):\n for i in range(len(items)):\n word, count = items[i]\n print(\"{0:<10}{1:>5}\".format(word, count))\n\n\ndef readDataSet(path,numQues,numWords,lineFreq):\n f = open(path, \"r\")\n print(\"Dataset:\",numQues)\n dataSet = np.zeros([numQues,numWords],int)\n hwLabels = np.zeros([numQues,10])\n \n for i in range(numQues):\n digit = int(f.readline())\n hwLabels[i][digit] = 1.0\n dataSet[i] = lineFreq[i]\n \n f.close()\n return dataSet,hwLabels\n\n\ndef readDataSet_K(path,numQues,numWords,lineFreq):\n f = open(path, \"r\")\n hwLabels = np.zeros([numQues])\n \n for i in range(numQues): \n hwLabels[i] = f.readline()\n \n f.close()\n return hwLabels\n\n\ndef getLineFreq(lines,counts):\n lineFreq = []\n \n for linewords in lines:\n lineItems,l_counts = getFreq(linewords)\n \n for word in counts.keys():\n for l_word in l_counts.keys():\n if l_word == word and l_word not in ['i','to','can','my','the','what','how','if']:\n counts[word] = 1\n break\n counts[word] = 0\n \n lineFreq.append(list(counts.values()))\n \n #print(lineFreq)\n return lineFreq\n\n\ndef getQuesFreq(counts,op):\n ques = op.lower()\n for ch in '!\"#$%&()*+,-./;:<=>?@[\\\\]^‘_{|}~':\n ques = ques.replace(ch, \" \")\n quesWords = ques.split()\n #print(quesWords)\n quesLines = []\n quesLines.append(quesWords)\n quesFreq = getLineFreq(quesLines,counts)\n #print(quesFreq)\n return quesFreq\n\n\ndef getQuesClass(res,Res):\n QC = {}\n QC[0] = 'date'\n QC[1] = 'exchange, study abroad'\n QC[2] = 'course transfer, 2+2 & 4+0'\n QC[3] = 'health care'\n QC[4] = 'contact list'\n QC[5] = 'campus return'\n QC[6] = 
'extenuating circumstances'\n QC[7] = 'feedback'\n QC[8] = 'teaching schedule'\n QC[9] = 'academic'\n for vec in res:\n for num in range(len(vec)):\n if vec[num] == 1:\n return QC[num],"(Classifier: MLP Neural Network)"\n return QC[Res],"(Classifier: KNN Algorithm)"\n\n\ndef getQues(path,lineNum):\n f = open(path, "r", encoding = 'UTF-8')\n for i in range(lineNum+1):\n line = f.readline()\n f.close()\n return line\n\n\ndef main():\n words = getTextWords("hamlet_all.txt")\n items,counts = getFreq(words)\n #printFreq(items)\n #print(len(items))\n #print(counts)\n lines = getLineWords("hamlet_all.txt")\n lineFreq = getLineFreq(lines,counts)\n \n trainNum = 635\n testTimes = 10\n print("Training...")\n train_dataSet, train_hwLabels = readDataSet("labels_all.txt", len(lines), len(items), lineFreq)\n print("Training Sample:",trainNum,"\nTesting Sample:",len(train_dataSet)-trainNum)\n\n clf = MLPClassifier(hidden_layer_sizes=(50,),\n activation='logistic', solver='adam',\n learning_rate_init = 0.001, max_iter=1000)\n print(clf,'\n')\n\n knn_hwLabels = readDataSet_K("labels_all.txt", len(lines), len(items), lineFreq)\n knn = neighbors.KNeighborsClassifier(algorithm='kd_tree', n_neighbors=1)\n \n Knn = neighbors.NearestNeighbors(n_neighbors=3)\n Knn.fit(train_dataSet, knn_hwLabels)\n\n for j in range(testTimes):\n trainSample = []\n trainSampleLb = []\n trainSampleLb_K = []\n \n testSample = []\n testSampleLb = []\n testSampleLb_K = []\n\n Sample = random.sample(range(len(train_dataSet)),trainNum)\n\n for i in range(len(Sample)):\n trainSample.append(train_dataSet[Sample[i]])\n trainSampleLb.append(train_hwLabels[Sample[i]])\n trainSampleLb_K.append(knn_hwLabels[Sample[i]])\n\n for i in range(len(train_dataSet)):\n if i not in Sample:\n testSample.append(train_dataSet[i])\n testSampleLb.append(train_hwLabels[i])\n testSampleLb_K.append(knn_hwLabels[i])\n \n clf.fit(trainSample, trainSampleLb)\n knn.fit(trainSample, trainSampleLb_K)\n\n print("Random testing:",j+1,'/',testTimes)\n\n res = clf.predict(testSample)\n #print(res)\n error_num = 0\n num = len(testSample)\n for i in range(num):\n #Compare the two length-10 arrays: the comparison yields an array of 0s and 1s, where 0 means the elements differ and 1 means they match\n #If the prediction equals the true label, all 10 values are 1; otherwise not all of them are 1\n if np.sum(res[i] == testSampleLb[i]) < 10: \n error_num += 1 \n print("Wrong:",error_num,"in",num,"\tAccuracy:",round(1-error_num/num,6),"(MLP Neural Network)")\n\n res = knn.predict(testSample)\n error_num = np.sum(res != testSampleLb_K)\n print("Wrong:",error_num,"in",num,"\tAccuracy:",round(1-error_num/num,6),"(KNN Algorithm)")\n \n\n print("\nTesting complete. Training with whole dataset...")\n clf.fit(train_dataSet, train_hwLabels)\n knn.fit(train_dataSet, knn_hwLabels)\n print("Training complete.")\n \n op = input("\nEnter your questions ('n' to quit): ")\n while op != 'n':\n a = getQuesFreq(counts,op)\n res = clf.predict(a)\n Res = knn.predict(a)\n for v in a:\n if sum(v) == 0:\n print("Unknown question type")\n break\n Class,Classifier = getQuesClass(res,int(Res))\n print("Question Type:",Class,Classifier)\n\n dist,neigh = Knn.kneighbors(a)\n #print(dist,neigh)\n for v in dist:\n maxDist = max(v)\n if maxDist >= 2:\n print("\nResults may be compromised. 
Consider more questions:\n")\n for v in neigh:\n for qNum in v:\n print('\t',getQues("hamlet_all.txt",qNum),end='')\n \n op = input("\nEnter your questions ('n' to quit): ")\n\nmain()\n","repo_name":"Stx666Michael/text_classifier","sub_path":"src/NLPC_Validation.py","file_name":"NLPC_Validation.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"10458307208","text":"# This attempts to be (more or less) the simplest possible hello world Alexa skill...\r\n\r\nfrom __future__ import print_function\r\nimport random\r\n\r\nglobal joke_count\r\nglobal secret_count\r\nglobal song_count\r\nglobal food_count\r\nglobal quote_count\r\nglobal game_count\r\njoke_count = 0\r\nsecret_count = 0\r\nsong_count = 0\r\nfood_count = 0\r\nquote_count = 0\r\ngame_count = 0\r\n# We'll start with a couple of globals...\r\nCardTitlePrefix = "Greeting"\r\n\r\n# --------------- Helpers that build all of the responses ----------------------\r\n\r\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\r\n """\r\n Build a speechlet JSON representation of the title, output text, \r\n reprompt text & end of session\r\n """\r\n\r\n return {\r\n 'outputSpeech': {\r\n 'type': 'SSML',\r\n 'ssml': ""+output+""\r\n },\r\n\r\n 'card': {\r\n 'type': 'Simple',\r\n 'title': CardTitlePrefix + " - " + title,\r\n 'content': output\r\n },\r\n \r\n 'reprompt': {\r\n 'outputSpeech': {\r\n 'type': 'SSML',\r\n 'ssml': ""+reprompt_text+""\r\n }\r\n },\r\n 'shouldEndSession': should_end_session\r\n }\r\n\r\n\r\ndef build_response(session_attributes, speechlet_response):\r\n """\r\n Build the full response JSON from the speechlet response\r\n """\r\n #print("**** Session Attributes: " +str(session_attributes))\r\n return {\r\n 'version': '1.0',\r\n 'sessionAttributes': session_attributes,\r\n 'response': speechlet_response\r\n }\r\n\r\n\r\n# --------------- Functions that control the skill's behavior ------------------\r\n\r\ndef get_welcome_response(): \r\n card_title = "Hello"\r\n speech_output = "Hi Mayank. Shall I welcome your friend? Introduce me."\r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = "I'm sorry - I didn't get your friend's name. It's either complicated or your pronunciation is bad."\r\n should_end_session = False\r\n session_attributes = { "speech_output": speech_output,\r\n "reprompt_text": reprompt_text\r\n }\r\n return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))\r\n\r\n\r\ndef handle_session_end_request(session):\r\n card_title = "Session Ended"\r\n friend_name = session['attributes']['friend_name']\r\n speech_output = "It was nice meeting you " + friend_name + ". But before going let me tell you this, DATE HIM."\r\n # Setting this to true ends the session and exits the skill.\r\n should_end_session = True\r\n joke_count = 0\r\n secret_count = 0\r\n song_count = 0\r\n food_count = 0\r\n quote_count = 0\r\n game_count = 0\r\n return build_response({}, build_speechlet_response(\r\n card_title, speech_output, "Good Bye", should_end_session))\r\n\r\ndef say_hello_to_girl(friend_name):\r\n """\r\n Return a suitable greeting...\r\n """\r\n card_title = "Greeting Message"\r\n greeting_string = "Hi "+friend_name+"! Welcome to Mayank's abode. 
He has told me about you. "\r\n "You seem to be more beautiful than he has mentioned. "\r\n "Do you want to hear anything specific about him? "\r\n "Say Joke or Secret or Songs. You can also say food or quote or game."\r\n\r\n should_end_session = False\r\n session_attributes = { \r\n "speech_output": greeting_string,\r\n "friend_name" : friend_name\r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, greeting_string, "Ask me to say hello...", should_end_session))\r\n\r\ndef say_hello_to_boy(friend_name):\r\n """\r\n Return a suitable greeting...\r\n """\r\n card_title = "Greeting Message"\r\n greeting_string = "Hi "+friend_name+"! Welcome to Mayank's abode. It is unusual to have a guy in mikki's room. Anyway, I welcome you here."# Have you brought him anything to eat?"\r\n should_end_session = True\r\n session_attributes = { \r\n "speech_output": greeting_string,\r\n "friend_name" : friend_name\r\n \r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, greeting_string, "Ask me to say hello...", should_end_session))\r\n\r\ndef fall_back_message():\r\n """\r\n Return a suitable greeting...\r\n """\r\n card_title = "Fallback Message"\r\n fallback_string = "Sorry. I couldn't understand it. Please say it again."\r\n should_end_session = False\r\n session_attributes = { \r\n "speech_output": fallback_string,\r\n \r\n \r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, fallback_string, "Ask me to say hello...", should_end_session))\r\n\r\n\r\ndef joke_story(session):\r\n\r\n global joke_count\r\n friend_name = session['attributes']['friend_name']\r\n secret_count = 0\r\n song_count = 0\r\n food_count = 0\r\n quote_count = 0\r\n game_count = 0\r\n\r\n joke_story = [\r\n "Mayank dances like a chimpanzee. Please don't laugh at him " + friend_name,\r\n "Once he fell in love with his maths teacher.",\r\n "His parents still tease him about his first girlfriend.",\r\n "He is a very shy guy. Don't take it seriously "+ friend_name+ ". You know it's a joke.",\r\n "He fumbles with words in front of beautiful girls. I am pretty sure it would have happened in front of you too " + friend_name\r\n ]\r\n card_title = "Joke Story"\r\n\r\n if joke_count >= len(joke_story) :\r\n joke_string = "I don't have any more stories right now. But I will make sure to gather more. Good Bye. Nice meeting you " + friend_name\r\n should_end_session = True\r\n else : \r\n joke_string = joke_story[joke_count] + ". Do you want to hear another one? Just say joke"\r\n should_end_session = False\r\n joke_count += 1 \r\n \r\n session_attributes = {\r\n "friend_name" : friend_name,\r\n "Joke_Count" : joke_count,\r\n "Secret_Count" : secret_count,\r\n "Song_Count" : song_count,\r\n "Food_Count" : food_count,\r\n "Quote_Count" : quote_count,\r\n "Game_Count" : game_count\r\n }\r\n return build_response(session_attributes, build_speechlet_response(card_title, joke_string, "Please repeat it again..", should_end_session))\r\n\r\ndef favorite_food(session):\r\n\r\n global food_count\r\n friend_name = session['attributes']['friend_name']\r\n\r\n joke_count = 0\r\n secret_count = 0\r\n song_count = 0\r\n quote_count = 0\r\n game_count = 0\r\n\r\n food_story = [\r\n "Rice and Rajma. You know what, you can feed him sometime " + friend_name +". He is always hungry",\r\n "Pizza. He craves pizza. 
Just say the word and he will order for you too \" + friend_name,\r\n \"Pasta. He is crazy for pasta. You should ask him to treat you sometime.\",\r\n \"Salad. Sometimes he care for his weight for a change. I think you would love to go and have salad with him \" + friend_name\r\n ]\r\n card_title = \"Favorite Food\"\r\n\r\n #print(\"****food count: \"+ str(food_count))\r\n if food_count >= len(food_story) :\r\n food_string = \"He like these many only. If he likes anything else, i will surely tell you. Good Bye. Nice meeting you \" + friend_name\r\n should_end_session = True\r\n else : \r\n food_string = \"His favorite food is \" + food_story[food_count] + \". Do you want to hear another one? Just say food\"\r\n food_count += 1\r\n should_end_session = False \r\n \r\n session_attributes = {\r\n \r\n \"friend_name\" : friend_name,\r\n \"Joke_Count\" : joke_count,\r\n \"Secret_Count\" : secret_count,\r\n \"Song_Count\" : song_count,\r\n \"Food_Count\" : food_count,\r\n \"Quote_Count\" : quote_count,\r\n \"Game_Count\" : game_count\r\n }\r\n return build_response(session_attributes, build_speechlet_response(card_title, food_string, \"Please repeat it again..\", should_end_session))\r\n\r\ndef secret_story(session):\r\n\r\n global secret_count\r\n friend_name = session['attributes']['friend_name']\r\n\r\n secret_story = [\r\n \"He is still crazy for toy cars. Don't you too find it funny \" + friend_name,\r\n \"Don't ever ask him school, college marks. He won't ever tell you.\",\r\n friend_name + \" his room might be clean but check his closed too. There you will be surprised.\",\r\n \"He is scared of high speed driving because of his accident but am sure to impress you he will definitely drive fast. Go for bike rides with him \" + friend_name\r\n ]\r\n card_title = \"Mayank's Secret\"\r\n\r\n #print(\"****secret count: \"+ str(secret_count))\r\n if secret_count >= len(secret_story) :\r\n secret_string = \"Till now i could find only there. If i find anything else, i will surely tell you. Good Bye. Nice meeting you \" + friend_name\r\n should_end_session = True\r\n else : \r\n secret_string = \"One of his secret is that \" + secret_story[secret_count] + \". Do you want to hear another one? 
Just say secret\"\r\n secret_count += 1\r\n should_end_session = False \r\n \r\n session_attributes = {\r\n \r\n \"friend_name\" : friend_name,\r\n \"Joke_Count\" : joke_count,\r\n \"Secret_Count\" : secret_count,\r\n \"Song_Count\" : song_count,\r\n \"Food_Count\" : food_count,\r\n \"Quote_Count\" : quote_count,\r\n \"Game_Count\" : game_count\r\n }\r\n return build_response(session_attributes, build_speechlet_response(card_title, secret_string, \"Please repeat it again..\", should_end_session))\r\n\r\n# --------------- Events ------------------\r\n\r\ndef on_session_started(session_started_request, session):\r\n \"\"\" Called when the session starts \"\"\"\r\n\r\n print(\"on_session_started requestId=\" + session_started_request['requestId']\r\n + \", sessionId=\" + session['sessionId'])\r\n\r\n\r\ndef on_launch(launch_request, session):\r\n \"\"\" Called when the user launches the skill without specifying what they want \"\"\"\r\n\r\n #print(\"****on_launch requestId=\" + launch_request['requestId'] +\r\n # \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()\r\n\r\n\r\ndef on_intent(intent_request, session):\r\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n #print (intent)\r\n try :\r\n intent_name_value = intent['slots']['friend_name']['value']\r\n except :\r\n print(\"**** Can't find name\")\r\n\r\n try: \r\n intent_gender_value = intent['slots']['gender']['value']\r\n # print(\"****intent_gender_value: \" + intent_name_value)\r\n except :\r\n print(\"**** Can't find gender\")\r\n\r\n #friend_name = intent_value\r\n print(\"****session: \" + str(session))\r\n print(\"****Intent found is: \" + str(intent))\r\n print(\"****Intent Name found is: \" + str(intent_name))\r\n #print(\"****intent_gender_value found is: \" + str(intent_gender_value))\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"welcomeIntent\" and (intent_gender_value == \"her\" or intent_gender_value == \"she\"):\r\n return say_hello_to_girl(intent_name_value)\r\n elif intent_name == \"welcomeIntent\" and (intent_gender_value == \"his\" or intent_gender_value == \"he\"):\r\n return say_hello_to_boy(intent_name_value) \r\n elif intent_name == \"jokeIntent\" :\r\n return joke_story(session)\r\n elif intent_name == \"foodIntent\" :\r\n return favorite_food(session)\r\n elif intent_name == \"secretIntent\" :\r\n return secret_story(session)\r\n elif intent_name == \"songIntent\" :\r\n return favorite_song(session)\r\n elif intent_name == \"quoteIntent\" :\r\n return favorite_quote(session)\r\n elif intent_name == \"gameIntent\" :\r\n return favorite_game(session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request(session)\r\n elif intent_name == \"AMAZON.FallbackIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")\r\n\r\n\r\ndef on_session_ended(session_ended_request, session):\r\n \"\"\" Called when the user ends the session. 
Is not called when the skill returns should_end_session=true \"\"\"\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n# --------------- Main handler ------------------\r\n\r\ndef lambda_handler(event, context):\r\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\r\n etc.) The JSON body of the request is provided in the event parameter.\r\n \"\"\"\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n #print (\"**** Reached\")\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n #print(\"**** Intent coming is : \" + event['request']['type'])\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])","repo_name":"mayankkumaryadav06/alexa-welcome-guest","sub_path":"welcomeFriend.py","file_name":"welcomeFriend.py","file_ext":"py","file_size_in_byte":14216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"32117207921","text":"VERSION = \"v20230402\"\nDESCRIPTION = \"poojtag %s - pure python JTAG over BusPirate-like OCD protocol\" % VERSION\n\n\"\"\" Data flow overview between poojtag.py classes, BusPirate and the JTAG port\n\n BP Instruction TAP OCD BP pySerial BusPirate JTAG port\n-----------------------------------------------------------------------------------------------------------------\n--> command seq send_seq,set_state cmd_shift _sr write [applies TDI,TMS] --> JTAG port\n<-- command dec recv_seq decode_tdo _sr read [reads TDO] <-- JTAG port\n\"\"\"\n\nimport re\nimport sys\nimport time\nimport inspect\nimport argparse\nimport logging\nimport logging\nimport itertools\nfrom struct import pack, unpack\nfrom datetime import datetime\nfrom binascii import hexlify, unhexlify\nfrom functools import reduce\nfrom textwrap import wrap\nfrom logging import debug, info, warning, error\n\nimport serial # pySerial\nfrom bitstring import BitStream, BitArray\n\nLOG = 'poojtag.log'\n\ndef bstobl(bs):\n \"\"\" bit string to bit list \"\"\"\n return list(map(lambda b: int(b), [*bs]))\n\ndef bstobl_rev(bs):\n \"\"\" bit string to bit list \"\"\"\n return bstobl(bs)[::-1]\n\ndef bltobs(bl):\n \"\"\" bit list to bit string \"\"\"\n return ''.join([ \"%s\" % x for x in bl])\n\nclass TAP(object):\n \"\"\" JTAG Test Access Port \"\"\"\n\n # states, values are arbitrary\n STATE_UNKNOWN = -1\n STATE_TEST_LOGIC_RESET = 0\n STATE_RUN_TEST_IDLE = 1\n # DR\n STATE_SELECT_DR_SCAN = 10\n STATE_CAPTURE_DR = 11\n STATE_SHIFT_DR = 12\n STATE_EXIT1_DR = 13\n STATE_PAUSE_DR = 14\n STATE_EXIT2_DR = 15\n STATE_UPDATE_DR = 16\n # IR\n STATE_SELECT_IR_SCAN = 20\n STATE_CAPTURE_IR = 21\n STATE_SHIFT_IR = 22\n STATE_EXIT1_IR = 23\n STATE_PAUSE_IR = 24\n STATE_EXIT2_IR = 25\n STATE_UPDATE_IR = 26\n\n STATES = {\n -1: 'STATE_UNKNOWN',\n 0: 'STATE_TEST_LOGIC_RESET',\n 1: 'STATE_RUN_TEST_IDLE',\n 10: 'STATE_SELECT_DR_SCAN',\n 11: 'STATE_CAPTURE_DR',\n 12: 'STATE_SHIFT_DR',\n 13: 'STATE_EXIT1_DR',\n 14: 'STATE_PAUSE_DR',\n 15: 'STATE_EXIT2_DR',\n 16: 'STATE_UPDATE_DR',\n 20: 'STATE_SELECT_IR_SCAN',\n 21: 'STATE_CAPTURE_IR',\n 22: 
'STATE_SHIFT_IR',\n 23: 'STATE_EXIT1_IR',\n 24: 'STATE_PAUSE_IR',\n 25: 'STATE_EXIT2_IR',\n 26: 'STATE_UPDATE_IR',\n }\n\n STATES_CHANGE = {\n STATE_UNKNOWN: { 0: STATE_UNKNOWN, 1: STATE_UNKNOWN },\n STATE_TEST_LOGIC_RESET: { 0: STATE_RUN_TEST_IDLE, 1: STATE_TEST_LOGIC_RESET },\n STATE_RUN_TEST_IDLE: { 0: STATE_RUN_TEST_IDLE, 1: STATE_SELECT_DR_SCAN },\n # DR\n STATE_SELECT_DR_SCAN: { 0: STATE_CAPTURE_DR, 1: STATE_SELECT_IR_SCAN },\n STATE_CAPTURE_DR: { 0: STATE_SHIFT_DR, 1: STATE_EXIT1_DR },\n STATE_SHIFT_DR: { 0: STATE_SHIFT_DR, 1: STATE_EXIT1_DR },\n STATE_EXIT1_DR: { 0: STATE_PAUSE_DR, 1: STATE_UPDATE_DR },\n STATE_PAUSE_DR: { 0: STATE_PAUSE_DR, 1: STATE_EXIT2_DR },\n STATE_EXIT2_DR: { 0: STATE_SHIFT_DR, 1: STATE_UPDATE_DR },\n STATE_UPDATE_DR: { 0: STATE_RUN_TEST_IDLE, 1: STATE_SELECT_DR_SCAN },\n # IR\n STATE_SELECT_IR_SCAN: { 0: STATE_CAPTURE_IR, 1: STATE_TEST_LOGIC_RESET },\n STATE_CAPTURE_IR: { 0: STATE_SHIFT_IR, 1: STATE_EXIT1_IR },\n STATE_SHIFT_IR: { 0: STATE_SHIFT_IR, 1: STATE_EXIT1_IR },\n STATE_EXIT1_IR: { 0: STATE_PAUSE_IR, 1: STATE_UPDATE_IR },\n STATE_PAUSE_IR: { 0: STATE_PAUSE_IR, 1: STATE_EXIT2_IR },\n STATE_EXIT2_IR: { 0: STATE_SHIFT_IR, 1: STATE_UPDATE_IR },\n STATE_UPDATE_IR: { 0: STATE_RUN_TEST_IDLE, 1: STATE_SELECT_DR_SCAN },\n }\n \n\n # instructions\n INST_EXTEST = 0x0\n INST_IDCODE = 0x1\n INST_SAMPLE_PRELOAD = 0x2\n INST_BYPASS = 0xF\n\n # TDO decoding\n SKIP_TDO = 0\n DECODE_TDO = 1\n\n def __init__(self):\n self.state = self.STATE_UNKNOWN\n\n def reset(self):\n if self.state == self.STATE_TEST_LOGIC_RESET:\n return []\n self.state = self.STATE_TEST_LOGIC_RESET\n return [(0, 1, TAP.SKIP_TDO)] * 8 # TDI=0 TMS=1 * 8\n\n def idle(self):\n s = []\n if self.state == self.STATE_TEST_LOGIC_RESET:\n s += [(0, 0, TAP.SKIP_TDO)]\n elif self.state != self.STATE_RUN_TEST_IDLE:\n debug(\"TAP.idle: we are reseting from state %s\" % self.STATES[self.state])\n s += self.reset() + [(0, 0, TAP.SKIP_TDO)]\n self.state = TAP.STATE_RUN_TEST_IDLE\n return s\n\n def set_state(self, state):\n seq = self.idle()\n if state == self.STATE_SHIFT_DR:\n seq += zip([0]*3, [1, 0, 0], [TAP.SKIP_TDO]*3)\n self.state = self.STATE_SHIFT_DR\n elif state == self.STATE_SHIFT_IR:\n seq += zip([0]*4, [1, 1, 0, 0], [TAP.SKIP_TDO]*4)\n self.state = self.STATE_SHIFT_IR\n else:\n raise Exception(\"unsupported state %d\" % state)\n return seq\n\n def shift_ir(self, istr):\n # prepare to shift instruction\n s = self.set_state(TAP.STATE_SHIFT_IR)\n # shift instruction and go back to STATE_RUN_TEST_IDLE:\n # TDI: reverse instruction and add 0,0\n # TMS: [0, 0, 0] to shift 3 LSB bits, [1] to shift MSB and set STATE_EXIT1_DR, and [1, 0] for STATE_RUN_TEST_IDLE\n # decode TDO: 6 & SKIP_TDO\n s += list(zip(bstobl_rev(\"{:04b}\".format(istr))+[0,0], bstobl('000110'), [TAP.SKIP_TDO]*6))\n debug(\"shift_ir: %s\" % s)\n self.state = TAP.STATE_RUN_TEST_IDLE\n return s\n\n def shift_dr(self, count, tdi=0, decode_tdo=False):\n \"\"\" shift Data Register and provide a fixed value for TDI \"\"\"\n return self.shift_dr_tdi([tdi]*count, decode_tdo=decode_tdo)\n\n def shift_dr_int(self, value, size, decode_tdo_int=0):\n \"\"\" shift Data Register for {size} bits of value {value} \"\"\"\n tdi = bstobl_rev('{:0{size}b}'.format(value, size=size))\n decode_tdo = bstobl_rev('{:0{size}b}'.format(decode_tdo_int, size=size))\n return self.shift_dr_tdi(tdi, decode_tdo=decode_tdo)\n\n def shift_dr_int_list(self, intlist, size, decode_tdo_intlist=False):\n s = list()\n if type(decode_tdo_intlist) is not list:\n decode_tdo_intlist = 
[decode_tdo_intlist] * len(intlist)\n for i, i_dec in zip(intlist, decode_tdo_intlist):\n s += self.shift_dr_int(i, size, decode_tdo_int=i_dec)\n return s\n\n def shift_dr_tdi(self, tdi, decode_tdo=False):\n \"\"\" shift Data Register and provide a TDI sequence \"\"\"\n count = len(tdi)\n if type(decode_tdo) is list:\n if len(decode_tdo) != len(tdi):\n raise Exception(\"invalid decode_tdo len %d vs %d for tdi\" % (len(decode_tdo), len(tdi)))\n else:\n decode_tdo = [1]*count if decode_tdo else [0]*count\n\n s = self.set_state(TAP.STATE_SHIFT_DR)\n s += list(zip(tdi[:-1], [0]*(count-1), decode_tdo[:-1]))\n s += [(tdi[-1], 1, decode_tdo[-1])] # last bit is recorded when exiting to STATE_EXIT1_DR\n s += [(0, 1, TAP.SKIP_TDO)] # STATE_UPDATE_DR\n s += [(0, 0, TAP.SKIP_TDO)] # STATE_RUN_TEST_IDLE\n self.state = TAP.STATE_RUN_TEST_IDLE\n return s\n\n def send_seq(self, seq):\n debug(\"send_seq tdi %s [%d]\" % (bltobs([s[0] for s in seq]), len(seq)))\n debug(\"send_seq tms %s\" % bltobs([s[1] for s in seq]))\n debug(\"send_seq decode_tdo %s\" % bltobs([s[2] for s in seq]))\n self.last_send = seq\n # return only TDI and TDO components\n return list(map(lambda e: e[0:2], seq))\n\n def recv_seq(self, tdo):\n out_tdo = list()\n if len(self.last_send) != len(tdo):\n error(\"len(last_send)=%d != len(tdo)=%d\" % (len(self.last_send), len(tdo)))\n exit(1)\n debug(\"recv_seq decode_tdo %s [%d]\" % (bltobs(list(map(lambda s: str(s[2]), self.last_send))), len(tdo)))\n debug(\"recv_seq tdo %s\" % (bltobs(tdo)))\n for [tdi, tms, decode_tdo], tdo in zip(self.last_send, tdo):\n if decode_tdo:\n out_tdo.append(tdo)\n debug(\"recv_seq out_tdo %s [%d]\" % (bltobs(out_tdo), len(out_tdo)))\n if len(out_tdo) == 0:\n return BitStream()\n return BitStream('0b'+''.join(out_tdo))\n\n @classmethod\n def sim_seq(cls, seq, state):\n \"\"\" simulate the state and register changes when running a TDI,TMS,decode_tdo sequence \"\"\"\n info(\"dump sequence [%d] : %s\" % (len(seq), seq))\n info(\"tdi = %s\" % ([e[0] for e in seq]))\n info(\"tms = %s\" % ([e[1] for e in seq]))\n ir = ''\n dr = ''\n state_unknown_count = 0\n for idx, (tdi, tms, decode_tdo) in enumerate(seq, 1):\n extra = ''\n if state == TAP.STATE_UNKNOWN:\n state_unknown_count += 1\n if state_unknown_count == 8:\n state = TAP.STATE_TEST_LOGIC_RESET\n state_unknown_count = 0\n if state == TAP.STATE_SHIFT_DR:\n dr = str(tdi) + dr\n elif state == TAP.STATE_EXIT1_DR:\n extra = 'dr=%s [%d] 0x%x' % (dr, len(dr), int(dr, 2))\n dr = ''\n elif state == TAP.STATE_SHIFT_IR:\n ir = str(tdi) + ir\n elif state == TAP.STATE_EXIT1_IR:\n extra = 'ir=%s [%d] 0x%x' % (ir, len(ir), int(ir, 2))\n ir = ''\n info(\"%02d/%02d %22s tdi=%s tms=%s decode_tdo=%s %s\" % (idx, len(seq), TAP.STATES[state], tdi, tms, decode_tdo, extra))\n state = TAP.STATES_CHANGE[state][tms]\n info(\"end %22s\" % (TAP.STATES[state]))\n return state\n\nclass TAP_ATmega1248P(TAP):\n \"\"\" JTAG Test Access Port - specifics of Atmel AVR ATmega1284P, from ATmega1284P Datasheet 8059D–AVR–11/09 \"\"\"\n\n # 25.1 Program And Data Memory Lock Bits\n LOCK_BITS = [ (\"LB1\", \"lock Flash/EEPROM read\"), (\"LB2\", \"lock Flash/EEPROM write\"), (\"BLB01\", \"\"), (\"BLB02\", \"\"), (\"BLB11\", \"\"), (\"BLB12\", \"\"), (\"\", \"1\"), (\"\", \"1\") ]\n # 25.2 Fuse Bits\n EXTENDED_FUSE_BYTE = [ (\"BODLEVEL0\", \"\"), (\"BODLEVEL1\", \"\"), (\"BODLEVEL2\", \"\"), (\"\", \"1\"), (\"\", \"1\"), (\"\", \"1\"), (\"\", \"1\"), (\"\", \"1\") ]\n FUSE_HIGH_BYTE = [ (\"BOOTRST\", \"\"), (\"BOOTSZ0\", \"\"), (\"BOOTSZ1\", \"\"), 
(\"EESAVE\", \"\"), (\"WDTON\", \"\"), (\"SPIEN\", \"\"), (\"JTAGEN\", \"\"), (\"OCDEN\", \"\") ]\n FUSE_LOW_BYTE = [ (\"CKSEL0\", \"\"), (\"CKSEL1\", \"\"), (\"CKSEL2\", \"\"), (\"CKSEL3\", \"\"), (\"SUT0\", \"\"), (\"SUT1\", \"\"), (\"CKOUT\", \"\"), (\"CKDIV8\", \"\") ]\n\n # 25.10.2 AVR_RESET (0xC)\n INST_AVR_RESET = 0xC\n # 25.10.3 PROG_ENABLE (0x4)\n INST_PROG_ENABLE = 0x4\n # 25.10.4 PROG_COMMANDS (0x5)\n INST_PROG_COMMANDS = 0x5\n # 25.10.5 PROG_PAGELOAD (0x6)\n INST_PROG_PAGELOAD = 0x6\n # 25.10.6 PROG_PAGEREAD (0x7)\n INST_PROG_PAGEREAD = 0x7\n\n REG_PROG_CMD_SIZE = 15\n REG_PROG_ENABLE_SIZE = 16\n\n # 25.10.9 Programming Enable Register\n PROGRAMMING_ENABLE_SIGNATURE = 0b1010001101110000\n\n # 25.10.10 Programming Command Register\n # from Table 25-18. JTAG Programming Instruction\n # TDI Sequence, TDO Sequence\n # Set a = address high bits, b = address low bits, c = address extended bits, H = 0 - Low byte, 1 - High Byte, o = data out, i = data in, x = don’t care\n # 1a. Chip Erase\n PINST_1A_CHIP_ERASE = [ 0b010001110000000, 0b011000110000000, 0b011001110000000, 0b011001110000000 ]\n # 1b. Poll for Chip Erase Complete\n # 0110011_10000000 xxxxxox_xxxxxxxx\n PINST_1B_POLL_FOR_CHIP_ERASE_COMPLETE = 0b011001110000000\n # 2a. Enter Flash Write\n # 0100011_00010000 xxxxxxx_xxxxxxxx\n PINST_2A_ENTER_FLASH_WRITE = 0b010001100010000\n # 2b. Load Address Extended High Byte (10)\n # 0001011_cccccccc xxxxxxx_xxxxxxxx\n PINST_2B_LOAD_ADDRESS_EXTENDED_HIGH_BYTE_PREFIX = 0b0001011\n # 2c. Load Address High Byte\n # 0000111_aaaaaaaa xxxxxxx_xxxxxxxx\n PINST_2C_LOAD_ADDRESS_HIGH_BYTE_PREFIX = 0b0000111\n # 2d. Load Address Low Byte\n # 0000011_bbbbbbbb xxxxxxx_xxxxxxxx\n PINST_2D_LOAD_ADDRESS_LOW_BYTE_PREFIX = 0b0000011\n # 2e. Load Data Low Byte\n # 0010011_iiiiiiii xxxxxxx_xxxxxxxx\n PINST_2E_LOAD_DATA_LOW_BYTE_PREFIX = 0b0010011\n # 2f. Load Data High Byte\n # 0010111_iiiiiiii xxxxxxx_xxxxxxxx\n PINST_2F_LOAD_DATA_HIGH_BYTE_PREFIX = 0b0010111\n # 2g. Latch Data (1)\n # 0110111_00000000 xxxxxxx_xxxxxxxx\n # 1110111_00000000 xxxxxxx_xxxxxxxx\n # 0110111_00000000 xxxxxxx_xxxxxxxx\n PINST_2G_LATCH_DATA = [ 0b011011100000000, 0b111011100000000, 0b011011100000000 ]\n # 2h. Write Flash Page (1)\n # 0110111_00000000 xxxxxxx_xxxxxxxx\n # 0110101_00000000 xxxxxxx_xxxxxxxx\n # 0110111_00000000 xxxxxxx_xxxxxxxx\n # 0110111_00000000 xxxxxxx_xxxxxxxx\n PINST_2H_WRITE_FLASH_PAGE = [ 0b011011100000000, 0b011010100000000, 0b011011100000000, 0b011011100000000 ]\n # 2i. Poll for Page Write Complete (2)\n # 0110111_00000000 xxxxxox_xxxxxxxx\n PINST_2I_POLL_FOR_PAGE_WRITE_COMPLETE = 0b011011100000000\n # 3a. Enter Flash Read\n # 0100011_00000010 xxxxxxx_xxxxxxxx\n PINST_3A_ENTER_FLASH_READ = 0b010001100000010\n # 3b. Load Address Extended High Byte\n # 0001011_cccccccc xxxxxxx_xxxxxxxx\n PINST_3B_LOAD_ADDRESS_EXTENDID_HIGH_BYTE_PREFIX = 0b0001011\n # 3c. Load Address High Byte\n # 0000111_aaaaaaaa xxxxxxx_xxxxxxxx\n PINST_3C_LOAD_ADDRESS_HIGH_BYTE_PREFIX = 0b0000111\n # 3d. Load Address Low Byte\n # 0000011_bbbbbbbb xxxxxxx_xxxxxxxx\n PINST_3D_LOAD_ADDRESS_LOW_BYTE_PREFIX = 0b0000011\n # 3e. Read Data Low and High Byte\n # 0110010_00000000 xxxxxxx_xxxxxxxx \n # 0110110_00000000 xxxxxxx_oooooooo Low byte \n # 0110111_00000000 xxxxxxx_oooooooo High byte\n PINST_3E_READ_DATA_LOW_AND_HIGH_BYTE = [ 0b011011000000000, 0b011011000000000, 0b011011100000000 ]\n PINST_3E_READ_DATA_LOW_AND_HIGH_BYTE_DECODE_MASK = [ 0b000000000000000, 0b000000011111111, 0b000000011111111 ]\n # 8a. 
Enter Fuse/Lock Bit Read\n # 0100011_00000100 xxxxxxx_xxxxxxxx\n PINST_8A_ENTER_FUSE_LOCK_BIT_READ = 0b010001100000100\n # 8b. Read Extended Fuse Byte(6)\n # 0111010_00000000 xxxxxxx_xxxxxxxx\n # 0111011_00000000 xxxxxxx_oooooooo\n PINST_8B_READ_EXTENDED_FUSE_BYTE = [ 0b011101000000000, 0b011101100000000 ]\n PINST_8B_READ_EXTENDED_FUSE_BYTE_DECODE_MASK = [ 0b000000000000000, 0b000000011111111 ]\n # 8c. Read Fuse High Byte(7)\n # 0111110_00000000 xxxxxxx_xxxxxxxx\n # 0111111_00000000 xxxxxxx_oooooooo\n PINST_8C_READ_FUSE_HIGH_BYTE = [ 0b011111000000000, 0b011111100000000 ]\n PINST_8C_READ_FUSE_HIGH_BYTE_DECODE_MASK = [ 0b000000000000000, 0b000000011111111 ]\n # 8d. Read Fuse Low Byte(8)\n # 0110010_00000000 xxxxxxx_xxxxxxxx\n # 0110011_00000000 xxxxxxx_oooooooo\n PINST_8D_READ_FUSE_LOW_BYTE = [ 0b011001000000000, 0b011001100000000 ]\n PINST_8D_READ_FUSE_LOW_BYTE_DECODE_MASK = [ 0b000000000000000, 0b000000011111111 ]\n # 8e. Read Lock Bits(9)\n # 0110110_00000000 xxxxxxx_xxxxxxxx (5)\n # 0110111_00000000 xxxxxxx_xxoooooo\n PINST_8E_READ_LOCK_BITS = [ 0b011011000000000, 0b011011100000000 ]\n PINST_8E_READ_LOCKS_BITS_DECODE_MASK = [ 0b000000000000000, 0b000000011111111 ]\n # 8f. Read Fuses and Lock Bits\n # 0111010_00000000 xxxxxxx_xxxxxxxx (5)\n # 0111110_00000000 xxxxxxx_oooooooo Fuse Ext. byte\n # 0110010_00000000 xxxxxxx_oooooooo Fuse High byte\n # 0110110_00000000 xxxxxxx_oooooooo Fuse Low byte\n # 0110111_00000000 xxxxxxx_oooooooo Lock bits\n PINST_8F_READ_FUSES_AND_LOCK_BITS = [ 0b011101000000000, 0b011111000000000, 0b011001000000000, 0b011011000000000, 0b011011100000000 ]\n PINST_8F_READ_FUSES_AND_LOCK_BITS_DECODE_MASK = [ 0b000000000000000, 0b000000011111111, 0b000000011111111, 0b000000011111111, 0b000000011111111 ]\n # 9a. Enter Signature Byte Read\n # 0100011_00001000 xxxxxxx_xxxxxxxx\n PINST_9A_ENTER_SIGNATURE_BYTE_READ = 0b010001100001000\n # 9b. Load Address Byte\n # 0000011_bbbbbbbb xxxxxxx_xxxxxxxx\n PINST_9B_LOAD_ADDRESS_BYTE_PREFIX = 0b0000011\n # 9c. Read Signature Byte \n # 0110010_00000000 xxxxxxx_xxxxxxxx \n # 0110011_00000000 xxxxxxx_oooooooo\n PINST_9C_READ_SIGNATURE_BYTE = [ 0b011001000000000, 0b011001100000000 ]\n PINST_9C_READ_SIGNATURE_BYTE_DECODE_MASK = [ 0b000000000000000, 0b000000011111111 ]\n # 11a. 
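# A minimal sketch, assuming the 15-bit layout implied by the constants
# above (a 7-bit opcode prefix over an 8-bit payload); `prog_word` and the
# example byte 0x34 are illustrative, not datasheet values.
def prog_word(prefix, payload):
    """Compose a 15-bit PROG_COMMANDS word: (7-bit prefix << 8) | payload."""
    return (prefix << 8) | (payload & 0xFF)

# e.g. loading address low byte 0x34 with the 0b0000011 prefix used above:
#   prog_word(0b0000011, 0x34) == 0b000001100110100
# The *_DECODE_MASK companions mark which TDO bits carry data: a mask of
# 0b000000011111111 keeps only the low 8 bits clocked out for that word.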
Load No Operation Command\n # 0100011_00000000 xxxxxxx_xxxxxxxx \n # 0110011_00000000 xxxxxxx_xxxxxxxx\n PINST_11A_LOAD_NO_OP = [ 0b010001100000000, 0b011001100000000 ]\n\n def prog_mode(self):\n pass\n\nclass TAP_AVR_Private(TAP):\n \"\"\" JTAG Test Access Port - AVR Private commands, credits to Free AVR ICE for documenting them incompletely\n ressources:\n * http://cvs.savannah.nongnu.org/viewvc/freeice/freeice/gipper/firmware/jtag_avr_ocd.c?revision=1.3&view=markup\n * https://download-mirror.savannah.gnu.org/releases/freeice/AVR-OCD-Documentation.html\n * https://people.ece.cornell.edu/land/courses/ece4760/FinalProjects/s2009/jgs33_rrw32/Final%20Paper/Documentation.html\n * https://people.ece.cornell.edu/land/courses/ece4760/FinalProjects/s2009/jgs33_rrw32/Final%20Paper/debugger.c \"\"\"\n\n # On-Chip Debug Specific JTAG Instructions\n # OCD Force Break (0x8)\n INST_OCD_FORCE_BREAK = 0x8\n # OCD Run (0x9)\n INST_OCD_RUN = 0x9\n # OCD Execute AVR Instruction (0xA)\n # instructions can be 16bit or 32bit long\n # use 0x0A, SDR 0xFFFF0000 to read PC on TDO (actually returns PC+2 or PC+4)\n INST_OCD_EXEC = 0xA\n # OCD Access OCD Registers (0xB)\n # there are total 16 Addressable Registers\n # after IR next DRSHIFT is RW Flag (1=Write) + 4 Bits Address\n # those Data in Instruction is 21 (5 + 16) bits\n # note for read operation OCD Address need to be pre latched!\n INST_OCD_REGISTERS = 0xB\n\n OPCODE_READ_PC_JTAG = 0xFFFF0000\n OPCODE_READ_PC_JTAG_LEN = 32\n\n BREAK_RUN = 1\n BREAK_STOP = 0\n\n # OCD Registers\n OCDR_FLAG_READ = 0x0\n OCDR_FLAG_WRITE = 0x1\n OCDR_FLAG_LEN = 1\n OCDR_ADDRESS_LEN = 4\n #OCDR_DATA_LEN = 16\n OCDR_DATA_LEN = 21\n # Register 0 PSB0\n OCDR_0_PSB0 = 0x00\n # Register 1 PSB1\n OCDR_1_PSB1 = 0x01\n # Register 2 PDMSB\n OCDR_2_PDMSB = 0x02\n # Register 3 PDSB\n OCDR_3_PDSB = 0x03\n # Register 8 Break Control Register (BCR)\n OCDR_8_BCR = 0x08\n # Register 9 - Break Status Register (BSR)\n OCDR_9_BSR = 0x09\n # Register C - OCDR Readback\n # Bit rw Description \n # D15-8 rw OCDR 7..0 \n # D7-0 r unused (read as 0)\n OCDR_C_READBACK = 0x0C\n # Register D - Control and Status Register\n # Bit rw \tDescription\n # D15 rw \t1=Enable OCDR\n # D14 rw \t1=?\n # D13-D5 r\n # D4 r \t 1=OCDR written by AVR and not read by OCD\n # D3 r 1=Reset not active\n # D2 r 1=Reset not active\n # D1-0 r\n OCDR_D_CONTROL_STATUS = 0x0D\n\nclass Instruction(object):\n \"\"\" implements the logic to execute a JTAG instruction and process the result \"\"\"\n def seq(self, tap):\n info(\"Instruction has no seq() function\")\n return []\n\n def dec(self, tdo):\n return tdo.bin\n\nclass BYPASS(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP.INST_BYPASS) \\\n + tap.shift_dr(32, tdi=1, decode_tdo=True)\n\n def dec(self, tdo):\n return tdo.bin\n\nclass SAMPLE_PRELOAD(Instruction):\n def __init__(self, chain_len=64):\n self.chain_len = chain_len\n\n def seq(self, tap):\n return tap.shift_ir(TAP.INST_SAMPLE_PRELOAD) \\\n + tap.shift_dr(self.chain_len, decode_tdo=True)\n\nclass IDCODE_implicit(Instruction):\n def seq(self, tap):\n return tap.shift_dr(64, decode_tdo=True)\n\n def dec(self, tdo):\n res = list()\n while tdo.pos < tdo.len:\n val = bytes()\n for i in range(4):\n bits = tdo.read(min(8, tdo.len-tdo.pos))\n bits.reverse()\n bits.append(8 - bits.len) # fill the byte with zeros\n val += bits.bytes\n res.append(hexlify(val[::-1]))\n return res\n\nclass IDCODE(IDCODE_implicit):\n def seq(self, tap):\n return tap.shift_ir(TAP.INST_IDCODE) + IDCODE_implicit.seq(self, tap)\n\nclass 
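# Companion sketch for the IDCODE decoding above: per IEEE 1149.1 a 32-bit
# IDCODE splits into version(4) | part-number(16) | manufacturer(11) | a
# fixed 1 in the LSB. `split_idcode` is a hypothetical helper.
def split_idcode(idcode):
    """Split a 32-bit JTAG IDCODE into its standard fields."""
    return {
        'version': (idcode >> 28) & 0xF,
        'part_number': (idcode >> 12) & 0xFFFF,
        'manufacturer': (idcode >> 1) & 0x7FF,
        'marker': idcode & 0x1,  # always 1 for a valid IDCODE
    }

# An ATmega1284P-class device is expected to report part_number 0x9705 (its
# signature word); the version nibble varies with the die revision.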
AVR_Reset(Instruction):\n def __init__(self, tdi=1):\n self.tdi = tdi\n\n def seq(self, tap):\n return tap.shift_ir(TAP_ATmega1248P.INST_AVR_RESET) \\\n + tap.shift_dr(1, tdi=self.tdi, decode_tdo=True)\n\nclass AVR_Prog_Enter(Instruction):\n def seq(self, tap):\n s = AVR_Reset(tdi=1).seq(tap)\n s += tap.shift_ir(TAP_ATmega1248P.INST_PROG_ENABLE)\n s += tap.shift_dr_int(TAP_ATmega1248P.PROGRAMMING_ENABLE_SIGNATURE, TAP_ATmega1248P.REG_PROG_ENABLE_SIZE)\n return s\n\nclass AVR_Prog_Leave(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP_ATmega1248P.INST_PROG_COMMANDS) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_11A_LOAD_NO_OP, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_ir(TAP_ATmega1248P.INST_PROG_ENABLE) \\\n + tap.shift_dr(TAP_ATmega1248P.REG_PROG_ENABLE_SIZE, tdi=0) \\\n + AVR_Reset(tdi=0).seq(tap)\n\nclass AVR_Read_Signature(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP_ATmega1248P.INST_PROG_COMMANDS) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_9A_ENTER_SIGNATURE_BYTE_READ, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_9B_LOAD_ADDRESS_BYTE_PREFIX<<8 | 0x00, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE, TAP_ATmega1248P.REG_PROG_CMD_SIZE, TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE_DECODE_MASK) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_9B_LOAD_ADDRESS_BYTE_PREFIX<<8 | 0x01, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE, TAP_ATmega1248P.REG_PROG_CMD_SIZE, TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE_DECODE_MASK) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_9B_LOAD_ADDRESS_BYTE_PREFIX<<8 | 0x02, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE, TAP_ATmega1248P.REG_PROG_CMD_SIZE, TAP_ATmega1248P.PINST_9C_READ_SIGNATURE_BYTE_DECODE_MASK)\n\n def dec(self, tdo):\n return [ \"0x%02x\" % int(v[::-1], 2) for v in wrap(tdo.bin, 8) ]\n\nclass AVR_Read_Fuses_and_Lock_bits(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP_ATmega1248P.INST_PROG_COMMANDS) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_8A_ENTER_FUSE_LOCK_BIT_READ, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_8F_READ_FUSES_AND_LOCK_BITS, TAP_ATmega1248P.REG_PROG_CMD_SIZE, TAP_ATmega1248P.PINST_8F_READ_FUSES_AND_LOCK_BITS_DECODE_MASK)\n\n def dec(self, tdo):\n b = wrap(tdo.bin, 8)\n s = \"1 for disabled, 0 for enabled\\n\"\n s += \"Fuse Ext. 
byte : %s\\n\" % b[0]\n for n, (name, desc) in enumerate(TAP_ATmega1248P.EXTENDED_FUSE_BYTE):\n s += \"%30s : %s\\n\" % (\"%s (%s)\" % (name, desc), b[0][n])\n s += \"Fuse High byte : %s\\n\" % b[1]\n for n, (name, desc) in enumerate(TAP_ATmega1248P.FUSE_HIGH_BYTE):\n s += \"%30s : %s\\n\" % (\"%s (%s)\" % (name, desc), b[1][n])\n s += \"Fuse Low byte : %s\\n\" % b[2]\n for n, (name, desc) in enumerate(TAP_ATmega1248P.FUSE_LOW_BYTE):\n s += \"%30s : %s\\n\" % (\"%s (%s)\" % (name, desc), b[2][n])\n s += \"Fuse Lock bits : %s\\n\" % b[3]\n for n, (name, desc) in enumerate(TAP_ATmega1248P.LOCK_BITS):\n s += \"%30s : %s\\n\" % (\"%s (%s)\" % (name, desc), b[3][n])\n return s[:-1]\n\nclass AVR_Read_Flash_Byte(Instruction):\n def __init__(self, address=0x0):\n self.address = address\n\n def seq(self, tap):\n return tap.shift_ir(TAP_ATmega1248P.INST_PROG_COMMANDS) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_3A_ENTER_FLASH_READ, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_3B_LOAD_ADDRESS_EXTENDID_HIGH_BYTE_PREFIX<<8 | 0x00, TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_3C_LOAD_ADDRESS_HIGH_BYTE_PREFIX << 8 | ((self.address >> 8) & 0xff), TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int(TAP_ATmega1248P.PINST_3D_LOAD_ADDRESS_LOW_BYTE_PREFIX << 8 | ( self.address & 0xff), TAP_ATmega1248P.REG_PROG_CMD_SIZE) \\\n + tap.shift_dr_int_list(TAP_ATmega1248P.PINST_3E_READ_DATA_LOW_AND_HIGH_BYTE, TAP_ATmega1248P.REG_PROG_CMD_SIZE, TAP_ATmega1248P.PINST_3E_READ_DATA_LOW_AND_HIGH_BYTE_DECODE_MASK)\n\n def dec(self, tdo):\n return '0x'+tdo.hex\n\nclass AVR_OCD_Break(Instruction):\n def __init__(self, run_or_stop=None):\n self.mode = run_or_stop\n\n def seq(self, tap):\n s = []\n if self.mode is not None:\n s = tap.shift_dr(1, tdi=self.mode, decode_tdo=1) # BREAK_RUN (1) or BREAK_STOP (0)\n return tap.shift_ir(TAP_AVR_Private.INST_OCD_FORCE_BREAK) + s\n\nclass AVR_OCD_Run(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP_AVR_Private.INST_OCD_RUN)\n\nclass AVR_OCD_Read_Register_Private(Instruction):\n def __init__(self, register=TAP_AVR_Private.OCDR_D_CONTROL_STATUS):\n self.register = register\n\n def seq(self, tap):\n return tap.shift_ir(TAP_AVR_Private.INST_OCD_REGISTERS) \\\n + tap.shift_dr_int(self.register<<1, TAP_AVR_Private.OCDR_FLAG_LEN+TAP_AVR_Private.OCDR_ADDRESS_LEN) \\\n + tap.shift_dr_tdi(bstobl('0{:04b}'.format(0xc)+'0'*16), 1)\n\nclass AVR_OCD_Read_PC_Private(Instruction):\n def seq(self, tap):\n return tap.shift_ir(TAP_AVR_Private.INST_OCD_EXEC) \\\n + tap.shift_dr_int(TAP_AVR_Private.OPCODE_READ_PC_JTAG, TAP_AVR_Private.OPCODE_READ_PC_JTAG_LEN, 0xffffffff)\n\nclass Sleep(Instruction):\n def __init__(self, usec=1000):\n warn(\"when used with a real device, it seems the sleep is breaking pySerial connection\")\n self.usec = usec\n\n def seq(self, tap):\n time.sleep(self.usec/1000)\n return []\n\nclass OCD(object):\n \"\"\" BusPirate protocol to drive JTAG, used in OpenOCD mode \"\"\"\n CMD_PORT_MODE = 0x01\n CMD_FEATURE = 0x02\n CMD_TAP_SHIFT = 0x05\n\n PORT_MODE_HIZ = 0\n PORT_MODE_JTAG = 1\n PORT_MODE_JTAG_OD = 2\n\n FEATURE_LED = 0x01\n FEATURE_VREG = 0x02\n FEATURE_TRST = 0x04\n FEATURE_SRST = 0x08\n FEATURE_PULLUP = 0x10\n\n def __init__(self):\n pass\n\n def enter_bbio(self):\n return b'\\x00' * 21\n\n def enter_ocd(self):\n return b'\\x06'\n\n def cmd_port_mode(self, mode):\n return pack('BB', self.CMD_PORT_MODE, mode)\n\n def cmd_feature(self, features, opts):\n return pack('BBB', 
self.CMD_FEATURE, features, opts)\n\n def cmd_shift(self, sequence):\n \"\"\" converts sequence of (tdi, tms) boolean values to a packed form bits_count[1],bits_count[0],[tmi_8bits,tms_8bits,...] \"\"\"\n def seq2byte(seq):\n x = BitArray(seq)\n x.append(8 - x.len) # fill the byte with zeros\n x.reverse()\n return x.uint\n sequence_len = len(sequence)\n buf = bytearray()\n while len(sequence) > 0:\n seq8 = sequence[:8]\n tdi8 = seq2byte(map(lambda s: s[0], seq8))\n tms8 = seq2byte(map(lambda s: s[1], seq8))\n buf.extend((tdi8, tms8))\n sequence = sequence[8:]\n cmd_buf = pack('BBB', self.CMD_TAP_SHIFT, sequence_len >> 8, sequence_len % 2**8) + buf\n response_len = int(3 + ((len(cmd_buf) - 3) / 2))\n debug(\"cmd_shift %s [%d, %d]\" % (hexlify(cmd_buf, ' ', 1), len(cmd_buf), response_len))\n return cmd_buf, response_len\n\n def decode_tdo(self, tdo):\n \"\"\" converts received tdo bytes into sequence of tdo boolean values \"\"\"\n debug(\"decode_tdo %s [%d]\" % (hexlify(tdo, ' ', 1), len(tdo)))\n bitlen = unpack('H', tdo[1:3][::-1])[0]\n bits = [ [ bit for bit in '{:08b}'.format(byte)[::-1] ] for byte in tdo[3:] ]\n sequence = list(itertools.chain.from_iterable(bits))[:bitlen]\n debug(\"decode_tdo %s [%d]\" % (bltobs(sequence), len(sequence)))\n return sequence\n\n def exit(self):\n return b'\\x00'\n\nclass BP(object):\n \"\"\" Communication with BusPirate \"\"\"\n SPEED = 115200\n\n def __init__(self, device):\n self.ser = serial.Serial(device, self.SPEED)\n self.ocd = OCD()\n self.tap = TAP()\n\n # bbio: request to enter BBIO mode\n self.ser.write(self.ocd.enter_bbio())\n # bbio: find initial 'B'\n while self.ser.read(1) != b'B':\n time.sleep(0.01)\n # bbio: read initial 'BBIO1'\n ret = self.ser.read(4)\n debug(\"XXX ret %s\" % ret)\n if ret != b'BIO1':\n error(\"invalid answer from BusPirate when setting BBIO : %s\" % ret)\n sys.exit(1)\n # ocd: request to enter OCD mode\n self.ser.write(self.ocd.enter_ocd())\n # ocd: wait for OCD1 mode confirmation\n while True:\n ret = self.ser.read(4)\n if ret == b'BBIO':\n self.ser.read(1) # '1'\n elif ret == b'OCD1':\n debug(\"BP OCD mode confirmed\")\n break\n else:\n error(\"invalid answer from BusPirate: %s\" % ret)\n sys.exit(1)\n\n # setup port mode\n self.ser.write(self.ocd.cmd_port_mode(OCD.PORT_MODE_JTAG))\n self.ser.write(self.ocd.cmd_feature(OCD.FEATURE_PULLUP, 0x0))\n\n # set the tap in STATE_RUN_TEST_IDLE\n data, reslen = self.ocd.cmd_shift(self.tap.send_seq(self.tap.idle()))\n self._sr(data, reslen)\n\n info(\"BP initialized\")\n\n def close(self):\n debug(\"BP exiting\")\n self._sr(self.ocd.exit())\n\n def _sr(self, data, read_count=0):\n if data:\n self.ser.write(data)\n if read_count > 0:\n res = self.ser.read(read_count)\n return res\n\n def instruction(self, istr):\n data, reslen = self.ocd.cmd_shift(self.tap.send_seq(istr.seq(self.tap)))\n recv = self._sr(data, reslen)\n return istr.dec(bp.tap.recv_seq(self.ocd.decode_tdo(recv)))\n\nINSTRUCTIONS = { m[0]: m[1] for m in inspect.getmembers(sys.modules[__name__],\n lambda member: inspect.isclass(member) and (member.__base__ == Instruction or member.__base__.__base__ == Instruction)) }\nACTIONS = [ 'interactive' ] + list(INSTRUCTIONS.keys())\nEPILOG = \"Instructions:\\n\"\nfor name, istr in INSTRUCTIONS.items():\n extra = \"\"\n if '__code__' in dir(istr.__init__) and istr.__init__.__code__.co_argcount > 1:\n istr_args = [ \"%s=%s\" % (arg[0], arg[1]) for arg in zip(istr.__init__.__code__.co_varnames[1:], istr.__init__.__defaults__) ]\n extra = \"[:%s]\" % ','.join(istr_args)\n EPILOG 
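# Worked example of the framing OCD.cmd_shift above produces (checked only
# against the code as written here, not against BusPirate firmware docs):
# CMD_TAP_SHIFT, bit-count high byte, bit-count low byte, then one
# (TDI byte, TMS byte) pair per 8 clocked bits, packed LSB-first.
# For TAP.reset() -- eight clocks of TDI=0, TMS=1 -- the frame is:
#
#   0x05 0x00 0x08 0x00 0xFF
#    |    |    |    |    +--- TMS bits 0b11111111
#    |    |    |    +-------- TDI bits 0b00000000
#    |    +----+------------- bit count 8, big-endian
#    +----------------------- CMD_TAP_SHIFT
#
#   buf, resp_len = OCD().cmd_shift([(0, 1)] * 8)
#   assert buf == b'\x05\x00\x08\x00\xff' and resp_len == 4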
+= \"%s%s\\n\" % (name, extra)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('device', help='Serial port to BusPirate-compatible device eg. /dev/ttyUSB0, or \\'pretend\\' to only display the JTAG sequence')\n parser.add_argument('-d', '--debug', action='store_true', help='show debug output')\n parser.add_argument('actions', action='store', nargs='+', metavar='{interactive,[:arg]}')\n\n # parse arguments\n args = parser.parse_args()\n actions = list()\n for action in args.actions:\n reg = re.match(r\"(?P[A-Za-z-_]*):?(?P[x0-9A-Fa-f,]*)?\", action).groupdict()\n if reg['action'] not in ACTIONS:\n parser.error(\"invalid action %s\" % action)\n if action != reg['action'] and not reg['arg']:\n parser.error(\"invalid parameter for action %s\" % action)\n actions.append((reg['action'], [ int(a, base=0) for a in reg['arg'].split(',')] if reg['arg'] else []))\n\n # initialize logging to file and stdout\n class Formatter(logging.Formatter):\n def format(self, record):\n if record.levelno == logging.INFO:\n self._style._fmt = \"%(message)s\"\n else:\n self._style._fmt = \"%(levelname)s %(module)s: %(message)s\"\n return super().format(record)\n handler_file = logging.FileHandler(LOG, mode='a')\n handler_file.setFormatter(Formatter())\n handler_console = logging.StreamHandler(sys.stdout)\n handler_console.setFormatter(Formatter())\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO, handlers=[handler_file, handler_console])\n\n info(\"starting at %s, %s, logging to %s, using device '%s', actions: %s\" % (datetime.now(), VERSION, LOG, args.device, args.actions))\n\n # initialize the device\n if args.device == 'pretend':\n tap = TAP()\n state = TAP.sim_seq(tap.idle(), TAP.STATE_UNKNOWN)\n else:\n bp = BP(args.device)\n\n # run actions\n for action, action_arg in actions:\n if action == 'interactive':\n import IPython; from IPython import embed; embed()\n elif action in INSTRUCTIONS.keys():\n if args.device == 'pretend':\n istr = INSTRUCTIONS[action](*action_arg)\n state = TAP.sim_seq(istr.seq(tap), state)\n else:\n info(\"%s%s: %s\" % (action, action_arg, bp.instruction(INSTRUCTIONS[action](*action_arg))))\n\n # close device\n if args.device != 'pretend':\n bp.close()\n\n info(\"stopping at %s\" % datetime.now())\n","repo_name":"looran/poojtag","sub_path":"poojtag.py","file_name":"poojtag.py","file_ext":"py","file_size_in_byte":33653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"36390808231","text":"class Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n while val in nums:\n nums.remove(val)\n return len(nums)\n # count=0\n # for i in range(len(nums)):\n # if nums[i]==val:\n # nums[i]=\"_\"\n # count+=1\n # print(len(nums)-count)\n # i=len(nums)-1\n # j=0\n # print(nums)\n\n # while i==j:\n # if (nums[j]==\"_\"):\n \n # nums[j],nums[i]=nums[i],nums[j]\n # j+=1\n # i-=1\n # else:\n # j+=1\n # return\n\n\n \n \n","repo_name":"yordanos-AtoSV/A2sv-computational-programming","sub_path":"removeElements.py","file_name":"removeElements.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"72189759213","text":"from carts.models import Cart, CartItem, CartView\n\n\ndef get_cart(cart_id):\n try:\n cart = Cart.objects.get(cart_id=cart_id)\n except Cart.DoesNotExist as e:\n raise 
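# A sketch of the O(n) in-place variant the commented-out attempt in the
# removeElement snippet above was reaching for; calling list.remove in a
# loop is O(n^2) worst case. The helper name is illustrative.
def remove_element_two_pointer(nums, val):
    """Compact values != val to the front of nums; return the new length."""
    write = 0
    for read in range(len(nums)):
        if nums[read] != val:
            nums[write] = nums[read]
            write += 1
    return write

# remove_element_two_pointer([3, 2, 2, 3], 3) -> 2, with prefix [2, 2]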
e\n\n cart_items = CartItem.objects.filter(cart=cart).all()\n cart_items_render = []\n total = 0\n\n for item in cart_items:\n cart_items_render.append(\n {\n \"cupcake\": item.product,\n \"quantity\": item.quantity,\n \"total_per_product\": item.quantity * item.product.price,\n }\n )\n\n total += item.product.price * item.quantity\n return CartView(cart_items=cart_items_render, total=total)\n","repo_name":"MariliaMJ/cupcakestore","sub_path":"checkout/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"21797109002","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pin', '0018_auto_20160628_1309'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='useractivitiessample',\n name='label',\n field=models.ForeignKey(related_name='lable', to='pin.Lable'),\n ),\n ]\n","repo_name":"strogo/wisgoon","sub_path":"pin/migrations/0019_auto_20160628_1315.py","file_name":"0019_auto_20160628_1315.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"25319206764","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import Conv2d, ReLU, SiLU, Sigmoid, Linear, Hardtanh\nfrom torch.nn.functional import relu, relu6\n\nfrom model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams\nfrom model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO\nfrom model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation\nfrom model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs\nfrom tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs\nfrom tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest\n\nimport model_compression_toolkit as mct\ntp = mct.target_platform\n\n\nclass BaseLayerFusingTest(BasePytorchTest):\n\n def __init__(self, unit_test):\n super().__init__(unit_test=unit_test)\n self.expected_fusions = []\n\n def create_inputs_shape(self):\n return [[self.val_batch_size, 3, 16, 16]]\n\n def representative_data_gen(self):\n input_shapes = self.create_inputs_shape()\n yield self.generate_inputs(input_shapes)\n\n def get_type(self, fusion):\n fusion_types = [x.type for x in fusion]\n return fusion_types\n\n def get_tpc(self):\n default_config, mixed_precision_cfg_list = get_op_quantization_configs()\n default_configuration_options = tp.QuantizationConfigOptions([default_config])\n generated_tp = tp.TargetPlatformModel(default_configuration_options, name='layer_fusing_test')\n mixed_precision_configuration_options = tp.QuantizationConfigOptions(mixed_precision_cfg_list,\n base_config=default_config)\n return generated_tp, mixed_precision_configuration_options\n\n def _compare(self, fused_nodes):\n self.unit_test.assertTrue(len(fused_nodes) == len(self.expected_fusions), msg=f'Number of fusions is not as expected!')\n for i, fusion in enumerate(fused_nodes):\n self.unit_test.assertTrue(self.get_type(fusion) == self.expected_fusions[i], msg=f'Miss-match fusion compared to expected!')\n\n\nclass LayerFusingTest1(BaseLayerFusingTest):\n def __init__(self, unit_test):\n super().__init__(unit_test)\n self.expected_fusions = 
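# Minimal standalone sketch of the check _compare() above performs: a
# detected fusion is a sequence of node-like objects whose .type must match
# the expected layer-type pattern. FusedNode is a stand-in for illustration,
# not a model_compression_toolkit class.
from dataclasses import dataclass

@dataclass
class FusedNode:
    type: type  # the originating layer class, e.g. nn.Conv2d

def fusion_matches(fusion, expected_types):
    """True when the fused nodes' layer types equal the expected sequence."""
    return [node.type for node in fusion] == list(expected_types)

# fusion_matches([FusedNode(nn.Conv2d), FusedNode(nn.ReLU)],
#                [nn.Conv2d, nn.ReLU])  # -> True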
[[nn.Conv2d, nn.ReLU]]\n\n def get_tpc(self):\n generated_tp, mixed_precision_configuration_options = super().get_tpc()\n with generated_tp:\n conv = tp.OperatorsSet(\"Conv\", mixed_precision_configuration_options)\n any_relu = tp.OperatorsSet(\"AnyReLU\")\n # Define fusions\n tp.Fusing([conv, any_relu])\n\n pytorch_tpc = tp.TargetPlatformCapabilities(generated_tp, name='layer_fusing_test')\n with pytorch_tpc:\n tp.OperationsSetToLayers(\"Conv\", [nn.Conv2d])\n tp.OperationsSetToLayers(\"AnyReLU\", [torch.relu,\n nn.ReLU])\n return pytorch_tpc\n\n def run_test(self, seed=0):\n model_float = self.LayerFusingNetTest()\n\n graph = prepare_graph_with_configs(model_float, PytorchImplementation(), DEFAULT_PYTORCH_INFO,\n self.representative_data_gen, lambda name, _tp: self.get_tpc())\n\n self._compare(graph.fused_nodes)\n\n class LayerFusingNetTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 16, kernel_size=(3, 3))\n self.conv2 = nn.Conv2d(16, 32, kernel_size=(1, 1))\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n y = self.relu(x)\n return y\n\n\nclass LayerFusingTest2(BaseLayerFusingTest):\n def __init__(self, unit_test):\n super().__init__(unit_test)\n self.expected_fusions = [[Conv2d, Hardtanh], [Conv2d, ReLU], [Conv2d, Sigmoid], [Conv2d, SiLU]]\n\n def get_tpc(self):\n generated_tp, mixed_precision_configuration_options = super().get_tpc()\n with generated_tp:\n conv = tp.OperatorsSet(\"Conv\", mixed_precision_configuration_options)\n any_act = tp.OperatorsSet(\"AnyAct\")\n # Define fusions\n tp.Fusing([conv, any_act])\n\n pytorch_tpc = tp.TargetPlatformCapabilities(generated_tp, name='layer_fusing_test')\n with pytorch_tpc:\n tp.OperationsSetToLayers(\"Conv\", [Conv2d])\n tp.OperationsSetToLayers(\"AnyAct\", [ReLU,relu6,relu,SiLU,Sigmoid, LayerFilterParams(Hardtanh, min_val=0)])\n return pytorch_tpc\n\n def run_test(self, seed=0):\n model_float = self.LayerFusingNetTest()\n graph = prepare_graph_with_configs(model_float, PytorchImplementation(), DEFAULT_PYTORCH_INFO,\n self.representative_data_gen, lambda name, _tp: self.get_tpc())\n\n self._compare(graph.fused_nodes)\n\n class LayerFusingNetTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=(3,3))\n self.conv2 = nn.Conv2d(32, 32, kernel_size=(1,1))\n self.conv3 = nn.Conv2d(32, 32, kernel_size=(3,3))\n self.conv4 = nn.Conv2d(32, 64, kernel_size=(1,1))\n self.conv5 = nn.Conv2d(64, 64, kernel_size=(2,2))\n self.relu = nn.ReLU()\n self.tanh = Hardtanh(min_val=0)\n self.swish = nn.SiLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.tanh(x)\n x = self.conv3(x)\n x = self.relu(x)\n x = self.conv4(x)\n x = self.sigmoid(x)\n x = self.conv5(x)\n y = self.swish(x)\n return y\n\n\nclass LayerFusingTest3(BaseLayerFusingTest):\n def __init__(self, unit_test):\n super().__init__(unit_test)\n self.expected_fusions = [[Conv2d, ReLU]]\n\n def get_tpc(self):\n generated_tp, mixed_precision_configuration_options = super().get_tpc()\n with generated_tp:\n conv = tp.OperatorsSet(\"Conv\", mixed_precision_configuration_options)\n any_act = tp.OperatorsSet(\"AnyAct\")\n # Define fusions\n tp.Fusing([conv, any_act])\n\n pytorch_tpc = tp.TargetPlatformCapabilities(generated_tp, name='layer_fusing_test')\n with pytorch_tpc:\n tp.OperationsSetToLayers(\"Conv\", [Conv2d])\n tp.OperationsSetToLayers(\"AnyAct\", [ReLU,relu6,relu])\n return pytorch_tpc\n\n def run_test(self, 
seed=0):\n model_float = self.LayerFusingNetTest()\n graph = prepare_graph_with_configs(model_float, PytorchImplementation(), DEFAULT_PYTORCH_INFO,\n self.representative_data_gen, lambda name, _tp: self.get_tpc())\n\n self._compare(graph.fused_nodes)\n\n class LayerFusingNetTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=(3,3))\n self.conv2 = nn.Conv2d(32, 32, kernel_size=(1,1))\n self.conv3 = nn.Conv2d(32, 32, kernel_size=(3,3))\n self.conv4 = nn.Conv2d(32, 64, kernel_size=(1,1))\n self.conv5 = nn.Conv2d(64, 64, kernel_size=(2,2))\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n self.swish = nn.SiLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.tanh(x)\n x = self.conv3(x)\n x = self.relu(x)\n x = self.conv4(x)\n x = self.sigmoid(x)\n x = self.conv5(x)\n y = self.swish(x)\n return y\n\n\nclass LayerFusingTest4(BaseLayerFusingTest):\n def __init__(self, unit_test):\n super().__init__(unit_test)\n self.expected_fusions = [[Conv2d, SiLU, torch.add], [Conv2d, SiLU, torch.add], [Conv2d, ReLU], [Conv2d, ReLU, torch.add], [Linear, SiLU], [Linear, SiLU]]\n\n def get_tpc(self):\n generated_tp, mixed_precision_configuration_options = super().get_tpc()\n with generated_tp:\n conv = tp.OperatorsSet(\"Conv\", mixed_precision_configuration_options)\n fc = tp.OperatorsSet(\"FullyConnected\", mixed_precision_configuration_options)\n any_relu = tp.OperatorsSet(\"AnyReLU\")\n add = tp.OperatorsSet(\"Add\")\n swish = tp.OperatorsSet(\"Swish\")\n activations_to_fuse = tp.OperatorSetConcat(any_relu, swish)\n # Define fusions\n tp.Fusing([conv, activations_to_fuse])\n tp.Fusing([conv, add, activations_to_fuse])\n tp.Fusing([conv, activations_to_fuse, add])\n tp.Fusing([fc, activations_to_fuse])\n\n pytorch_tpc = tp.TargetPlatformCapabilities(generated_tp, name='layer_fusing_test')\n with pytorch_tpc:\n tp.OperationsSetToLayers(\"Conv\", [Conv2d])\n tp.OperationsSetToLayers(\"FullyConnected\", [Linear])\n tp.OperationsSetToLayers(\"AnyReLU\", [ReLU])\n tp.OperationsSetToLayers(\"Add\", [torch.add])\n tp.OperationsSetToLayers(\"Swish\", [SiLU])\n return pytorch_tpc\n\n def run_test(self, seed=0):\n model_float = self.LayerFusingNetTest()\n graph = prepare_graph_with_configs(model_float, PytorchImplementation(), DEFAULT_PYTORCH_INFO,\n self.representative_data_gen, lambda name, _tp: self.get_tpc())\n\n self._compare(graph.fused_nodes)\n\n class LayerFusingNetTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 3, kernel_size=(3,3), padding='same')\n self.conv2 = nn.Conv2d(3, 3, kernel_size=(1,1), padding='same')\n self.conv3 = nn.Conv2d(3, 3, kernel_size=(3,3), padding='same')\n self.conv4 = nn.Conv2d(3, 3, kernel_size=(1,1), padding='same')\n self.conv5 = nn.Conv2d(3, 3, kernel_size=(3,3), padding='same')\n self.conv6 = nn.Conv2d(3, 3, kernel_size=(1,1), padding='same')\n self.relu = nn.ReLU()\n self.swish = nn.SiLU()\n self.flatten = nn.Flatten()\n self.dense1 = nn.Linear(768, out_features=16)\n self.dense2 = nn.Linear(16, out_features=16)\n\n def forward(self, inputs):\n x = self.conv1(inputs)\n x = self.swish(x)\n x1 = torch.add(inputs, x)\n x2 = self.conv2(x1)\n x2 = self.swish(x2)\n x2 = torch.add(x1, x2)\n x2 = self.conv3(x2)\n x2 = self.relu(x2)\n x3 = self.conv4(x2)\n x3 = self.relu(x3)\n x3 = torch.add(x3, x2)\n x3 = self.flatten(x3)\n x3 = self.dense1(x3)\n x3 = self.swish(x3)\n x3 = self.dense2(x3)\n y = self.swish(x3)\n return 
y\n","repo_name":"sony/model_optimization","sub_path":"tests/pytorch_tests/function_tests/layer_fusing_test.py","file_name":"layer_fusing_test.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"55"} +{"seq_id":"6621621947","text":"#!/usr/bin/env python\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.numSmaller = 0\n self.numRepeat = 1\n\nclass Solution:\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n self.root = None\n length = len(nums)\n ret = [0] * length\n i = length - 1\n while i >= 0:\n newNode = TreeNode(nums[i])\n self.root, size = self.insert(self.root, newNode)\n ret[i] = size\n i -= 1\n return ret\n\n def insert(self, node, newNode):\n if node == None:\n # print('Finish node %d, numSmaller: %d, numRepeat: %d' % (newNode.val, newNode.numSmaller, newNode.numRepeat))\n return (newNode, newNode.numSmaller)\n\n if node.val == newNode.val:\n node.numRepeat += 1\n size = node.numSmaller\n elif node.val > newNode.val:\n node.numSmaller += 1\n node.left, size = self.insert(node.left, newNode)\n elif node.val < newNode.val:\n node.right, size = self.insert(node.right, newNode)\n size += (node.numSmaller + node.numRepeat)\n # print('Finish node %d, numSmaller: %d, numRepeat: %d, size: %d' % (node.val, node.numSmaller, node.numRepeat, size))\n return node, size\n\nnumsList = [\n [5,2,6,1],\n [52,22,83,51,98,69,81,32,78,28,94,13,2,97,3,76,99,51,9,21,84,66,65,36,100,41],\n ]\nsol = Solution()\n\nfor nums in numsList:\n print(sol.countSmaller(nums))\n","repo_name":"eroicaleo/LearningPython","sub_path":"interview/leet/315_Count_of_Smaller_Numbers_After_Self_v2.py","file_name":"315_Count_of_Smaller_Numbers_After_Self_v2.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"9735219003","text":"import numpy as np\nimport pickle\nimport os\n\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom experiments.main import DIR_TREES, F_DATASET, F_MAP_TREE, F_OPT_TREE, F_SPARSE_OPT_TREE, load_data\nfrom tree import Tree\nfrom argparse import ArgumentParser\n\nfrom typing import Set, FrozenSet, List\n\nfrom prettytable import PrettyTable\n\ndatasets = [\n 'australian-un-reduced_converted',\n 'bank_conv_categorical_bin',\n 'banknote_categorical_bin',\n 'cancer-un-reduced_converted',\n 'car-un-reduced_converted',\n 'cleve-un-reduced_converted',\n 'colic-un-reduced_converted',\n 'compas-binary',\n 'fico-binary',\n 'haberman-un-reduced_converted',\n 'heart-statlog-un-reduced_converted',\n 'hungarian-un-reduced_converted',\n 'messidor_categorical_bin',\n 'primary-tumor',\n 'seismic_bumps_categorical_bin',\n 'shuttleM-un-reduced_converted',\n # 'soybean',\n 'spect-un-reduced_converted',\n # 'tic-tac-toe',\n # 'vote',\n]\n\nSAMPLE_SIZES = [10, 20, 40, 80, 160]\nNUM_SAMPLES_TAKEN = 10\nOPT_DEPTHS = [2, 3, 4, 5]\nSEED = 42\n\nALPHA = 5.0\nALPHA_S = 0.95\nBETA_S = 0.5\nLAMB = 0.005\n\ndef region_compatibility(F_1: List[FrozenSet[int]], F_2: List[FrozenSet[int]], D: int):\n F_1 = set(F_1)\n F_2 = set(F_2)\n shared_sets = F_1.intersection(F_2)\n F_1 = F_1.difference(shared_sets)\n F_2 = F_2.difference(shared_sets)\n all_sets = list(F_1) + list(F_2)\n if len(all_sets) == 0:\n assert sum(len(st) for st in shared_sets) == D\n return 0.0\n\n W = np.zeros((len(all_sets), len(all_sets)))\n for i, r_1 in enumerate(all_sets):\n 
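# Note on the countSmaller solution above: each BST node is augmented with
# numSmaller (how many inserted keys sit in its left subtree) and numRepeat,
# so a single insertion can report how many previously inserted values are
# strictly smaller. Inserting nums from right to left therefore yields, per
# element, the count of smaller elements to its right:
#   Solution().countSmaller([5, 2, 6, 1])  # -> [2, 1, 1, 0]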
for j, r_2 in enumerate(all_sets):\n W[i, j] = len(r_1.intersection(r_2)) / len(r_1.union(r_2))\n V = np.zeros(len(all_sets))\n for i, r in enumerate(all_sets):\n V[i] = len(r) / D if r in F_1 else -len(r) / D\n return V @ W @ V\n\ndef results(dataset: str):\n data, n, d, l = load_data(dataset, SAMPLE_SIZES, NUM_SAMPLES_TAKEN, SEED)\n print(dataset)\n\n values = np.zeros((len(SAMPLE_SIZES), 7, 4))\n for i, sample_size in enumerate(SAMPLE_SIZES):\n print(f\"Sample Size: {sample_size}\")\n opt_trees = []\n sparse_opt_trees = []\n map_trees = []\n cart_trees = []\n for j in range(NUM_SAMPLES_TAKEN):\n dir_tree = DIR_TREES.format(\n dataset=dataset,\n seed=SEED,\n idx=j,\n sample_size=sample_size)\n\n opt_trees.append([])\n for d, opt_max_depth in enumerate(OPT_DEPTHS):\n opt_tree_file = F_OPT_TREE.format(depth=opt_max_depth)\n opt_tree_path = os.path.join(dir_tree, opt_tree_file)\n opt_tree_dict, duration = pickle.loads(open(opt_tree_path, 'rb').read())\n opt_trees[j].append(Tree.from_dict(opt_tree_dict))\n\n sparse_opt_tree_file = F_SPARSE_OPT_TREE.format(lamb=LAMB)\n sparse_opt_tree_path = os.path.join(dir_tree, sparse_opt_tree_file)\n sparse_opt_tree_dict, duration = pickle.loads(open(sparse_opt_tree_path, 'rb').read())\n sparse_opt_trees.append(Tree.from_dict(sparse_opt_tree_dict))\n\n map_tree_file = F_MAP_TREE.format(alpha=ALPHA, alpha_s=ALPHA_S, beta_s=BETA_S)\n map_tree_path = os.path.join(dir_tree, map_tree_file)\n map_tree_dict, duration = pickle.loads(open(map_tree_path, 'rb').read())\n map_trees.append(Tree.from_dict(map_tree_dict))\n\n cart_tree = DecisionTreeClassifier(random_state=SEED)\n X_train, _, y_train, _ = data[i][j]\n cart_tree.fit(X_train, y_train)\n cart_trees.append(Tree.from_sklearn_tree(cart_tree.tree_))\n\n # accuracy\n total_opt_tree_accs = [0.0] * len(OPT_DEPTHS)\n total_sparse_opt_tree_acc = 0.0\n total_map_tree_acc = 0.0\n total_cart_tree_acc = 0.0\n\n # log likelihood\n total_opt_tree_lls = [0.0] * len(OPT_DEPTHS)\n total_sparse_opt_tree_ll = 0.0\n total_map_tree_ll = 0.0\n total_cart_tree_ll = 0.0\n\n # regional compatibility\n total_opt_tree_stabs = [0.0] * len(OPT_DEPTHS)\n total_sparse_opt_tree_stab = 0.0\n total_map_tree_stab = 0.0\n total_cart_tree_stab = 0.0\n\n # semantic similarity\n total_opt_tree_sims = [0.0] * len(OPT_DEPTHS)\n total_sparse_opt_tree_sim = 0.0\n total_map_tree_sim = 0.0\n total_cart_tree_sim = 0.0\n\n for j in range(NUM_SAMPLES_TAKEN):\n X_train, X_test, y_train, y_test = data[i][j]\n X = np.concatenate((X_train, X_test), axis=0)\n\n for d, opt_max_depth in enumerate(OPT_DEPTHS):\n opt_tree = opt_trees[j][d]\n total_opt_tree_accs[d] += opt_tree.accuracy(X_test, y_test)\n total_opt_tree_lls[d] += opt_tree.log_likelihood(X_test, y_test, alpha=ALPHA)\n\n for l in range(j + 1, NUM_SAMPLES_TAKEN):\n if l == j:\n continue\n F_1 = opt_trees[j][d].regions(X)\n F_2 = opt_trees[l][d].regions(X)\n total_opt_tree_stabs[d] += region_compatibility(F_1, F_2, X.shape[0])\n total_opt_tree_sims[d] += np.mean(opt_trees[j][d].predict(X) == opt_trees[l][d].predict(X))\n\n sparse_opt_tree = sparse_opt_trees[j]\n total_sparse_opt_tree_acc += sparse_opt_tree.accuracy(X_test, y_test)\n total_sparse_opt_tree_ll += sparse_opt_tree.log_likelihood(X_test, y_test, alpha=ALPHA)\n for l in range(j + 1, NUM_SAMPLES_TAKEN):\n if l == j:\n continue\n F_1 = sparse_opt_trees[j].regions(X)\n F_2 = sparse_opt_trees[l].regions(X)\n total_sparse_opt_tree_stab += region_compatibility(F_1, F_2, X.shape[0])\n total_sparse_opt_tree_sim += 
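# Worked example for region_compatibility above (regions are frozensets of
# row indices over D rows). Identical region sets cancel entirely and score
# 0.0; disagreeing partitions score via the signed-Jaccard quadratic form
# V @ W @ V. Hand-computed case with D = 3:
#   F_1 = [frozenset({0, 1}), frozenset({2})]
#   F_2 = [frozenset({0}), frozenset({1, 2})]
#   region_compatibility(F_1, F_2, 3)  # -> 10/27, roughly 0.370
# Larger values mean the two trees partition the rows less consistently.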
np.mean(sparse_opt_trees[j].predict(X) == sparse_opt_trees[l].predict(X))\n\n map_tree = map_trees[j]\n total_map_tree_acc += map_tree.accuracy(X_test, y_test)\n total_map_tree_ll += map_tree.log_likelihood(X_test, y_test, alpha=ALPHA)\n for l in range(j + 1, NUM_SAMPLES_TAKEN):\n if l == j:\n continue\n F_1 = map_trees[j].regions(X)\n F_2 = map_trees[l].regions(X)\n total_map_tree_stab += region_compatibility(F_1, F_2, X.shape[0])\n total_map_tree_sim += np.mean(map_trees[j].predict(X) == map_trees[l].predict(X))\n\n cart_tree = cart_trees[j]\n total_cart_tree_acc += cart_tree.accuracy(X_test, y_test)\n total_cart_tree_ll += cart_tree.log_likelihood(X_test, y_test, alpha=ALPHA)\n for l in range(j + 1, NUM_SAMPLES_TAKEN):\n if l == j:\n continue\n F_1 = cart_trees[j].regions(X)\n F_2 = cart_trees[l].regions(X)\n total_cart_tree_stab += region_compatibility(F_1, F_2, X.shape[0])\n total_cart_tree_sim += np.mean(cart_trees[j].predict(X) == cart_trees[l].predict(X))\n \n avg_map_tree_acc = total_map_tree_acc / NUM_SAMPLES_TAKEN\n avg_map_tree_ll = total_map_tree_ll / NUM_SAMPLES_TAKEN\n avg_map_tree_stab = total_map_tree_stab / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n avg_map_tree_sim = total_map_tree_sim / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n\n avg_sparse_opt_tree_acc = total_sparse_opt_tree_acc / NUM_SAMPLES_TAKEN\n avg_sparse_opt_tree_ll = total_sparse_opt_tree_ll / NUM_SAMPLES_TAKEN\n avg_sparse_opt_tree_stab = total_sparse_opt_tree_stab / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n avg_sparse_opt_tree_sim = total_sparse_opt_tree_sim / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n\n avg_cart_tree_acc = total_cart_tree_acc / NUM_SAMPLES_TAKEN\n avg_cart_tree_ll = total_cart_tree_ll / NUM_SAMPLES_TAKEN\n avg_cart_tree_stab = total_cart_tree_stab / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n avg_cart_tree_sim = total_cart_tree_sim / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n\n avg_opt_tree_accs = [0.0] * len(OPT_DEPTHS)\n avg_opt_tree_lls = [0.0] * len(OPT_DEPTHS)\n avg_opt_tree_stabs = [0.0] * len(OPT_DEPTHS)\n avg_opt_tree_sims = [0.0] * len(OPT_DEPTHS)\n for d, opt_max_depth in enumerate(OPT_DEPTHS):\n avg_opt_tree_accs[d] = total_opt_tree_accs[d] / NUM_SAMPLES_TAKEN\n avg_opt_tree_lls[d] = total_opt_tree_lls[d] / NUM_SAMPLES_TAKEN\n avg_opt_tree_stabs[d] = total_opt_tree_stabs[d] / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n avg_opt_tree_sims[d] = total_opt_tree_sims[d] / (NUM_SAMPLES_TAKEN * (NUM_SAMPLES_TAKEN - 1) / 2)\n\n values[i][0][0] = avg_map_tree_acc\n values[i][0][1] = avg_map_tree_ll\n values[i][0][2] = avg_map_tree_stab\n values[i][0][3] = avg_map_tree_sim\n\n values[i][1][0] = avg_sparse_opt_tree_acc\n values[i][1][1] = avg_sparse_opt_tree_ll\n values[i][1][2] = avg_sparse_opt_tree_stab\n values[i][1][3] = avg_sparse_opt_tree_sim\n\n values[i][2][0] = avg_cart_tree_acc\n values[i][2][1] = avg_cart_tree_ll\n values[i][2][2] = avg_cart_tree_stab\n values[i][2][3] = avg_cart_tree_sim\n\n for d, opt_max_depth in enumerate(OPT_DEPTHS):\n values[i][3 + d][0] = avg_opt_tree_accs[d]\n values[i][3 + d][1] = avg_opt_tree_lls[d]\n values[i][3 + d][2] = avg_opt_tree_stabs[d]\n values[i][3 + d][3] = avg_opt_tree_sims[d]\n \n return values\n\n\nif __name__ == '__main__':\n \n # values = np.zeros((len(SAMPLE_SIZES), 7, 4))\n # if not os.path.exists(\"results\"):\n # os.makedirs(\"results\")\n \n # for dataset in datasets:\n # values = results(dataset)\n # np.save(f\"results/{dataset}.npy\", values)\n\n wins = 
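# Broadcasting note for the aggregation below: `values` has shape
# (len(SAMPLE_SIZES), 7, 4) with row 0 holding the MAP tree's metrics.
# Reshaping that row to (n, 1, 4) and comparing it to rows 1..6 broadcasts
# to a boolean (n, 6, 4) "MAP wins" indicator per baseline and metric:
#   import numpy as np
#   v = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)
#   (v[:, 0, :].reshape(2, 1, 4) > v[:, 1:, :]).shape  # -> (2, 2, 4)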
np.zeros((len(SAMPLE_SIZES), 6, 4))\n    diff = np.zeros((len(SAMPLE_SIZES), 6, 4))\n    for dataset in datasets:\n        values = np.load(f\"results/{dataset}.npy\")\n        wins += values[:, 0, :].reshape(len(SAMPLE_SIZES), 1, 4) > values[:, 1:, :]\n        diff += values[:, 0, :].reshape(len(SAMPLE_SIZES), 1, 4) - values[:, 1:, :]\n\n    print(\"Win %\")\n    for i, sample_size in enumerate(SAMPLE_SIZES):\n        print(f\"Sample size: {sample_size}\")\n        table = PrettyTable(field_names=[\"Metric\", \"MAP vs. SparseOpt\", \"MAP vs. CART\", \"MAP vs. Opt_2\", \"MAP vs. Opt_3\", \"MAP vs. Opt_4\", \"MAP vs. Opt_5\"])\n        for j, metric in enumerate([\"Accuracy\", \"Log Likelihood\", \"Stability\", \"Similarity\"]):\n            table.add_row([metric] + [f\"{prc}%\" for prc in np.round(wins[i, :, j] / len(datasets) * 100, 2)])\n        print(table)\n","repo_name":"SullivanC19/bopt","sub_path":"experiments/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":10776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"30563652217","text":"from paperwork_system.tests.LoggedInTestCase import LoggedInTestCase\nfrom django.urls import reverse_lazy\nimport os\nfrom django.conf import settings\n\nfrom ..models import Quotations, Quotations_details, Quotations_attached_file, Clients\n\n\nclass Test_QuotationExcelDownloadView(LoggedInTestCase):\n\n    # Normal case\n    def test_download_excel_success(self):\n\n        # Create test data\n        registration_client = Clients.objects.create(\n            client_id=1,\n            name='顧客名'\n        )\n\n        registration_params = {\n            'quotation_id': 1,\n            'client_id': registration_client.client_id,\n            'expiry': '見積有効期限',\n            'recipient': '宛名',\n            'title': '件名',\n            'delivery_time': '納期',\n            'delivery_location': '納入場所',\n            'delivery_method': '納入方法',\n            'payment_condition': '取引条件',\n            'remark': '備考',\n\n            'quotations_details_set-TOTAL_FORMS': 1,\n            'quotations_details_set-INITIAL_FORMS': 0,\n            'quotations_details_set-MIN_NUM_FORMS': 0,\n            'quotations_details_set-MAX_NUM_FORMS': 1000,\n            'quotations_details_set-0-merchandise': '商品名',\n            'quotations_details_set-0-merchandise_description': '商品明細',\n            'quotations_details_set-0-quantity': 1,\n            'quotations_details_set-0-unit': '単位',\n            'quotations_details_set-0-sales_unit_price': 100,\n            'quotations_details_set-0-purchase_unit_price': 90,\n\n            'file': ''\n        }\n\n        self.client.post(\n            reverse_lazy('quotation:registration'),\n            registration_params)\n\n        # Execute the Excel download\n        response = self.client.get(\n            reverse_lazy(\n                'quotation:exceldownload', kwargs={\n                    'pk': registration_params['quotation_id']}))\n\n        # Verify the Excel file was created\n        self.assertTrue(os.path.exists(settings.BASE_DIR +\n                                       '/quotation/lib/quotation_excel/created_excel/' +\n                                       'No ' +\n                                       str(registration_params['quotation_id']) +\n                                       '.xlsx'.replace('/', '\\\\')))\n\n        # Validate the downloaded Excel file\n        self.assertEqual(\n            response['Content-Disposition'],\n            'attachment; filename=\"No ' + str(\n                registration_params['quotation_id']) + '.xlsx\"')\n        self.assertEqual(response['Content-Length'], '8355')\n        self.assertEqual(\n            response['Content-Type'],\n            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\n        # Cleanup\n        response.close()\n        os.remove(settings.BASE_DIR +\n                  '/quotation/lib/quotation_excel/created_excel/' +\n                  'No ' +\n                  str(registration_params['quotation_id']) +\n                  '.xlsx'.replace('/', 
'\\\\'))\n","repo_name":"iihama-h/paperwork-system","sub_path":"quotation/tests/test_ExcelDownloadView.py","file_name":"test_ExcelDownloadView.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"72047893292","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom decimal import Decimal\nfrom django.core.exceptions import ValidationError\nfrom django.forms.models import model_to_dict, modelform_factory\nfrom model_mommy.mommy import Mommy\nimport pytest\nfrom fakeapp.models import DecimalFieldModel\nfrom strictmodels import MODEL_MOMMY_MAPPING\n\n\n\ndef test_StrictDecimalField_no_args():\n value = DecimalFieldModel()\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_save():\n x = DecimalFieldModel(field='1.1')\n x.save()\n assert model_to_dict(x) == model_to_dict(DecimalFieldModel.objects.get(pk=x.pk))\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_mommy():\n mommy = Mommy(model=DecimalFieldModel)\n mommy.type_mapping.update(MODEL_MOMMY_MAPPING)\n mommy.prepare()\n mommy.make()\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_form_with_instance_valid():\n x = DecimalFieldModel(field=5)\n form_class = modelform_factory(model=DecimalFieldModel, fields=['field'])\n form = form_class(data={'field': 6}, instance=x)\n assert form.is_valid() is True\n assert form.errors == {}\n assert form.save().field == Decimal('6.0')\n\n\ndef test_StrictDecimalField_form_with_instance_invalid():\n x = DecimalFieldModel(field=5)\n form_class = modelform_factory(model=DecimalFieldModel, fields=['field'])\n form = form_class(data={'field': 9223372036854775808}, instance=x)\n assert form.is_valid() is False\n assert form.errors == {'field': ['Ensure that there are no more than 5 digits in total.']}\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_form_without_instance_valid():\n form_class = modelform_factory(model=DecimalFieldModel, fields=['field'])\n form = form_class(data={'field': 6})\n assert form.is_valid() is True\n assert form.errors == {}\n assert form.save().field == Decimal('6.0')\n\n\ndef test_StrictDecimalField_form_without_instance_invalid():\n form_class = modelform_factory(model=DecimalFieldModel, fields=['field'])\n form = form_class(data={'field': 9223372036854775808})\n assert form.is_valid() is False\n assert form.errors == {'field': ['Ensure that there are no more than 5 digits in total.']}\n\n\ndef test_StrictDecimalField_descriptor_doesnt_disappear():\n \"\"\"\n don't clobber the descriptor\n \"\"\"\n value = DecimalFieldModel(field='1.1')\n assert value.field == Decimal('1.1')\n value.field = '2.0'\n assert value.field == Decimal('2.0')\n with pytest.raises(ValidationError):\n value.field = 'v'*256\n assert value.field == Decimal('2.0')\n value.field = Decimal('3.0')\n assert value.field == Decimal('3.0')\n value.field = -1\n assert value.field == Decimal('-1')\n value.field = '-1'\n assert value.field == Decimal('-1')\n\n\n\ndef test_StrictDecimalField_null_skips_cleaning():\n DecimalFieldModel(field=None)\n\n\n\ndef test_StrictDecimalField_ok_until_changed():\n \"\"\"\n Ensure this value cannot change to an invalid state after being set\n \"\"\"\n model = DecimalFieldModel(field='0.0001')\n assert model.field == Decimal('0.0001')\n with pytest.raises(ValidationError):\n model.field = 
'2000-00-00'\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_create_via_queryset():\n \"\"\"\n This won't allow crap into the DB.\n \"\"\"\n assert DecimalFieldModel.objects.count() == 0\n with pytest.raises(ValidationError):\n DecimalFieldModel.objects.create(field='t'*256)\n assert DecimalFieldModel.objects.count() == 0\n\n\n@pytest.mark.django_db\ndef test_StrictDecimalField_update_via_queryset_invalid_then_get():\n \"\"\"\n ValidationError: 2000-01-01' value must be a decimal number\n \"\"\"\n model = DecimalFieldModel.objects.create(field='0.02')\n assert model.field == Decimal('0.02')\n with pytest.raises(ValidationError):\n model.__class__.objects.filter(pk=model.pk).update(field='2000-01-01')\n","repo_name":"kezabelle/django-strictmodels","sub_path":"test_fields_decimal.py","file_name":"test_fields_decimal.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"40403804094","text":"def get_elem_nodes(iwfm_dll,elem_id):\n ''' IWFM DLL: Get FE element's nodes \n\n Parameters\n ----------\n iwfm_dll : IWFM Model object\n instantiated IWFM model object\n \n elem_id : int\n FE element index \n\n Returns\n -------\n nodes : list of ints\n FE node numbers of element elem_id\n\n status : int\n 0 if everything worked\n\n '''\n\n from ctypes import byref, c_int, c_double\n import iwfm.dll as idll\n\n nelem = idll.get_nelem(iwfm_dll)[0]\n\n vertices = 4\n\n nodes = (c_int * vertices)(*range(vertices))\n\n status = c_int(-1)\n\n iwfm_dll.IW_Model_GetElementConfigData(byref(c_int(elem_id)), \n byref(c_int(vertices)), \n byref(nodes), \n byref(status))\n\n \n\n return list(nodes), status\n\n\nif __name__ == '__main__':\n ' Run get_elem_nodes() from command line '\n import sys\n import iwfm.debug as idb\n import iwfm as iwfm\n import iwfm.dll as idll\n\n if len(sys.argv) > 1: # arguments are listed on the command line\n dll_path, pre_file, sim_file = sys.argv[1], sys.argv[2], sys.argv[3]\n\n else: # ask for file names from terminal\n dll_path = input('Path to IWFM DLL: ')\n pre_file = input('IWFM Preprocessor file name: ')\n sim_file = input('IWFM Simulation file name: ')\n\n iwfm.file_test(pre_file)\n iwfm.file_test(sim_file)\n\n idb.exe_time() # initialize timer\n\n iwfm_dll = idll.dll_init(dll_path) # instatiate the IWFM DLL\n\n status = idll.dll_open(iwfm_dll, pre_file, sim_file) # instantiate the model\n\n\n for elem_id in range(1, 10):\n nodes, status = get_elem_nodes(iwfm_dll, elem_id)\n \n print(f' Nodes of element {elem_id}: {nodes}')\n\n idb.exe_time() # print elapsed time\n","repo_name":"ucdavis/iwfm","sub_path":"iwfm/dll/get_elem_nodes.py","file_name":"get_elem_nodes.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"72379947050","text":"class Database:\n def __init__(self, row_counts):\n self.parents = list(range(len(row_counts)))\n self.rank = [0] * len(row_counts)\n self.row_counts = row_counts\n self.max_row_count = max(row_counts)\n\n def merge(self, src, dst):\n src_parent = self.get_parent(src)\n dst_parent = self.get_parent(dst)\n\n if src_parent == dst_parent:\n return\n\n new_row_count = self.row_counts[src_parent] + self.row_counts[dst_parent]\n self.max_row_count = max(self.max_row_count, new_row_count)\n\n src_rank, dst_rank = self.rank[src_parent], self.rank[dst_parent]\n\n if src_rank < dst_rank:\n self.parents[src_parent] = dst_parent\n 
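# Usage sketch for the Database union-find defined here (stdin-free; the
# numbers are illustrative): row_counts seeds each singleton's size, merge()
# unions by rank while tracking the largest component, and get_parent()
# compresses paths iteratively to stay clear of Python's recursion limit.
#   db = Database([1, 1, 5, 2])
#   db.merge(0, 3)          # 1 + 2 rows -> component of 3
#   db.merge(1, 0)          # + 1 row    -> component of 4
#   db.max_row_count        # -> 5 (the untouched size-5 table still leads)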
self.row_counts[src_parent] = 0\n            self.row_counts[dst_parent] = new_row_count\n        else:\n            self.parents[dst_parent] = src_parent\n            self.row_counts[dst_parent] = 0\n            self.row_counts[src_parent] = new_row_count\n\n            if src_rank == dst_rank:\n                self.rank[src_parent] += 1  # the surviving root's rank grows on a tie\n\n    def get_parent(self, table):\n        children_to_update = []\n\n        root = table\n\n        # A recursive implementation reaches the max recursion depth for one of the test cases\n        while root != self.parents[root]:\n            children_to_update.append(root)\n            root = self.parents[root]\n\n        for i in children_to_update:\n            self.parents[i] = root\n\n        return root\n\n\ndef main():\n    n_tables, n_queries = map(int, input().split())\n    counts = list(map(int, input().split()))\n    assert len(counts) == n_tables\n    db = Database(counts)\n    for _ in range(n_queries):\n        dst, src = map(int, input().split())\n        db.merge(dst - 1, src - 1)\n        print(db.max_row_count)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DumbledoreD/algo-spec","sub_path":"assignments/c2w3/merging_tables.py","file_name":"merging_tables.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"34254197784","text":"# Database demo with Python's sqlite3\n\nimport sqlite3 as sq\nconn = sq.connect(\"courses.db\")\ncursor = conn.cursor()\ncursor.execute(\"\"\"CREATE TABLE if not exists courses(\n            number INTEGER PRIMARY KEY,\n            name text,\n            ects real);\"\"\")\n\n# normal insertion\n\n#cursor.execute(\"\"\"INSERT INTO courses VALUES(\"02820\",\"Python programming\",5);\"\"\")\n\n# insertion through a variable\n\n#courses = (\"02345\",\"NonLinear Signal IDk\",12)\n#cursor.execute(\"INSERT INTO courses values(?,?,?);\",courses)\n\n# many entries at once\n#courses = [(\"2323\",\"introduction to cognitive Science\",6),(\"2327\",\"introduction to Python\",3)]\n#cursor.executemany(\"INSERT INTO courses values(?,?,?);\",courses)\n#conn.commit()\n\n# Fetch data from the db\n\n#cursor.execute(\"SELECT * FROM courses;\")\n#print(cursor.fetchone()) # Return one row at a time with fetchone\n#for row in cursor:\n#    print(row)\n\n# Limiting no. of rows\n#cursor.execute(\"SELECT * FROM courses ORDER BY number LIMIT 2;\")\n#print(cursor.fetchall())\n\n\n# search for specific values\n\n# cursor.execute(\"Select * from courses where number=? or name=? or ects=?\",(\"2327\",12,6))\n# rows = cursor.fetchall()\n# print(rows)\n\n# Parameterized search with data from a Python variable\n\n# param ={'ects':10.0}\n# cursor.execute(\"SELECT number From courses WHERE ects=?\",(param['ects'],))\n# print(cursor.fetchall())\n\n# Updating data in SQLite\n# cursor.execute(\"update courses set name=?,ects=? 
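# A quick check of the Database union-find above (assumes the class is in
# scope; the row counts are made up for illustration):
db = Database([10, 5, 7])
db.merge(0, 1)           # tables 0 and 1 now share one root holding 15 rows
print(db.max_row_count)  # 15
db.merge(2, 0)           # merging table 2 into the combined root gives 22
print(db.max_row_count)  # 22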
where number=?\",('MAX','99','2327'))\n# cursor.execute(\"SELECT * FROM courses;\")\n# print(cursor.fetchall())\n\n# deleting From database\ncursor.execute(\"DELETE FROM courses where number=?\",(\"2345\",))\nconn.commit()\ncursor.execute(\"SELECT * FROM courses;\")\nprint(cursor.fetchall())\nconn.close()","repo_name":"Mayankjh/Python_ML_Training","sub_path":"Python Training/Day 9/sqllitedata.py","file_name":"sqllitedata.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"33492247213","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 21 21:51:16 2019\n\n@author: my\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# Method 1: Use the property of BST: \n#the left subtree of node contains nodes with values less than or equal to the node's value \n#the right subtree of node contains nodes with values greater than to the node's value \n#Left and Right subtrees are BSTs. \nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n parent_val = root.val\n p_val = p.val\n q_val = q.val\n # right sub-tree\n if p_val > parent_val and q_val > parent_val:\n return self.lowestCommonAncestor(root.right, p, q)\n # left sub-tree\n elif p_val < parent_val and q_val < parent_val:\n return self.lowestCommonAncestor(root.left, p, q)\n else:\n return root","repo_name":"mayu0007/LeetCode","sub_path":"235_LCA_BST.py","file_name":"235_LCA_BST.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"43017537215","text":"import googleapiclient.discovery\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport json\r\nfrom google.cloud import storage\r\nfrom google.api_core.client_options import ClientOptions\r\nfrom PIL import Image\r\n\r\ndef cloud_storage_trigger(event, context):\r\n\r\n file = event\r\n if '.jpeg' in file['name'] or '.jpg' in file['name'] or '.png' in file['name']:\r\n def preprocess(uploaded_image):\r\n \r\n img = tf.keras.utils.load_img(uploaded_image, target_size=(150, 150))\r\n x = tf.keras.preprocessing.image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = tf.keras.applications.xception.preprocess_input(x)\r\n image = np.vstack([x])\r\n\r\n return image\r\n\r\n def download_image(bucket_name, source_image_name):\r\n \r\n storage_client = storage.Client()\r\n bucket = storage_client.bucket(bucket_name)\r\n blob = bucket.blob(source_image_name)\r\n uploaded_image = '/tmp/'+source_image_name\r\n blob.download_to_filename(uploaded_image)\r\n\r\n return uploaded_image\r\n\r\n def predict_json(project, region, model, instances, version=None):\r\n \r\n prefix = \"{}-ml\".format(region) if region else \"ml\"\r\n api_endpoint = \"https://{}.googleapis.com\".format(prefix)\r\n client_options = ClientOptions(api_endpoint=api_endpoint)\r\n service = googleapiclient.discovery.build(\r\n 'ml', 'v1', client_options=client_options)\r\n name = 'projects/{}/models/{}'.format(project, model)\r\n\r\n if version is not None:\r\n name += '/versions/{}'.format(version)\r\n\r\n response = service.projects().predict(\r\n name=name,\r\n body={'instances': instances}\r\n ).execute()\r\n\r\n if 'error' in response:\r\n raise RuntimeError(response['error'])\r\n\r\n return response['predictions']\r\n\r\n #Download 
uploaded-image from GCS Bucket\r\n uploaded_image = download_image('jeder-storage-bucket', file['name'])\r\n \r\n print(\"File Successfully Downloaded\")\r\n\r\n #Preprocess the image\r\n image = preprocess(uploaded_image)\r\n\r\n #Predict image with Machine Learning Model from AI Platform then make JSON file out of the result\r\n project_name = 'capstone-project-jeder'\r\n region = 'asia-southeast1'\r\n model = 'jeder_classification_model'\r\n version = 'version_1'\r\n instances = image.tolist()\r\n result = predict_json(project_name, region, model, instances, version)\r\n probability = result[0]\r\n top_idx = np.argmax(probability)\r\n\r\n print(\"Prediction process done!\")\r\n \r\n labels = ['Akar Kelapa', 'Bakmi', 'Bakso', 'Kue Bangkit', 'Kue Tambang', 'Nasi Goreng', 'Nastar', 'Onde Onde', 'Orek Tempe', 'Rendang', 'Sate', 'Semur Tahu']\r\n food_result = labels[top_idx]\r\n confidence = max(probability) * 100\r\n\r\n print(\"Prediction Result: \")\r\n print('Model Prediction: \\n{}'.format(food_result))\r\n print('Confidence: {:.2f} %'.format(confidence))\r\n \r\n result_json = {\"Prediction\": food_result,\"Confidence\": confidence}\r\n\r\n #Upload the result to GCS Bucket\r\n storage_client = storage.Client()\r\n bucket = storage_client.bucket('jeder-storage-bucket')\r\n blob = bucket.blob('result.json')\r\n\r\n blob.upload_from_string(\r\n data = json.dumps(result_json),\r\n content_type = 'application/json'\r\n )\r\n\r\n print(\"Result uploaded to GCS\")\r\n","repo_name":"JeDer-Bangkit2022/CloudComputing","sub_path":"cloud-function-predict/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"39789192959","text":"import cbpro\npublic_client = cbpro.PublicClient()\nmydict = public_client.get_currencies()\n\nfor n in range(0,len(mydict)):\n print(mydict[n]['id'], mydict[n]['name'],mydict[n]['details']['symbol'])\n print('------------------------------')\n\n\n# https://www.youtube.com/watch?v=tdmccmKDFFw\n# code borrowed from youtube demo by Flopperam\nimport requests\nimport json\nresponse=requests.get('https://api.coinbase.com/v2/prices/BTC-USD/spot')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Spot Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Spot Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/LTC-USD/spot')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Spot Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/BTC-USD/buy')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Buy Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/ETH-USD/buy')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Buy Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/LTC-USD/buy')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Buy Price:\", 
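# The preprocess() step above boils down to adding a batch dimension before
# calling the model; a NumPy-only sketch (the zeros array stands in for a
# decoded 150x150 RGB image):
import numpy as np

img = np.zeros((150, 150, 3), dtype=np.float32)
batch = np.vstack([np.expand_dims(img, axis=0)])
print(batch.shape)  # (1, 150, 150, 3), the shape this pipeline sends for prediction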
price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/BTC-USD/sell')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Sell Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/ETH-USD/sell')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Sell Price:\", price)\n\nresponse=requests.get('https://api.coinbase.com/v2/prices/LTC-USD/sell')\ndata = response.json()\ncurrency = data[\"data\"][\"base\"]\nprice = data[\"data\"][\"amount\"]\nprint(\"Currency:\", currency, \"Sell Price:\", price)\n\n# from coinbase.wallet.client import Client\n\n# #client = Client()\n\n# client = Client(api_key, api_secret, api_version='YYYY-MM-DD')\n\n# currency_code = 'USD' # can also use EUR, CAD, etc.\n\n# Make the request\n#price = client.get_spot_price(currency=currency_code)\n\n#print('Current bitcoin price in %s: %s' % (currency_code, price.amount))\n\n# from coinbase.wallet.client import Client\n# #client = Client(, )\n\n# price = client.get_buy_price(currency_pair = 'BTC-USD')\n\n\n#wsClient.products()\n#print(public_client.get_products())\n\n\n#PublicClient Methods\n#get_products\n# public_client.get_products()\n# public_client.get_product_order_book('ETH-USD')\n# # Get the order book at the default level.\n# public_client.get_product_order_book('BTC-USD')\n# # Get the order book at a specific level.\n# public_client.get_product_order_book('BTC-USD', level=1)\n# public_client.get_product_ticker('BTC-USD',)\n# # Get the product ticker for a specific product.\n# public_client.get_product_ticker(product_id='ETH-USD')\n# #get_product_trades (paginated)\n# # Get the product trades for a specific product.\n# # Returns a generator\n# public_client.get_product_trades(product_id='ETH-USD'\n# #get_product_historic_rates\n# public_client.get_product_historic_rates('ETH-USD')\n# # To include other parameters, see function docstring:\n# public_client.get_product_historic_rates('ETH-USD', granularity=3000)\n#get_product_24hr_stats\n# public_client.get_product_24hr_stats('ETH-USD')\n#get_currencies\n#print(public_client.get_currencies())\n\n#get_time\n#print(public_client.get_time())\n","repo_name":"jjnkns/jenbar_repo","sub_path":"coinbase_temp.py","file_name":"coinbase_temp.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"40252997373","text":"\"\"\"\nGiven an integer n, you must transform it into 0 using the following operations any number of times:\n\nChange the rightmost (0th) bit in the binary representation of n.\nChange the ith bit in the binary representation of n if the (i-1)th bit is set to 1 and the (i-2)th through 0th bits are set to 0.\nReturn the minimum number of operations to transform n into 0.\n\nExample 1:\n\nInput: n = 0\nOutput: 0\n\nExample 2:\n\nInput: n = 3\nOutput: 2\nExplanation: The binary representation of 3 is \"11\".\n\"11\" -> \"01\" with the 2nd operation since the 0th bit is 1.\n\"01\" -> \"00\" with the 1st operation.\n\nExample 3:\n\nInput: n = 6\nOutput: 4\nExplanation: The binary representation of 6 is \"110\".\n\"110\" -> \"010\" with the 2nd operation since the 1st bit is 1 and 0th through 0th bits are 0.\n\"010\" -> \"011\" with the 1st operation.\n\"011\" -> \"001\" with the 2nd operation since the 0th bit is 1.\n\"001\" -> \"000\" with the 1st operation.\n\nExample 
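# The nine near-identical requests above can be collapsed into a loop; same
# public Coinbase endpoints, just parameterized (needs network access to run):
import requests

def coinbase_price(pair, kind):
    data = requests.get(f"https://api.coinbase.com/v2/prices/{pair}/{kind}").json()["data"]
    return data["base"], data["amount"]

for pair in ("BTC-USD", "ETH-USD", "LTC-USD"):
    for kind in ("spot", "buy", "sell"):
        currency, price = coinbase_price(pair, kind)
        print("Currency:", currency, kind.capitalize(), "Price:", price)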
4:\n\nInput: n = 9\nOutput: 14\n\nExample 5:\n\nInput: n = 333\nOutput: 393\n\n\nConstraints:\n\n0 <= n <= 109\n\"\"\"\n\n\n# Bit manipulation\nclass Solution:\n def minimumOneBitOperations(self, n: int) -> int:\n memo = {0: 0}\n\n def dfs(n):\n if n not in memo:\n b = 1\n while (b << 1) <= n:\n b <<= 1\n\n memo[n] = dfs((b >> 1) ^ b ^ n) + 1 + b - 1\n return memo[n]\n\n return dfs(n)\n","repo_name":"lonely7yk/LeetCode_py","sub_path":"LeetCode1000/LeetCode1611MinimumOneBitOperationstoMakeIntegersZero.py","file_name":"LeetCode1611MinimumOneBitOperationstoMakeIntegersZero.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"13127037313","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Import data\n\nimport tensorflow as tf\nimport numpy as np\nimport time, sys, os, math\nimport tf_nn_utils as tf_utils\nimport tf_cifar10_utils\nfrom timeit import default_timer as timer\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nnp.set_printoptions(precision=4, suppress=True, threshold=1000, linewidth=500)\n\n\nclass Attacker(object):\n def __init__(self, model, max_epsilon, learning_rate, batchsize):\n self.model = model\n self.batchsize = batchsize\n self.max_epsilon = max_epsilon\n self.processed_batch_num = 0\n self.time_per_iter = 0\n self.overall_error = 0.0\n\n # placeholders\n self.input = model.input\n self.label = tf.placeholder(tf.int32, shape=[batchsize])\n self.adv_image = tf.get_variable('adv_image', shape=[batchsize, 32, 32, 3])\n self.initialization_step = tf.assign(self.adv_image, self.input)\n\n # label_mask = tf.one_hot(self.label, 10, dtype=tf.float32)\n correct_prediction = tf.equal(tf.argmax(model.logits, axis=1), tf.cast(self.label, tf.int64))\n self.error = 1 - tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # loss and gradient\n self.loss = - tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model.logits, labels=self.label))\n self.grad = tf.gradients(self.loss, self.input)[0]\n\n # define optimization step\n opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate * max_epsilon)\n self.gradient_step = opt.apply_gradients([(tf.sign(self.grad), self.adv_image)])\n\n def run(self, sess, images, labels, iternum, cl):\n start1 = start2 = timer()\n sess.run(self.initialization_step, feed_dict={self.input: images})\n adv_images = images\n confidence_level = cl * np.ones([self.batchsize, 32, 32, 3])\n\n for i in range(iternum):\n if i == 1: start2 = timer()\n sess.run(self.gradient_step,\n feed_dict={self.input: adv_images, self.label: labels, self.model.ci: confidence_level})\n adv_images = sess.run(self.adv_image)\n adv_images = np.maximum(images - self.max_epsilon, np.minimum(images + self.max_epsilon, adv_images))\n end = timer()\n\n adv_images, error = sess.run([self.adv_image, self.error],\n feed_dict={self.input: adv_images, self.label: labels,\n self.model.ci: confidence_level})\n error = round(error, 3)\n\n print('Attacker -- iternum: %d, time: %g sec, error: %g' % (iternum, end - start1, error))\n sys.stdout.flush()\n\n self.processed_batch_num += 1\n self.time_per_iter = (end - start2) / (iternum - 1) if iternum > 1 else end - start1\n self.overall_error += error\n return adv_images - images\n\n\ndef suppress(x, ci):\n def f(t, r): return tf.minimum(tf.maximum(0.01 * t, t - r), t + r)\n x_min = f(x - ci, ci * 2)\n x_max = f(x + ci, ci * 2)\n return (x_max + 
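# The memoized recursion above agrees with a known closed form: treating n as
# a Gray code and decoding it yields the operation count. Spot-checked against
# the examples in the problem statement (3 -> 2, 6 -> 4, 9 -> 14, 333 -> 393):
def minimum_one_bit_operations(n: int) -> int:
    ans = 0
    while n:
        ans ^= n
        n >>= 1
    return ans

assert [minimum_one_bit_operations(n) for n in (0, 3, 6, 9, 333)] == [0, 2, 4, 14, 393]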
x_min) / 2, (x_max - x_min) / 2\n\n\ndef relu(x, ci):\n x_max = tf.nn.relu(x + ci)\n x_min = tf.nn.relu(x - ci)\n return (x_max + x_min) / 2, (x_max - x_min) / 2\n\n\ndef sigmoid(x, ci):\n x_max = tf.nn.sigmoid(x + ci)\n x_min = tf.nn.sigmoid(x - ci)\n return (x_max + x_min) / 2, (x_max - x_min) / 2\n\n\ndef pool(x, ci):\n x = tf.nn.avg_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n ci = tf.nn.avg_pool(ci, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n return x, ci\n\n\ndef conv2d(x, ci, fout, patch_size):\n fin = x.get_shape().as_list()[-1]\n W = tf_utils.weight_variable([patch_size, patch_size, fin, fout], scale=0.1)\n b = tf_utils.bias_variable([fout])\n x = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + b\n ci = tf.nn.conv2d(ci, tf.abs(W), strides=[1, 1, 1, 1], padding='SAME')\n return x, ci\n\n\ndef fc(x, ci, fin, fout):\n W = tf_utils.weight_variable([fin, fout])\n b = tf_utils.bias_variable([fout])\n x = tf.matmul(x, W) + b\n ci = tf.matmul(ci, tf.abs(W))\n return x, ci\n\n\nclass DropNet:\n def __init__(self, params):\n self.hidden_nodes = params['hidden_nodes']\n\n def __call__(self, x, ci):\n self.input = x\n self.ci = ci\n\n # Convonlutional layer 1\n x, ci = conv2d(x, ci, fout=self.hidden_nodes[0], patch_size=6)\n x, ci = relu(x, ci)\n x, ci = suppress(x, ci)\n x, ci = pool(x, ci)\n\n # Convonlutional layer 2\n x, ci = conv2d(x, ci, fout=self.hidden_nodes[1], patch_size=6)\n x, ci = relu(x, ci)\n x, ci = suppress(x, ci)\n x, ci = pool(x, ci)\n\n # Convonlutional layer 3\n x, ci = conv2d(x, ci, fout=self.hidden_nodes[2], patch_size=6)\n x, ci = relu(x, ci)\n x, ci = suppress(x, ci)\n x, ci = pool(x, ci)\n\n shape = x.get_shape().as_list()\n n_hidden = shape[1] * shape[2] * shape[3]\n x = tf.reshape(x, [-1, n_hidden])\n ci = tf.reshape(ci, [-1, n_hidden])\n\n # # fully connected layer 1\n # x, ci = fc(x, ci, n_hidden, self.hidden_nodes[3])\n # x, ci = relu(x, ci)\n # x, ci = suppress(x, ci)\n #\n # # fully connected layer 2\n # x, ci = fc(x, ci, self.hidden_nodes[3], self.hidden_nodes[4])\n # x, ci = relu(x, ci)\n # x, ci = suppress(x, ci)\n\n # output layer\n x, ci = fc(x, ci, n_hidden, 10)\n x, ci = suppress(x, ci)\n self.logits = tf.nn.softmax(tf.clip_by_value(x, -20, 20))\n return self.logits\n\n\ndef train(params):\n data = tf_cifar10_utils.read_cifar()\n x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n ci = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n truth = tf.placeholder(tf.int32, shape=[None])\n sess = tf.Session()\n\n # define model outputs\n model = DropNet(params)\n y = model(x, ci)\n\n # define attacker\n attacker = Attacker(model=model, max_epsilon=0.03, learning_rate=1.0, batchsize=100)\n\n # training\n cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model.logits, labels=truth))\n\n if params['optimizer'] == 'momentum':\n opt = tf.train.MomentumOptimizer(learning_rate=params['learning_rate'], momentum=0.9)\n elif params['optimizer'] == 'rmsprop':\n opt = tf.train.RMSPropOptimizer(learning_rate=params['learning_rate'])\n grads_and_vars = opt.compute_gradients(cross_entropy)\n train_step = opt.apply_gradients(grads_and_vars)\n\n prediction = tf.argmax(y, 1)\n sess.run(tf.global_variables_initializer())\n time_elapsed = 0\n sys.stdout.flush()\n\n def evaluate(dataset, batchsize, max_n, cl):\n index, n = 0, min(max_n, len(dataset.images))\n error = 0\n while index < n:\n next_index = min(n, index + batchsize)\n confidence_level = cl * np.ones([next_index - index, 32, 
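# conv2d() and fc() above propagate an interval through each layer: centers go
# through W, radii (the confidence ci) through |W|. The same rule for a tiny
# dense layer in plain NumPy, with made-up numbers:
import numpy as np

W = np.array([[1.0, -2.0], [0.5, 3.0]])
x = np.array([1.0, 2.0])   # center of the input box
ci = np.array([0.1, 0.1])  # half-width of the input box
center = W @ x
radius = np.abs(W) @ ci
print(center - radius, center + radius)  # elementwise lower and upper output bounds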
32, 3])\n t = dataset.labels[index:next_index]\n p = sess.run(prediction, feed_dict={x: dataset.images[index:next_index], ci: confidence_level})\n error += (1.0 - np.mean(p == t)) * (next_index - index)\n index = next_index\n return error / n\n\n confidence_level = params['cl'] * np.ones([params['batchsize'], 32, 32, 3])\n for i in range(params['iternum']):\n if (i + 1) % 1000 == 0:\n error_train = evaluate(data.train, 1000, 10000, params['cl'])\n error_test = evaluate(data.test, 1000, 10000, params['cl'])\n print(\"%f\\titer=%d\\ttrain=%g\\ttest=%g\" % (time_elapsed, i + 1, error_train, error_test))\n\n # run attacker\n batch = data.test.next_batch(100)\n attacker.run(sess=sess, images=batch[0], labels=batch[1], iternum=1, cl=params['cl'])\n\n sys.stdout.flush()\n start = time.time()\n\n batch = data.train.next_batch(params['batchsize'])\n feed_dict = {x: batch[0], truth: batch[1], ci: confidence_level}\n train_step.run(session=sess, feed_dict=feed_dict)\n end = time.time()\n time_elapsed += end - start\n\n\ndef main(args):\n index = 1\n params = {'optimizer': 'rmsprop', 'hidden_nodes': [32, 64, 64, 128, 128], 'learning_rate': 1e-3,\n 'iternum': 100000, 'thread': 0, 'batchsize': 50, 'cl': 0.05}\n while (index < len(args)):\n if args[index] in {'--optimizer'}:\n params[args[index][2:]] = args[index + 1]\n elif args[index] in {'--hidden_nodes'}:\n params[args[index][2:]] = [int(x) for x in args[index + 1][1:-1].split(',')]\n elif args[index] in {'--dropout_rate'}:\n params[args[index][2:]] = [float(x) for x in args[index + 1][1:-1].split(',')]\n elif args[index] in {'--learning_rate'}:\n params[args[index][2:]] = float(args[index + 1])\n elif args[index] in {'--iternum', '--thread', '--batchsize'}:\n params[args[index][2:]] = int(args[index + 1])\n else:\n print('unknown option: %s' % args[index])\n return\n index += 2\n\n print('params: ' + ' '.join(('--%s %s' % item) for item in params.items()))\n train(params)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"zhangyuc/randomized-discretization","sub_path":"nips-challenge/ci_cifar10.py","file_name":"ci_cifar10.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"27026154334","text":"\"\"\"Imports new redirects from a CSV file into DynamoDB.\"\"\"\nimport csv\nimport os\nimport sys\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom redirect_utils import str2bool\n\n\n\"\"\"\nThis function receives a trigger from S3 that a new CSV file is available in the S3\nbucker. It then retrieves the CSV file and imports the records into the DynamoDB\nredirects table.\n\nConfigure these environment variables in your Lambda environment:\n1. DYNAMO_DB_ARN - The ARN of the DynamoDB instance holding the redirect data\n2. DYNAMO_DB_TABLE - The name of the DynamoDB table\n3. S3_BUCKET_ARN - The ARN of the S3 bucket that will be triggering the function\n4. 
DEBUG (Optional) - Set to \"True\" if you want debug info printed to CloudWatch\n\n\"\"\"\n\nDYNAMO_DB_ARN = os.environ[\"DYNAMO_DB_ARN\"]\nDYNAMO_DB_TABLE = os.environ[\"DYNAMO_DB_TABLE\"]\nS3_BUCKET_ARN = os.environ[\"S3_BUCKET_ARN\"]\nFIELDNAMES = (\"site\", \"from_uri\", \"redirect_to\")\n\nif \"DEBUG\" in os.environ:\n DEBUG = str2bool(os.environ[\"DEBUG\"])\nelse:\n DEBUG = False\n\ntry:\n ddb = boto3.client(\"dynamodb\")\nexcept Exception as e:\n print(\"ERROR: failed to connect to DynamoDB\")\n sys.exit(1)\n\ntry:\n s3 = boto3.client(\"s3\")\nexcept Exception as e:\n print(\"ERROR: failed to connect to S3\")\n sys.exit(1)\n\n\ndef readCsv(csvData=None):\n \"\"\"Read CSV Data.\"\"\"\n print(\"===== BEGIN readCsv() =====\")\n if not csvData:\n print(\"ERROR: NO CSV DATA\")\n return False\n csvReader = csv.DictReader(csvData, fieldnames=FIELDNAMES)\n injectResults = []\n for row in csvReader:\n injectResults.append(injectRecord(row))\n print(\"===== END readCsv() =====\")\n return injectResults\n\n\ndef injectRecord(row=[]):\n \"\"\"Inject a record into DynamoDB.\"\"\"\n if DEBUG:\n print(\"===== injectRecord() DEBUG BEGIN =====\")\n print(\"site: %s\" % row[\"site\"])\n print(\"from_uri: %s\" % row[\"from_uri\"])\n print(\"redirect_to: %s\" % row[\"redirect_to\"])\n from_uri_sanitized = row[\"from_uri\"].rstrip(\"/\")\n try:\n update_response = ddb.update_item(\n TableName=DYNAMO_DB_TABLE,\n Key={\n \"Site\": {\"S\": (\"%s\" % row[\"site\"])},\n \"URI\": {\"S\": (\"%s\" % from_uri_sanitized)},\n },\n UpdateExpression=\"SET RedirectLocation = :l\",\n ExpressionAttributeValues={\":l\": {\"S\": (\"%s\" % row[\"redirect_to\"])},},\n )\n except ClientError as e:\n if DEBUG:\n print(\"injectRecord ERROR: %s\" % e.response[\"Error\"][\"Message\"])\n return False\n else:\n if DEBUG:\n print(\"'response' from DynamoDB: %s\" % update_response)\n return update_response\n\n\ndef importFile(record=None):\n \"\"\"Import file into DynamoDB.\n\n Keyword Arguments:\n record {obj} -- Record of the S3 file to import (default: {None})\n\n Returns:\n [type] -- [description]\n \"\"\"\n s3Bucket = record[\"bucket\"]\n s3ObjectKey = record[\"object\"][\"key\"]\n importResult = None\n if DEBUG:\n print(\"===== BEGIN importFile() =====\")\n print(\"S3 Bucket: %s\" % s3Bucket)\n print(\"S3 ObjectKey: %s\" % s3ObjectKey)\n if s3Bucket[\"arn\"] != S3_BUCKET_ARN:\n print(\"SKIPPING %s! 
WRONG S3 BUCKET!!!\" % s3ObjectKey)\n print(\"===== ABORT importFile() =====\")\n return False\n try:\n s3File = s3.get_object(Bucket=s3Bucket[\"name\"], Key=s3ObjectKey)\n except Exception as e:\n print(\"ERROR IMPORTING %s: %s\" % (s3ObjectKey, e))\n print(\"===== ABORT importFile() =====\")\n return False\n\n # csvRaw = s3File[\"Body\"].read().decode()\n importResult = readCsv(s3File[\"Body\"].read().decode(\"utf-8\").split(\"\\n\"))\n if DEBUG:\n print(\"===== END importFile() =====\")\n\n return importResult\n\n\ndef lambda_handler(event, context):\n \"\"\"Run job when invoked by Lambda.\n\n Arguments:\n event {obj} -- event that was invoked\n context {obj} -- context of event that was invoked\n\n \"\"\"\n if DEBUG:\n print(\"===== BEGIN lambda_handler() =====\")\n print(\"event['Records']: %s\" % event[\"Records\"])\n finalResult = []\n filesProcessed = []\n s3Records = event[\"Records\"]\n for record in s3Records:\n filesProcessed.append(\n \"%s/%s\" % (record[\"s3\"][\"bucket\"][\"arn\"], record[\"s3\"][\"object\"][\"key\"])\n )\n finalResult = finalResult + importFile(record[\"s3\"])\n if DEBUG:\n print(\"FINAL RESULT: %s\" % finalResult)\n print(\"NUM OF IMPORTED RECORDS: %s\" % len(finalResult))\n print(\"===== END lambda_handler() =====\")\n finalMsg = {\n \"NumRecordsImported\": len(finalResult),\n \"FilesProcessed\": filesProcessed,\n }\n return finalMsg\n","repo_name":"javiergayala/lambda-redirector","sub_path":"lambda_redirect_importer.py","file_name":"lambda_redirect_importer.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"14581840425","text":"import logging\nfrom abc import ABC, abstractmethod\nfrom statistics import mean\nfrom threading import Thread\nfrom time import sleep\n\nfrom exceptions.sensor_exception import SensorException\nfrom health_check.health_check_file_manager import register_success_for_class_into_health_check_file\n\n\nclass Sensor(ABC):\n \"\"\"Base class for sensors\"\"\"\n\n SECONDS_BETWEEN_READINGS = 10\n\n def __init__(self):\n self.readings = []\n self.getting_readings = False\n\n thread = Thread(target=self.add_value_to_readings)\n thread.start()\n\n def add_value_to_readings(self):\n while self.get_true():\n try:\n sensor_name = self.__class__.__name__\n\n if self.getting_readings:\n return\n\n reading = self.get_reading()\n self.readings.append(reading)\n logging.debug(msg=f'[{sensor_name}] Obtained \"{reading}\".')\n except Exception:\n logging.exception(f'[{sensor_name}] Error while reading.')\n finally:\n sleep(self.SECONDS_BETWEEN_READINGS)\n\n @staticmethod\n def get_true():\n # Stupid method for unit tests purposes to avoid infinite loop\n return True\n\n @abstractmethod\n def get_reading(self):\n raise NotImplementedError('A sub-class must be implemented.')\n\n def get_readings_average(self):\n try:\n self.getting_readings = True\n sensor_name = self.__class__.__name__\n\n if len(self.readings) == 0 or all(x is None for x in self.readings):\n raise SensorException(class_name=sensor_name, message=f'The sensor \"{sensor_name}\" did not report any read.')\n\n logging.debug(msg=f'[{sensor_name}] Getting average from the values \"{self.readings}\"')\n average = self.get_average()\n register_success_for_class_into_health_check_file(class_name=sensor_name)\n\n return average\n finally:\n del self.readings[:]\n self.getting_readings = False\n\n def get_average(self):\n return [mean(data=row) for row in 
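# A hypothetical S3 event payload for exercising lambda_handler() locally; the
# field layout mirrors exactly what the handler reads (bucket and key names
# are made up):
fake_event = {
    "Records": [{
        "s3": {
            "bucket": {"name": "my-redirects", "arn": "arn:aws:s3:::my-redirects"},
            "object": {"key": "redirects.csv"},
        }
    }]
}
# lambda_handler(fake_event, None)  # importFile() skips it unless the arn matches S3_BUCKET_ARN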
list(zip(*self.readings))]\n","repo_name":"weather-station-project/sensors-reader","sub_path":"WeatherStationSensorsReader/sensors/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"41235326072","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nimport dirlist\n\n\nclass DirProc(QtWidgets.QFrame):\n\n def __init__(self, id_):\n super().__init__()\n\n self.layout = QtWidgets.QVBoxLayout()\n self.dir_choose_layout = QtWidgets.QHBoxLayout()\n self.dir_label = QtWidgets.QLabel('Choose directory')\n self.dir_choose_button = QtWidgets.QPushButton(\"...\")\n self.dir_list = dirlist.DirList(id_)\n self.cur_dir = '/home/alexander'\n\n self.make_ui()\n self.connect_signals()\n self.connect_slots()\n\n def resizeEvent(self, event):\n width = event.size().width()\n self.dir_label.setFixedWidth(width * 0.9)\n\n def make_ui(self):\n self.layout.setAlignment(QtCore.Qt.AlignTop)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.dir_choose_layout.addWidget(self.dir_label)\n self.dir_choose_layout.addWidget(self.dir_choose_button)\n self.dir_choose_button.setFixedWidth(25)\n self.dir_choose_button.setCursor(QtCore.Qt.PointingHandCursor)\n self.layout.addLayout(self.dir_choose_layout)\n self.layout.addWidget(self.dir_list)\n self.setLayout(self.layout)\n\n def connect_signals(self):\n self.dir_choose_button.clicked.connect(self.on_dir_button_click)\n\n def connect_slots(self):\n pass\n\n def on_dir_button_click(self):\n dirname = QtWidgets.QFileDialog.getExistingDirectoryUrl(\n self, \"Choose directory\", QtCore.QUrl(\"/home/alexander\"))\n abs_path = QtCore.QUrl.toString(dirname)\n\n if abs_path:\n self.cur_dir = abs_path\n formatted = abs_path.split(\"file://\")[1]\n self.dir_label.setText(self.pretty_label(formatted))\n self.dir_list.set_base_and_cur_dirs(formatted)\n\n @staticmethod\n def pretty_label(string):\n pieces = string.split('/')\n max_ = max(pieces, key=len)\n idx = pieces.index(max_)\n pieces[idx] = max_[0: 2] + \"...\" + max_[-3: -1]\n return '/'.join(pieces)\n","repo_name":"alshitov/laboratories","sub_path":"DirSynchro/dirproc.py","file_name":"dirproc.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"4853490457","text":"class Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n if s is None or t is None or len(s) < len(t):\n return ''\n\n count_set = dict()\n j = 0\n start = 0\n end = float('inf')\n remains = len(t)\n\n for c in t:\n count_set[c] = count_set.get(c, 0) + 1\n\n for i in range(len(s)):\n while(j < len(s) and remains > 0):\n if s[j] in count_set:\n count_set[s[j]] -= 1\n if count_set[s[j]] >= 0:\n remains -= 1\n j += 1\n \n if (remains == 0 and j - i < end - start):\n start, end = i, j\n\n if s[i] in count_set:\n count_set[s[i]] += 1\n if count_set[s[i]] > 0:\n remains += 1\n\n return '' if end == float('inf') else s[start: end]\n","repo_name":"geemaple/leetcode","sub_path":"leetcode/76.minimum-window-substring.py","file_name":"76.minimum-window-substring.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"55"} +{"seq_id":"4209765263","text":"while 1:\r\n from random import randint\r\n x = input(\"make your move: \")\r\n if x == \"rock\":\r\n x = 0\r\n if x == \"paper\":\r\n x = 1\r\n if x == 
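# Sanity check for the sliding-window Solution above, using the classic
# example input (assumes the class is in scope):
sol = Solution()
print(sol.minWindow("ADOBECODEBANC", "ABC"))  # "BANC"
print(sol.minWindow("a", "aa"))               # "", since t is longer than s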
\"scissors\":\r\n x = 2\r\n y = randint (0,2)\r\n print (f\"pc move{y}\")\r\n if y == 0:\r\n print(\"rock\")\r\n if y == 1:\r\n print(\"paper\")\r\n if y == 2:\r\n print(\"scissors\")\r\n if x == y:\r\n print(\"draw\")\r\n if x == 1 and y == 0:\r\n print(\"you win\")\r\n if x == 1 and y == 2:\r\n print(\"you lose\")\r\n if x == 2 and y == 0:\r\n print(\"you lose\")\r\n if x == 0 and y == 1:\r\n print(\"you lose\")\r\n if x == 2 and y == 1:\r\n print(\"you win\")\r\n if x == 0 and y == 2:\r\n print(\"you win\") \r\n \r\n print(\"#####################################################\")\r\n","repo_name":"amir-omidi/rock-paper-scissors","sub_path":"rock-paper-scissors.py","file_name":"rock-paper-scissors.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"7905031837","text":"\n\"\"\" \n @project LeManchot-Analysis : Multi-Modal Texture Analysis to Enhance Drone-based Thermographic Inspection of Structures\n @organization Laval University\n @lab MiViM Lab\n @supervisor Professor Xavier Maldague\n @industrial-partner TORNGATS\n\"\"\"\n\nfrom torch import stack as torch_stack\nfrom torch.utils.data import Dataset, WeightedRandomSampler\n\ndef generate_weighted_sampler(\n dataset: Dataset, w_type: str = \"squared\", replacement: bool = True\n) -> WeightedRandomSampler:\n \"\"\"\n Function to auto generate a weighted random sampler for a \n specific dataset. Dataset statistics are computed at runtime \n which can be very long procedure.\n\n Args:\n dataset (Dataset): Dataset object to balance\n w_type (str, optional): Type of weighting to apply to each sample. \n Defaults to \"squared\". Any other option will be\n a linear weighting.\n replacement (bool, optional): if ``True``, samples are drawn with replacement.\n If not, they are drawn without replacement, which means that when a sample index \n is drawn for a row, it cannot be drawn again for that row. Defaults to True.\n\n Returns:\n WeightedRandomSampler\n \"\"\"\n scale = 2 if w_type == \"squared\" else 1\n sum = 0.0\n ind_weight = list()\n for idx in range(len(dataset)):\n sample = dataset[idx]\n ind_weight.append(sample.sum(dim=(-2, -1)))\n sum += ind_weight[-1]\n\n sum = sum.reciprocal() ** scale\n ind_weight = torch_stack(ind_weight, dim=0).mul(sum)\n return WeightedRandomSampler(\n weights=ind_weight.sum(dim=1).tolist(), num_samples=len(dataset), replacement=replacement\n )","repo_name":"alvinxds/lemanchot-analysis","sub_path":"lemanchot/dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"55"} +{"seq_id":"17863263883","text":"'''\nLibrary for reflectivity calculations with neutrons.\n'''\nfrom numpy import *\nfrom . import math_utils as mu\nfrom . 
import int_lay_xmean\nfrom functools import reduce\nfrom genx.core.custom_logging import iprint\n\n\nCTYPE=complex128\n\ndef ass_X_2int(k_mj, k_mj1, k_pj, k_pj1, theta_diff):\n costd=cos(theta_diff/2.0)\n sintd=sin(theta_diff/2.0)\n X=zeros((4, 4)+k_pj.shape, dtype=CTYPE)\n X[0, 0]=costd*(k_pj1+k_pj)/2./k_pj\n X[0, 1]=-costd*(k_pj1-k_pj)/2./k_pj\n X[0, 2]=sintd*(k_pj+k_mj1)/2./k_pj\n X[0, 3]=sintd*(k_pj-k_mj1)/2./k_pj\n # X[0] = X[0]/2/k_pj\n X[1, 0]=X[0, 1] # -(costd*(k_pj1 - k_pj))/(2*k_pj)\n X[1, 1]=X[0, 0] # (costd*(k_pj1 + k_pj))/(2*k_pj)\n X[1, 2]=X[0, 3] # (sintd*(k_pj - k_mj1))/(2*k_pj)\n X[1, 3]=X[0, 2] # (sintd*(k_pj + k_mj1))/(2*k_pj)\n X[2, 0]=-(sintd*(k_pj1+k_mj))/(2.*k_mj)\n X[2, 1]=(sintd*(k_pj1-k_mj))/(2.*k_mj)\n X[2, 2]=(costd*(k_mj1+k_mj))/(2.*k_mj)\n X[2, 3]=-(costd*(k_mj1-k_mj))/(2.*k_mj)\n X[3, 0]=X[2, 1] # (sintd*(k_pj1 - k_mj))/(2*k_mj)\n X[3, 1]=X[2, 0] # -(sintd*(k_pj1 + k_mj))/(2*k_mj)\n X[3, 2]=X[2, 3] # -(costd*(k_mj1 - k_mj))/(2*k_mj)\n X[3, 3]=X[2, 2] # (costd*(k_mj1 + k_mj))/(2*k_mj)\n return X\n\ndef ass_X(k_p, k_m, theta_diff):\n ''' Make the interface transmission matrix for neutron reflection from \n a interface.\n '''\n # First is the substrate and last is the ambient (j=0)\n # The order is then [N .. j+1, j, ....0]\n k_pj1=k_p[:, :-1]\n k_pj=k_p[:, 1:]\n k_mj1=k_m[:, :-1]\n k_mj=k_m[:, 1:]\n return ass_X_2int(k_mj, k_mj1, k_pj, k_pj1, theta_diff)\n\ndef gauss(q, sigma2):\n '''Fourier transform of the interface roughness weight function\n '''\n return exp(-q**2*sigma2/2.0)\n\ndef include_sigma(X, k_p, k_m, sigma, w=gauss):\n '''Function to include roughness into the interface matrix.\n '''\n sigma2=sigma[..., :-1]**2\n k_pj1=k_p[:, :-1]\n k_pj=k_p[:, 1:]\n k_mj1=k_m[:, :-1]\n k_mj=k_m[:, 1:]\n X[0, 0]=X[0, 0]*w(k_pj-k_pj1, sigma2)\n X[0, 1]=X[0, 1]*w(k_pj+k_pj1, sigma2)\n X[0, 2]=X[0, 2]*w(k_pj-k_mj1, sigma2)\n X[0, 3]=X[0, 3]*w(k_pj+k_mj1, sigma2)\n X[1, 0]=X[0, 1] # X[1,0]*w(k_pj + k_pj1, sigma2)\n X[1, 1]=X[0, 0] # X[1,1]*w(k_pj - k_pj1, sigma2)\n X[1, 2]=X[0, 3] # X[1,2]*w(k_pj + k_mj1, sigma2)\n X[1, 3]=X[0, 2] # X[1,3]*w(k_pj - k_mj1, sigma2)\n X[2, 0]=X[2, 0]*w(k_mj-k_pj1, sigma2)\n X[2, 1]=X[2, 1]*w(k_mj+k_pj1, sigma2)\n X[2, 2]=X[2, 2]*w(k_mj-k_mj1, sigma2)\n X[2, 3]=X[2, 3]*w(k_mj+k_mj1, sigma2)\n X[3, 0]=X[2, 1] # X[3,0]*w(k_mj + k_pj1, sigma)\n X[3, 1]=X[2, 0] # X[3,1]*w(k_mj - k_pj1, sigma)\n X[3, 2]=X[2, 3] # X[3,2]*w(k_mj + k_mj1, sigma)\n X[3, 3]=X[2, 2] # X[3,3]*w(k_mj - k_mj1, sigma)\n return X\n\ndef ass_P(k_p, k_m, d):\n ''' Make the layer proagation matrix for a layer.\n '''\n P=zeros((4, 4)+k_p.shape, dtype=CTYPE)\n P[0, 0]=exp(-1.0J*k_p*d)\n P[1, 1]=1/P[0, 0] # exp(1.0J*k_p*d)\n P[2, 2]=exp(-1.0J*k_m*d)\n P[3, 3]=1/P[2, 2] # exp(1.0J*k_m*d)\n return P\n\ndef Refl(Q, Vp, Vm, d, M_ang, sigma=None, return_int=True):\n '''A quicker implementation than the ordinary slow implementaion in Refl\n Calculates spin-polarized reflectivity according to S.J. Blundell \n and J.A.C. Bland Phys rev. B. vol 46 3391 (1992)\n The algorithm assumes that the first element in the arrays represents\n the substrate and the last the ambient layer.\n Input parameters: Q : Scattering vector in reciprocal \n angstroms Q=4*pi/lambda *sin(theta)\n Vp: Neutron potential for spin up\n Vm: Neutron potential for spin down\n d: layer thickness\n M_ang: Angle of the magnetic \n moment(radians!) M_ang=0 =>M//neutron spin\n sigma: The roughness of the upper interface.\n return_int: Flag for returning the instensity, default=True. 
If False return the amplitudes.\n Returns: (Ruu,Rdd,Rud,Rdu)\n (up-up,down-down,up-down,down-up)\n '''\n # Assume first element=substrate and last=ambient!\n k_amb=Q[:, newaxis]/2.0\n Vp=Vp.astype(complex128);Vm=Vm.astype(complex128)\n if M_ang[-1]!=0:\n raise ValueError(\"The magnetization in the ambient layer has to be in polarization direction\")\n if Vp[-1]!=0 or Vm[-1]!=0:\n # Ambient not vacuum\n raise ValueError(\"The SLD in the ambient layer has to be zero, apply renormalization first\")\n if len(Vp)==2:\n # Algorithm breaks without a layer, so add an empty one\n Vp=hstack([Vp, [Vp[-1]]])\n Vm=hstack([Vm, [Vm[-1]]])\n M_ang=array([M_ang[0], 0., 0.], dtype=float64)\n d=array([d[0], 10., d[1]], dtype=float64)\n if sigma is not None:\n sigma=array([sigma[0], sigma[0], sigma[1]], dtype=float64)\n # Wavevectors in the layers\n k_p=sqrt(k_amb**2-Vp)\n k_m=sqrt(k_amb**2-Vm)\n # Angular difference between the magnetization\n theta_diff=M_ang[1:]-M_ang[:-1]\n # if sigma is None:\n # sigma = zeros(d.shape)\n # Assemble the interface reflectivity matrix\n X=ass_X(k_p, k_m, theta_diff)\n if sigma is not None:\n X=include_sigma(X, k_p, k_m, sigma)\n # Assemble the layer propagation matrices\n P=ass_P(k_p, k_m, d)\n # Multiply the propagation matrices with the interface matrix\n PX=mu.dot4_Adiag(P[..., 1:-1], X[..., :-1])\n # Multiply up the sample matrix\n # print 'X: ', X[:,:, 0, -1]\n M=mu.dot4(X[..., -1], reduce(mu.dot4, rollaxis(PX, 3)[::-1]))\n # print M.shape\n # print 'M: ', M[:,:, 0]\n # print 'denom: ', M[0,0]*M[2,2]-M[0,2]*M[2,0]\n denom=M[0, 0]*M[2, 2]-M[0, 2]*M[2, 0]\n Ruu=(M[1, 0]*M[2, 2]-M[1, 2]*M[2, 0])/denom\n Rud=(M[3, 0]*M[2, 2]-M[3, 2]*M[2, 0])/denom\n Rdu=(M[1, 2]*M[0, 0]-M[1, 0]*M[0, 2])/denom\n Rdd=(M[3, 2]*M[0, 0]-M[3, 0]*M[0, 2])/denom\n\n if return_int:\n return abs(Ruu)**2, abs(Rdd)**2, abs(Rud)**2, abs(Rdu)**2\n else:\n return Ruu, Rdd, Rud, Rdu\n\ndef Refl_int_lay(Q, V0, Vmag, d, M_ang, sigma, dmag_u, dd_u, M_ang_u, sigma_u,\n dmag_l, dd_l, M_ang_l, sigma_l, return_int=True):\n '''A quicker implementation than the ordinary slow implementaion in Refl\n Calculates spin-polarized reflectivity according to S.J. Blundell\n and J.A.C. Bland Phys rev. B. vol 46 3391 (1992)\n The algorithm assumes that the last element in the arrays represents\n the substrate and the first the ambient layer.\n Input parameters: Q : Scattering vector in reciprocal\n angstroms Q=4*pi/lambda *sin(theta)\n Vp: Neutron potential for spin up\n Vm: Neutron potential for spin down\n d: layer thickness\n M_ang: Angle of the magnetic\n moment(radians!) 
M_ang=0 =>M//nuetron spin\n sigma: The roughness of the upper interface.\n The subscript l and u denotes the lower and upper interface\n , respectively.\n Returns: (Ruu,Rdd,Rud,Rdu)\n (up-up,down-down,up-down,down-up)\n '''\n Vp=V0+Vmag\n Vm=V0-Vmag\n Vp_u=V0+Vmag*(1.+dmag_u)\n Vm_u=V0-Vmag*(1.+dmag_u)\n Vp_l=V0+Vmag*(1.+dmag_l)\n Vm_l=V0-Vmag*(1.+dmag_l)\n # Assume last element=substrate and first=ambient!\n k_amb=Q[:, newaxis]/2.0\n # Wavevectors in the layers\n kp=sqrt(k_amb**2-Vp).astype(complex128)\n km=sqrt(k_amb**2-Vm).astype(complex128)\n kp_u=sqrt(k_amb**2-Vp_u).astype(complex128)\n km_u=sqrt(k_amb**2-Vm_u).astype(complex128)\n kp_l=sqrt(k_amb**2-Vp_l).astype(complex128)\n km_l=sqrt(k_amb**2-Vm_l).astype(complex128)\n # Angular difference between the magnetization\n theta_diff=M_ang[:-1]-M_ang[1:]\n theta_diff_lu=M_ang_l[:-1]-M_ang_u[1:]\n theta_diff_l=M_ang[:-1]-M_ang_l[:-1]\n theta_diff_u=M_ang_u[1:]-M_ang[1:]\n\n X_lu=ass_X_2int(km_l[:, :-1], km_u[:, 1:], kp_l[:, :-1], kp_u[:, 1:], theta_diff_lu)\n X_l=ass_X_2int(km[:, :-1], km_l[:, :-1], kp[:, :-1], kp_l[:, :-1], theta_diff_l)\n X_u=ass_X_2int(km_u[:, 1:], km[:, 1:], kp_u[:, 1:], kp[:, 1:], theta_diff_u)\n\n X=int_lay_xmean.calc_neu_Xmean(X_l, X_lu, X_u, km, kp, km_l, kp_l, km_u, kp_u, dd_u, dd_l, sigma, sigma_l, sigma_u)\n\n # Assemble the layer propagation matrices\n P=ass_P(kp, km, d-dd_u-dd_l)\n # Multiply the propagation matrices with the interface matrix\n PX=mu.dot4_Adiag(P[..., 1:-1], X[..., 1:])\n # Multiply up the sample matrix\n M=mu.dot4(X[..., 0], reduce(mu.dot4, rollaxis(PX, 3)))\n # print M.shape\n denom=M[0, 0]*M[2, 2]-M[0, 2]*M[2, 0]\n Ruu=(M[1, 0]*M[2, 2]-M[1, 2]*M[2, 0])/denom\n Rud=(M[3, 0]*M[2, 2]-M[3, 2]*M[2, 0])/denom\n Rdu=(M[1, 2]*M[0, 0]-M[1, 0]*M[0, 2])/denom\n Rdd=(M[3, 2]*M[0, 0]-M[3, 0]*M[0, 2])/denom\n\n if return_int:\n return abs(Ruu)**2, abs(Rdd)**2, abs(Rud)**2, abs(Rdu)**2\n else:\n return Ruu, Rdd, Rud, Rdu\n\nfrom . import USE_NUMBA\n\nif USE_NUMBA:\n # try to use numba to speed up the calculation intensive functions:\n try:\n from .neutron_numba import Refl\n except Exception as e:\n iprint('Could not use numba, no speed up from JIT compiler:\\n'+str(e))\n","repo_name":"aglavic/genx","sub_path":"genx/genx/models/lib/neutron_refl.py","file_name":"neutron_refl.py","file_ext":"py","file_size_in_byte":9420,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"55"} +{"seq_id":"16119256124","text":"import math\nimport cmath\n\nfrom numpy import fft\n\nfrom SignalIntegrity.Lib.Exception import SignalIntegrityExceptionWaveform\nfrom SignalIntegrity.Lib.FrequencyDomain.FrequencyDomain import FrequencyDomain\nfrom SignalIntegrity.Lib.TimeDomain.Waveform.Waveform import Waveform\nfrom SignalIntegrity.Lib.TimeDomain.Waveform.SineWaveform import SineWaveform\nfrom SignalIntegrity.Lib.ChirpZTransform.ChirpZTransform import CZT\nfrom SignalIntegrity.Lib.TimeDomain.Waveform.TimeDescriptor import TimeDescriptor\n\nclass FrequencyContent(FrequencyDomain):\n \"\"\"Handles frequency content of waveforms. \n This is the frequency content view of a waveform. In other words, it assumes that a waveform is an actual waveform and\n contains the complex values of sinusoids that, if added together, would make up the waveform. 
This is the\n opposite of the FrequencyResponse() view.\n @see FrequencyResponse\n \"\"\"\n R=50.0\n P=1e-3\n LogRP10=10.*math.log10(R*P)\n dB3=20*math.log10(math.sqrt(2))\n dB6=20*math.log10(0.5)\n def __init__(self,wf,fd=None):\n \"\"\"Constructor\n @param wf in instance of class Waveform\n @param fd (optional) an instance of class FrequencyList (defaults to None)\n @remark initializes itself internally by computing the frequency content of the waveform.\n\n If fd is None then the frequency descriptor is simply the frequency descriptor corresponding to the time\n descriptor of the waveform and the frequency content is computed from the DFT.\n\n Otherwise, the CZT is used to compute the frequency content and the time descriptor corresponds to the\n frequency descriptor.\n\n the time descriptor and frequency descriptor are retained so a waveform can be obtained from the frequency content.\n\n @note the frequency content is scaled differently from the raw DFT or CZT outputs in that the absolute value of each\n complex number in the frequency content represents the amplitude of a cosine wave. This is not true with the raw\n DFT output and scaling things this way helps in the proper interpretation of the frequency content without having\n to think about the vagaries of the DFT.\n\n @see TimeDescriptor\n @see FrequencyList\n @see ChirpZTransform\n \"\"\"\n td=wf.td\n if fd is None:\n X=fft.fft(wf.Values())\n K=int(td.K)\n Keven=(K//2)*2 == K\n fd=td.FrequencyList()\n else:\n # pragma: silent exclude\n if not fd.EvenlySpaced():\n raise SignalIntegrityExceptionWaveform('cannot generate frequency content')\n # pragma: include\n K=fd.N*2\n Keven=True\n X=CZT(wf.Values(),td.Fs,0,fd.Fe,fd.N,True)\n td=TimeDescriptor(td.H,fd.N*2,fd.Fe*2.)\n FrequencyDomain.__init__(self,fd,[X[n]/K*\\\n (1. if (n==0 or ((n==fd.N) and Keven)) else 2.)*\\\n cmath.exp(-1j*2.*math.pi*fd[n]*td.H) for n in range(fd.N+1)])\n self.td=td\n def Values(self,unit=None):\n \"\"\"frequency content values\n @param unit (optional) string containing the unit for the values desired.\n @return a list of complex values representing the frequency content.\n @remark\n Valid frequency content units are:\\n\n - 'rms' - the root-mean-squared (rms) value.\n - 'dBm' - the values in decibels were 0 dBm corresponds to the voltage needed to deliver\n 1 mW to a 50 ohm load. It's computed as 20*Log(rms)+13.010.\n - 'dBmPerHz' - the spectral density in dBm/Hz.\n\n If no unit is specified, the complex frequency content is returned.\n If no valid frequency content units are found, then it defers to the FrequencyDomain base class.\n\n @see FrequencyDomain.\n \"\"\"\n if unit=='rms':\n Keven=(self.td.K/2)*2==self.td.K\n A=FrequencyDomain.Values(self,'mag')\n return [A[n]/(1 if (n==0 or ((n==self.m_f.N) and Keven))\n else math.sqrt(2)) for n in range(len(A))]\n elif unit=='dBm':\n return [-3000. 
if r < 1e-15 else 20.*math.log10(r)-self.LogRP10\n for r in self.Values('rms')]\n elif unit=='dBmPerHz':\n Keven=(self.td.K/2)*2==self.td.K\n Deltaf=self.m_f.Fe/self.m_f.N\n adder=-10*math.log10(Deltaf)\n dBm=self.Values('dBm')\n return [dBm[n]+adder+\n (self.dB3 if (n==0 or ((n==self.m_f.N) and Keven))\n else 0) for n in range(len(dBm))]\n else: return FrequencyDomain.Values(self,unit)\n def Waveform(self,td=None):\n \"\"\"Computes the time-domain waveform using IDFT methods\n @param td (optional) instance of class TimeDescriptor declaring the time descriptor of the waveform to produce.\n @return wf instance of class Waveform corresponding to the frequency content.\n @note\n If td is None then the time descriptor corresponding to the frequency descriptor is used.\\n\n The waveform produced is essentially the inverse process of class initialization.\\n\n @see WaveformFromDefinition()\n \"\"\"\n Keven=(self.td.K//2)*2==self.td.K\n X=self.Values()\n X=[X[n]*self.td.K*\\\n (1. if (n==0 or ((n==self.m_f.N) and Keven)) else 0.5)*\\\n cmath.exp(1j*2.*math.pi*self.m_f[n]*self.td.H)\n for n in range(self.m_f.N+1)]\n if Keven:\n X2=[X[self.m_f.N-n].conjugate() for n in range(1,self.m_f.N)]\n else:\n X2=[X[self.m_f.N-n+1].conjugate() for n in range(1,self.m_f.N+1)]\n X.extend(X2)\n x=[xk.real for xk in fft.ifft(X).tolist()]\n wf=Waveform(self.td,x)\n if not td is None:\n wf=wf.Adapt(td)\n return wf\n def WaveformFromDefinition(self,td=None):\n \"\"\"Computes the time-domain waveform using sums of cosines\n @param td instance of class TimeDescriptor declaring the time descriptor of the waveform to produce.\n @return wf instance of class Waveform corresponding to the frequency content.\n @note\n If td is None then the time descriptor corresponding to the frequency descriptor is used.\\n\n The waveform produced is essentially the inverse process of __init__().\\n\n This function should produce the exact same result as the Waveform() method, and is slow, but clearly\n written out to see how the waveform is produced by summing sinusoids. 
It used to essentially document\n the class.\\n\n @see Waveform().\n \"\"\"\n absX=self.Values('mag')\n theta=self.Values('deg')\n wf=Waveform(self.td)\n for n in range(self.m_f.N+1):\n wf=wf+SineWaveform(self.td,Frequency=self.m_f[n],\n Amplitude=absX[n],Phase=theta[n]+90)\n if not td is None:\n wf=wf.Adapt(td)\n return wf\n","repo_name":"TeledyneLeCroy/SignalIntegrity","sub_path":"SignalIntegrity/Lib/FrequencyDomain/FrequencyContent.py","file_name":"FrequencyContent.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"55"} +{"seq_id":"18438520275","text":"import asyncio\nimport configparser\nimport datetime\nimport re\nfrom datetime import datetime\nfrom typing import *\n\nimport aiomysql\nimport math\nimport pandas as pd\nfrom pykrx import stock\nimport time\n\nfrom calculate_target_point.load_stock_to_buy import load_stock_to_buy_for_simulation\n\nconfig = configparser.ConfigParser()\nconfig.read(['../db_conf.ini', '/var/jenkins_home/python-script/db_conf.ini'])\ndb_config: MutableMapping[str, str] = dict(config['mysql'])\n\n\nasync def load_stocks_info_after_target_date(loop: asyncio.AbstractEventLoop, db_name: str, target_date: str, market_code_list: list) -> pd.DataFrame:\n assert len(market_code_list) > 0\n\n pool = await aiomysql.create_pool(host=db_config['host'], port=int(db_config['port']), user=db_config['user'], password=db_config['password'], db=db_config['db'], autocommit=True, loop=loop)\n code_list = ', '.join(f\"'{code}'\" for code in market_code_list)\n async with pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(f\"\"\"\n select \n *\n from {db_name}\n where\n market_date >= '{target_date}'\n and market_code in ({code_list})\n order by market_code, market_date\n \"\"\")\n result = await cur.fetchall()\n columns = [x[0] for x in cur.description]\n\n pool.close()\n\n df = pd.DataFrame(\n data=result,\n columns=columns\n )\n return df\n\n\nclass Simulator:\n def __init__(\n self,\n start_date: str,\n end_date: str,\n seed: int,\n profit_rate: float,\n stop_loss_rate: Optional[int],\n loss_method: Optional[str],\n max_holding_period: int,\n buy_point_to_open_price: float,\n additional_buy_point_to_price: float,\n price_db: str,\n target_db: str\n ):\n self.start_date = start_date\n self.end_date = end_date\n self.seed = seed\n self.account_balance = seed\n self.profit_rate = profit_rate\n self.stop_loss_rate = stop_loss_rate\n self.loss_method = loss_method\n self.max_holding_period = max_holding_period\n self.buy_point_to_open_price = buy_point_to_open_price\n self.additional_buy_point_to_price = additional_buy_point_to_price\n self.price_db = price_db\n self.target_db = target_db\n self.business_days = [str(date.date()) for date in (stock.get_previous_business_days(fromdate=re.sub('[^0-9]', '', self.start_date), todate=re.sub('[^0-9]', '', self.end_date)))]\n self._lv1_max_bucket_size = 9\n self._lv2_max_bucket_size = 8\n self._lv3_max_bucket_size = 4\n self._lv4_max_bucket_size = 2\n self._lv1_price_size = math.floor(self.seed * 0.03) # (9,8,4,2 0.03)\n self._lv2_price_size = self._lv1_price_size\n self._lv3_price_size = self._lv2_price_size * 2\n self._lv4_price_size = self._lv3_price_size * 2\n self._hold_stock_dict = dict()\n self._hold_stock_price_dict = dict()\n self._profit = 0\n self.result: Dict = {}\n\n self._init_result_dataframe()\n self._simulator()\n\n @staticmethod\n def _get_date_diff(start_date: str, end_date: str) -> int:\n return 
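# The scaling described in the FrequencyContent docstring, where each bin's
# magnitude is a cosine amplitude, can be checked with a bare DFT on a unit
# cosine sampled over one period:
import numpy as np

t = np.arange(8) / 8.0
x = np.cos(2 * np.pi * t)  # amplitude 1, one cycle
X = np.fft.fft(x) / len(x)
print(2 * abs(X[1]))       # ~1.0: doubling the single-sided bin recovers the amplitude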
(datetime.strptime(end_date, '%Y-%m-%d') - datetime.strptime(start_date, '%Y-%m-%d')).days  # positive when end_date falls after start_date\n\n    @staticmethod\n    def _list_to_str(list_data: list) -> str:\n        return ', '.join(list_data)\n\n    def _search_stock_to_buy(self, target_date: str) -> list:\n        loop = asyncio.get_event_loop()\n        df = loop.run_until_complete(load_stock_to_buy_for_simulation(loop, self.target_db, target_date))\n        target_code_list = df['market_code'].values.tolist()\n\n        return target_code_list\n\n    def _load_stocks_info(self, target_date: str, market_code_list: List[str]) -> pd.DataFrame:\n        loop = asyncio.get_event_loop()\n        df = loop.run_until_complete(load_stocks_info_after_target_date(loop, self.price_db, target_date, market_code_list))\n        return df\n\n    def _init_result_dataframe(self):\n        for business_day in self.business_days:\n            self.result.update({\n                business_day: {\n                    '평가 금액': 0,\n                    '매수 종목': [],\n                    '추가 매수 종목': [],\n                    '이익 매도 종목': [],\n                    '기간 초과 매도 종목': [],\n                    '매도 이익': 0,\n                    '수익률': 0.0\n                }\n            })\n\n    def _get_hold_days_from_last_buy(self, market_code: str, target_date: str):\n        last_buy_date = self._hold_stock_dict[market_code]['매수일'] if self._hold_stock_dict[market_code]['최종_매수일'] is None else self._hold_stock_dict[market_code]['최종_매수일']\n        return self._get_date_diff(last_buy_date, target_date)\n\n    def _get_remain_bucket_size(self, level: int) -> int:\n        current_lv1_size = 0\n        current_lv2_size = 0\n        current_lv3_size = 0\n        current_lv4_size = 0\n\n        for value in self._hold_stock_dict.values():\n            if value['level'] == 1:\n                current_lv1_size += 1\n            elif value['level'] == 2:\n                current_lv1_size += 1\n                current_lv2_size += 1\n            elif value['level'] == 3:\n                current_lv1_size += 1\n                current_lv2_size += 1\n                current_lv3_size += 1\n            elif value['level'] == 4:\n                current_lv1_size += 1\n                current_lv2_size += 1\n                current_lv3_size += 1\n                current_lv4_size += 1\n\n        if level == 1:\n            return self._lv1_max_bucket_size - current_lv1_size\n        elif level == 2:\n            return self._lv2_max_bucket_size - current_lv2_size\n        elif level == 3:\n            return self._lv3_max_bucket_size - current_lv3_size\n        elif level == 4:\n            return self._lv4_max_bucket_size - current_lv4_size\n        else:\n            return 0\n\n    def _get_price_size_for_bucket(self, level: int) -> int:\n        if level == 1:\n            return self._lv1_price_size\n        elif level == 2:\n            return self._lv2_price_size\n        elif level == 3:\n            return self._lv3_price_size\n        elif level == 4:\n            return self._lv4_price_size\n        else:\n            return 0\n\n    def _check_is_hold_stock(self, market_code: str) -> bool:\n        if self._hold_stock_dict.get(market_code) is None:\n            return False\n        else:\n            return True\n\n    def _hold_stock_before_date(self, target_date) -> List[str]:\n        target_code_list = []\n        for code in self._hold_stock_dict.keys():\n            if self._hold_stock_dict[code]['매수일'] <= target_date:\n                target_code_list.append(code)\n\n        return target_code_list\n\n    def _buy(self, current_date: str, next_date: str) -> None:\n        if self._get_remain_bucket_size(level=1) > 0:\n            target_code_list = self._search_stock_to_buy(current_date)\n\n            if len(target_code_list) > 0:\n                df_stock_price_info = self._load_stocks_info(next_date, target_code_list).astype(str)\n\n                for market_code in target_code_list:\n                    filtered_df = df_stock_price_info[(df_stock_price_info['market_code'] == market_code) & (df_stock_price_info['market_date'] == next_date)]\n                    if not filtered_df.empty:\n                        open_price = filtered_df['open_price']\n                    else:\n                        continue\n\n                    target_buy_price = math.floor(int(open_price) * (1 + self.buy_point_to_open_price))\n                    low_price = int(filtered_df['low_price'])\n\n                    if target_buy_price > 0 and target_buy_price >= 
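# The pyramid position sizing above in concrete numbers, using the seed value
# from the __main__ block below (level 3 doubles the level-1 slot and level 4
# doubles it again):
import math

seed = 100000000
lv1 = lv2 = math.floor(seed * 0.03)  # 3,000,000 per entry-level slot
lv3 = lv2 * 2                        # 6,000,000
lv4 = lv3 * 2                        # 12,000,000
print(lv1, lv3, lv4)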
low_price and self._get_remain_bucket_size(level=1) > 0 and self._check_is_hold_stock(market_code) is False:\n self._hold_stock_dict.update({\n market_code: {\n '매수일': next_date,\n '최종_매수일': None,\n '보유기간': 1,\n '평단가': target_buy_price,\n '보유개수': self._lv1_price_size // target_buy_price,\n '총금액': target_buy_price * (self._lv1_price_size // target_buy_price),\n 'level': 1\n }\n })\n self._hold_stock_price_dict.update({\n market_code: df_stock_price_info[df_stock_price_info['market_code'] == market_code]\n })\n self.result[next_date]['매수 종목'].append(market_code)\n\n def _additional_buy(self, current_date: str, next_date: str):\n hold_stock_code_list = self._hold_stock_before_date(current_date)\n target_code_list = []\n for code in hold_stock_code_list:\n if self._hold_stock_dict[code]['level'] <= 3:\n target_code_list.append(code)\n\n for market_code in target_code_list:\n df_hold_stock_price_info = self._hold_stock_price_dict[market_code]\n df_current_info = df_hold_stock_price_info[df_hold_stock_price_info['market_date'] == current_date]\n df_next_info = df_hold_stock_price_info[df_hold_stock_price_info['market_date'] == next_date]\n bucket_level = self._hold_stock_dict[market_code]['level']\n\n if df_current_info.empty or df_next_info.empty:\n continue\n\n if self._get_remain_bucket_size(level=bucket_level+1) > 0 and int(df_current_info['close_price']) <= self._hold_stock_dict[market_code]['평단가'] * (1 + self.additional_buy_point_to_price):\n next_open_price = int(df_next_info['open_price'])\n\n self._hold_stock_dict[market_code]['최종_매수일'] = next_date\n self._hold_stock_dict[market_code]['평단가'] = \\\n ((self._hold_stock_dict[market_code]['평단가'] * self._hold_stock_dict[market_code]['보유개수']) + ((self._get_price_size_for_bucket(level=bucket_level) // next_open_price) * next_open_price)) / \\\n (self._hold_stock_dict[market_code]['보유개수'] + self._get_price_size_for_bucket(level=bucket_level) // next_open_price)\n self._hold_stock_dict[market_code]['보유개수'] += self._get_price_size_for_bucket(level=bucket_level) // next_open_price\n self._hold_stock_dict[market_code]['총금액'] = self._hold_stock_dict[market_code]['평단가'] * self._hold_stock_dict[market_code]['보유개수']\n self._hold_stock_dict[market_code]['level'] += 1\n\n self.result[next_date]['추가 매수 종목'].append(market_code)\n\n def _sell(self, target_date):\n hold_stock_code_list = self._hold_stock_before_date(target_date)\n\n for market_code in hold_stock_code_list:\n df_hold_stock_price_info = self._hold_stock_price_dict[market_code]\n df_current_info = df_hold_stock_price_info[df_hold_stock_price_info['market_date'] == target_date]\n\n if not df_current_info.empty:\n high_price = int(df_current_info['high_price'])\n close_price = int(df_current_info['close_price'])\n target_sell_point = math.floor(self._hold_stock_dict[market_code]['평단가'] * (1 + self.profit_rate))\n\n if target_sell_point <= high_price:\n profit = (target_sell_point * self._hold_stock_dict[market_code]['보유개수']) - (self._hold_stock_dict[market_code]['총금액'])\n self._profit += profit\n self._hold_stock_dict.pop(market_code)\n self.result[target_date]['이익 매도 종목'].append(market_code)\n self.result[target_date]['매도 이익'] += profit\n self._hold_stock_price_dict.pop(market_code)\n continue\n\n if self._get_hold_days_from_last_buy(market_code, target_date) >= self.max_holding_period:\n profit = (close_price - self._hold_stock_dict[market_code]['평단가']) * self._hold_stock_dict[market_code]['보유개수']\n self._profit += profit\n self._hold_stock_dict.pop(market_code)\n 
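# A minimal sketch (hypothetical numbers) of the averaging-down update in _additional_buy:
# the new average price (평단가) is the share-weighted mean of the existing position and the
# new lot, where the lot size is budget // price because only whole shares are bought.
def average_down(avg_price, qty, lot_budget, next_open_price):
    lot_qty = lot_budget // next_open_price
    new_qty = qty + lot_qty
    new_avg = (avg_price * qty + next_open_price * lot_qty) / new_qty
    return new_avg, new_qty

avg, qty = average_down(10_000, 30, 300_000, 8_000)   # hold 30 @ 10k, add ~300k won @ 8k
assert qty == 67 and round(avg) == 8896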
self._hold_stock_price_dict.pop(market_code)\n self.result[target_date]['기간 초과 매도 종목'].append(market_code)\n self.result[target_date]['매도 이익'] += profit\n\n def _update_account_balance(self, target_date):\n hold_stock_code_list = self._hold_stock_before_date(target_date)\n\n close_price_sum = 0\n for market_code in hold_stock_code_list:\n df_hold_stock_price_info = self._hold_stock_price_dict[market_code]\n df_current_info = df_hold_stock_price_info[df_hold_stock_price_info['market_date'] == target_date]\n\n if not df_current_info.empty:\n close_price = int(df_current_info['close_price'])\n print('>> 보유종목 :', market_code, '\\t평단가 :', round(self._hold_stock_dict[market_code]['평단가']), '\\t보유개수 :', self._hold_stock_dict[market_code]['보유개수'], '\\t종가 :', close_price, '\\t\\tLV :', self._hold_stock_dict[market_code]['level'])\n close_price_sum += (close_price - self._hold_stock_dict[market_code]['평단가']) * self._hold_stock_dict[market_code]['보유개수']\n\n self.account_balance = self.seed + self._profit + close_price_sum\n self.result[target_date]['평가 금액'] = self.account_balance\n self.result[target_date]['수익률'] = round((self.account_balance - self.seed) / self.seed * 100, 3)\n\n def _simulator(self):\n for current_date, next_date in zip(self.business_days, self.business_days[1:]):\n print(current_date)\n # 매수\n self._buy(current_date, next_date)\n\n # 추가 매수\n self._additional_buy(current_date, next_date)\n\n # 매도\n self._sell(current_date)\n\n # 계좌 잔액 업데이트\n self._update_account_balance(current_date)\n\n def output(self):\n df = pd.DataFrame.from_dict(self.result, orient='index')\n df['매수 종목'] = df['매수 종목'].apply(lambda x: ', '.join(x))\n df['추가 매수 종목'] = df['추가 매수 종목'].apply(lambda x: ', '.join(x))\n df['이익 매도 종목'] = df['이익 매도 종목'].apply(lambda x: ', '.join(x))\n df['기간 초과 매도 종목'] = df['기간 초과 매도 종목'].apply(lambda x: ', '.join(x))\n\n df.reset_index(inplace=True)\n df = df.rename(columns={'index': '날짜'})\n return df[:-1]\n\n\nif __name__ == \"__main__\":\n s = Simulator(\n start_date='2022-01-01',\n end_date='2023-04-06',\n seed=100000000,\n profit_rate=0.15,\n stop_loss_rate=None,\n loss_method=None,\n max_holding_period=180,\n buy_point_to_open_price=-0.02,\n additional_buy_point_to_price=-0.20,\n price_db='stock_info_2022_2023',\n target_db='stock_info_2022_2023_bollinger_5'\n )\n s.output().to_excel('./result.xlsx', index=False)","repo_name":"bero2/Asset-Management-Service-Batch","sub_path":"simulator/stock_simulator.py","file_name":"stock_simulator.py","file_ext":"py","file_size_in_byte":15483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17056586584","text":"# array\r\narr=[]\r\narr.append(1)\r\narr.append(2)\r\narr.append(3)\r\narr.append(4)\r\n\r\n# make copy of the array\r\narr2 = arr.copy()\r\n\r\n# remove all elements from array\r\narr.clear()\r\n\r\n# return count of number of time that element is occuring in that array\r\nprint(arr2.count(1))\r\n\r\n# when you pass something in extend, it iterates over that object and then add the output to the list\r\narr.extend(\"apple\") # it will add all characters in the string and add to the list\r\narr.extend([1,2,3,4,5,6,7,8,9]) # it will add the elements of the list to that list\r\n\r\n# index returns the index at which the element is located in the list\r\nprint(arr.index(4))\r\n\r\n# insert will insert the element ant the given index (index first, element second)\r\narr.insert(3,51)\r\n\r\n# pop returns the last element and deletes it from the list\r\nprint(arr.pop())\r\n\r\n# 
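# Sketch of the result-table assembly used by Simulator.output() above (toy rows, pandas
# assumed): a date-keyed dict of dicts becomes one row per date via orient='index', and the
# list-valued columns are flattened to comma-separated strings before export.
import pandas as pd

result = {
    "2022-01-03": {"bought": ["A005930", "A000660"], "profit": 0},
    "2022-01-04": {"bought": [], "profit": 1500},
}
df = pd.DataFrame.from_dict(result, orient="index")
df["bought"] = df["bought"].apply(", ".join)
df = df.reset_index().rename(columns={"index": "date"})
print(df.to_string(index=False))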
remove removes that element. Enter that element not its index. It returns None\r\nprint(arr.remove(51))\r\n\r\n# reverse reverses the list. It also returns None\r\nprint(arr.reverse())\r\n\r\n# sort in ascending or increasing order. It works only when the elements are of the same type\r\narr2 = ['agdydg','uyfg','hweu','oiwqdj','uewfyg']\r\narr2.sort()\r\n\r\n\r\nprint(arr2)\r\n\r\nprint(arr)\r\nprint(arr2)\r\n","repo_name":"jayeshkrt/pythondatastructures","sub_path":"array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"613426404","text":"# to import libraries\r\n\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# to read data\r\ndataset = pd.read_excel('C:/--your file path here--/mock_database_mlgr.xlsx')\r\nprint(dataset.shape)\r\nprint(dataset.describe())\r\nprint(dataset.info())\r\nprint(dataset.head())\r\n\r\n# to fit binomial logistic regression - split into training and testing datasets\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay, roc_curve, auc, roc_auc_score\r\n\r\nX = dataset[['fac_pre', 'fim_pre', 'nihss_pre']]\r\ny = dataset['fim_eff_cat']\r\nprint(X.shape)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\r\nprint(X_train.shape, X_test.shape, y_train.shape, y_test.shape)\r\n\r\nscaler = StandardScaler()\r\nscaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n\r\nLR = LogisticRegression(random_state=0).fit(X_train, y_train)\r\n\r\ny_train_pred = LR.predict(X_train)\r\ncm = confusion_matrix(y_true=y_train, y_pred=y_train_pred)\r\nprint(cm)\r\ncm_display = ConfusionMatrixDisplay(cm).plot()\r\n\r\n# to print model parameters\r\nprint('Coefficients:', LR.coef_)\r\nprint('Column names:', X.columns)\r\nprint(classification_report(y_train, y_train_pred))\r\n\r\n# to print model summary\r\nimport statsmodels.api as sm\r\nX_train=sm.add_constant(X_train)\r\nLR=sm.Logit(y_train, X_train)\r\nest=LR.fit()\r\nprint(est.summary())\r\n\r\n# to compute and display ROC curve\r\nfpr, tpr, _ = roc_curve(y_train, y_train_pred)\r\nroc_auc = auc(fpr, tpr)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.figure()\r\nplt.plot(fpr, tpr, color=\"darkorange\", label=\"ROC curve (area = %0.02f)\" % roc_auc)\r\nplt.plot([0, 1], [0, 1], color=\"navy\", linestyle=\"--\")\r\nplt.xlim([0.0, 1.0])\r\nplt.ylim([0.0, 1.0])\r\nplt.xlabel(\"False Positive Rate\")\r\nplt.ylabel(\"True Positive Rate\")\r\nplt.title(\"Receiver operating characteristic curve\")\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n\r\n","repo_name":"AugustineJoshua/Python_For_Clinicians","sub_path":"MultipleLogisticRegression_SplitDataset/MultipleLogisticRegression_SplitDataset.py","file_name":"MultipleLogisticRegression_SplitDataset.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20370873768","text":"class Cline:\n \"\"\"\n Represents a line in the cache.\n \"\"\"\n\n def __init__(self, size):\n self.use = 0\n self.modified = 0\n self.valid = 0\n self.tag = 0\n self.size = size\n self.data = [0] * size\n\n def read(self):\n \"\"\"\n read from the cache line\n \"\"\"\n return 
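# The regression script above passes hard 0/1 predictions to roc_curve, which collapses the
# ROC to a single operating point. A minimal sketch (synthetic data, scikit-learn assumed)
# of the usual approach: score test samples with predict_proba so roc_curve can sweep the
# decision threshold.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, n_features=3, n_informative=3, n_redundant=0,
                           random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
scores = LogisticRegression().fit(X_tr, y_tr).predict_proba(X_te)[:, 1]
fpr, tpr, _ = roc_curve(y_te, scores)
print("AUC:", roc_auc_score(y_te, scores))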
self.data\n\n def write(self, new_data):\n \"\"\"\n write to the cache line\n \"\"\"\n if len(new_data) > self.size:\n raise IndexError\n self.modified = 1\n self.data = new_data\n","repo_name":"CaptainFalco/Computer-Architecture","sub_path":"Memory_Test/cline.py","file_name":"cline.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5502337313","text":"import pandas as pd\nimport torch\nimport os\nimport numpy as np\nimport pickle\nfrom datasets import MultiSetSequence\nfrom torch.utils.data import DataLoader\nfrom sklearn import metrics\nimport math\nfrom util import get_mask\n\nareas = ['north', 'south', 'des']\ngalaxies = ['lrg', 'elg', 'qso', 'glbg', 'rlbg']\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu:0'\nnum_workers = 0 if device == 'cpu:0' else 8\nNSIDE = 512\n\nfor area in areas:\n\n df_deep = None\n\n print(f'Area: {area} started loading.')\n with open(f'data/{area}/{area}_512_robust.pickle', 'rb') as f:\n trainset = pickle.load(f)\n f.close()\n with open(f'data/{area}/{area}_test_512_robust.pickle', 'rb') as f:\n testset = pickle.load(f)\n f.close()\n\n if area == \"north\":\n max_set_len = 30\n elif area == \"south\":\n max_set_len = 25\n else:\n max_set_len = 40\n df_test = pd.DataFrame.from_dict(testset, orient='index')\n df_train = pd.DataFrame.from_dict(trainset, orient='index')\n print(len(df_test), len(df_train))\n df_test = df_test.append(df_train)\n print(len(df_test))\n\n testdata = MultiSetSequence(dict=df_test.to_dict(orient='index'), num_pixels=len(df_test),\n max_ccds=max_set_len, num_features=5, test=True)\n\n pixel_id = testdata.pixel_id\n\n print(f'Area: {area} finished loading.')\n print()\n\n for gal in galaxies:\n testdata.set_targets(gal_type=gal)\n\n best_val = -100\n for model in os.listdir(f\"trained_models/{area}/{gal}\"):\n try:\n int(model[:-3])\n continue\n\n except:\n val = float(model[:-3])\n if val > best_val:\n best_val = val\n\n print()\n print()\n print(f' Area: {area}. Gal: {gal}. 
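# Hedged sketch of the checkpoint selection above: files are named "<score>.pt" where
# <score> is a validation metric, while integer-named files are epoch snapshots to skip.
# The filenames below are hypothetical.
def best_checkpoint(filenames):
    best = None
    for name in filenames:
        stem = name[:-3]                 # drop the ".pt" suffix
        try:
            int(stem)                    # epoch snapshot -> skip
            continue
        except ValueError:
            score = float(stem)
        if best is None or score > best[0]:
            best = (score, name)
    return best

assert best_checkpoint(["10.pt", "0.71.pt", "0.68.pt"]) == (0.71, "0.71.pt")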
Best val: {best_val}.')\n print()\n\n if device == 'cpu:0':\n model = torch.load(f\"trained_models/{area}/{gal}/{best_val}.pt\",\n map_location=torch.device('cpu'))\n else:\n model = torch.load(f\"trained_models/{area}/{gal}/{best_val}.pt\")\n\n testloader = torch.utils.data.DataLoader(testdata, batch_size=128, shuffle=False)\n\n model.eval()\n y_pred = np.array([])\n y_gold = np.array([])\n\n with torch.no_grad():\n for i, (X1, X2, labels, set_sizes) in enumerate(testloader):\n # Extract inputs and associated labels from dataloader batch\n X1 = X1.to(device)\n\n X2 = X2.to(device)\n\n labels = labels.to(device)\n\n set_sizes = set_sizes.to(device)\n\n mask = get_mask(set_sizes, X1.shape[2])\n # Predict outputs (forward pass)\n\n outputs = model(X1, X2, mask=mask)\n # Predict outputs (forward pass)\n # Get predictions and append to label array + count number of correct and total\n y_pred = np.append(y_pred, outputs.cpu().detach().numpy())\n y_gold = np.append(y_gold, labels.cpu().detach().numpy())\n\n print(len(y_pred))\n r2 = metrics.r2_score(y_gold, y_pred)\n rmse = math.sqrt(metrics.mean_squared_error(y_gold, y_pred))\n mae = metrics.mean_absolute_error(y_gold, y_pred)\n\n print()\n print(f\" XXXXXX======== TRIAL {area} - {gal} ended\")\n print()\n print(\"Test Set - R-squared: \", r2)\n print(\"Test Set - RMSE: \", rmse)\n print(\"Test Set - MAE: \", mae)\n\n ax = np.stack((pixel_id, y_pred), axis=1)\n\n if df_deep is None:\n df_deep = pd.DataFrame(ax, columns=['pixel_id', f'{gal}_deep'])\n df_deep.pixel_id = df_deep.pixel_id.astype(int)\n else:\n df_temp = pd.DataFrame(ax, columns=['pixel_id', f'{gal}_deep'])\n df_deep = df_deep.merge(df_temp, how='inner', on='pixel_id')\n\n df_deep = df_deep.dropna()\n df_deep.to_csv(f'results/{area}_ds_predictions_fin.csv', index=False)\n print(f' Pixels in Area: {area}: {len(df_deep)}. 
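# get_mask() is imported from util above and not shown; a common implementation (an
# assumption, not necessarily the project's code) compares each set size against an arange
# of the padded length, yielding a boolean mask so variable-size sets can share one batch.
import torch

def get_mask(set_sizes: torch.Tensor, max_len: int) -> torch.Tensor:
    # (batch, max_len): True where the slot holds a real element, False where it is padding
    return torch.arange(max_len, device=set_sizes.device)[None, :] < set_sizes[:, None]

mask = get_mask(torch.tensor([2, 4]), 5)
assert mask.tolist() == [[True, True, False, False, False],
                         [True, True, True, True, False]]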
')\n","repo_name":"elleggert/astrostatistics","sub_path":"models/deep_set/final_predictions.py","file_name":"final_predictions.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"42402331483","text":"from copy import deepcopy\n\ndef delexicalize_da(meta, requestable):\n meta = deepcopy(meta)\n for k, v in meta.items():\n intent = k\n if intent in requestable:\n for pair in v:\n pair.insert(1, '?')\n else:\n counter = {}\n for pair in v:\n if pair[0] == 'none':\n pair.insert(1, 'none')\n else:\n if pair[0] in counter:\n counter[pair[0]] += 1\n else:\n counter[pair[0]] = 1\n pair.insert(1, str(counter[pair[0]]))\n return meta\n\ndef flat_da(meta):\n meta = deepcopy(meta)\n flaten = []\n for k, v in meta.items():\n for pair in v:\n flaten.append('-'.join((k, pair[0], str(pair[1]))))\n return flaten\n\ndef deflat_da(meta):\n meta = deepcopy(meta)\n dialog_act = {}\n for da in meta:\n i, s, v = da.split('-')\n k = i\n if k not in dialog_act:\n dialog_act[k] = []\n dialog_act[k].append([s, v])\n return dialog_act\n\ndef lexicalize_da(meta, entities, state, requestable):\n meta = deepcopy(meta)\n \n for k, v in meta.items():\n intent = k\n if intent in requestable:\n for pair in v:\n pair[1] = '?'\n elif intent.lower() in ['nooffer', 'nobook']:\n for pair in v:\n if pair[0] in state:\n pair[1] = state[pair[0]]\n else:\n pair[1] = 'none'\n else:\n for pair in v:\n if pair[1] == 'none':\n continue\n elif pair[0].lower() == 'choice':\n pair[1] = str(len(entities))\n else:\n n = int(pair[1]) - 1\n if len(entities) > n and pair[0] in entities[n]:\n pair[1] = entities[n][pair[0]]\n else:\n pair[1] = 'none' \n return meta\n","repo_name":"yuanshengjun/tatk","sub_path":"tatk/util/camrest/lexicalize.py","file_name":"lexicalize.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"74331949937","text":"import pygame\nimport client\nimport sys\nimport random\nimport thread\n\n\ndef message(obj):\n while True:\n message = raw_input(\"> \")\n obj.message(message)\n\n\nscreen = pygame.display.set_mode((800, 600))\nnetworkObj = client.MMOProtocolClient(\"173.63.75.82\", 8080, {\"moving\":False, \"direction\":\"right\", \"stats\":{\"speed\":5}, \"username\":\"tes\", \"class\":\"mage\", \"animationon\":0, \"lastanimation\":100, \"health\":100, \"energy\":100, \"attacking\":False, \"object\":(random.randint(50, 500), random.randint(50, 500), 32, 32), \"sessionid\":__import__(\"uuid\").uuid4().hex, \"password\":\"test\"})\nthread.start_new_thread(message, (networkObj,))\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_DOWN:\n networkObj.packet['direction'] = \"down\"\n elif event.key == pygame.K_UP:\n networkObj.packet['direction'] = \"up\"\n elif event.key == pygame.K_LEFT:\n networkObj.packet['direction'] = \"left\"\n elif event.key == pygame.K_RIGHT:\n networkObj.packet['direction'] = \"right\"\n \n networkObj.packet['moving'] = True\n networkObj.userUpdate()\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n networkObj.packet['moving'] = False\n networkObj.userUpdate()\n elif event.key == pygame.K_DOWN:\n networkObj.packet['moving'] = False\n networkObj.userUpdate()\n elif event.key == pygame.K_LEFT:\n networkObj.packet['moving'] = False\n networkObj.userUpdate()\n elif event.key == 
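# A small usage sketch for the dialog-act helpers above ('Request' treated as the only
# requestable intent; toy meta): delexicalize_da numbers repeated slots, flat_da serializes
# each act to an "intent-slot-index" string, and deflat_da restores the nested dict.
meta = {"Inform": [["food", "thai"], ["food", "chinese"]], "Request": [["phone", "x"]]}
delex = delexicalize_da(meta, requestable=["Request"])
# -> {'Inform': [['food', '1', 'thai'], ['food', '2', 'chinese']],
#     'Request': [['phone', '?', 'x']]}
flat = flat_da(delex)
assert flat == ["Inform-food-1", "Inform-food-2", "Request-phone-?"]
assert deflat_da(flat) == {"Inform": [["food", "1"], ["food", "2"]],
                           "Request": [["phone", "?"]]}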
pygame.K_RIGHT:\n networkObj.packet['moving'] = False\n networkObj.userUpdate()\n \n if networkObj.packet['moving']:\n if networkObj.packet['direction'] == \"left\":\n networkObj.packet['object'][0] -= 5\n elif networkObj.packet['direction'] == \"right\":\n networkObj.packet['object'][0] += 5\n elif networkObj.packet['direction'] == \"up\":\n networkObj.packet['object'][1] -= 5\n elif networkObj.packet['direction'] == \"down\":\n networkObj.packet['object'][1] += 5\n screen.fill((255,255,255)) \n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(networkObj.packet['object'][0], networkObj.packet['object'][1], networkObj.packet['object'][2], networkObj.packet['object'][3]))\n data = networkObj.returnedData['users']\n for user in data:\n data = networkObj.returnedData['users'][user]\n pygame.draw.rect(screen, (0, 255, 0), pygame.Rect(data['object'][0], data['object'][1], 32, 32))\n if data['moving']:\n if data['direction'] == \"right\":\n data['object'][0] += data['stats']['speed']\n elif data['direction'] == \"left\":\n data['object'][0] -= data['stats']['speed']\n elif data['direction'] == \"down\":\n data['object'][1] += data['stats']['speed']\n elif data['direction'] == \"up\":\n data['object'][1] -= data['stats']['speed']\n pygame.display.update()\n pygame.time.wait(60)\n","repo_name":"f-prime/3ProngNetworkingFramework","sub_path":"testGame.py","file_name":"testGame.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30366932605","text":"#demo 1 \r\ncount=0\r\nwhile count<3:\r\n count=count+1\r\n print(\"hello lnb\")\r\n \r\n#demo2\r\ncount=0\r\nwhile count<3:count=count+1;print(\"hi lnb\")\r\n\r\n\r\n#demo 3 a list with while\r\n\r\nlnb=[1,3,5,6]\r\nwhile lnb:\r\n print(lnb.pop())\r\n print(lnb)\r\n \r\n \r\n#loop controllers\r\ni=0\r\na=\"hellolnb\"\r\nwhile i0:\n line = datafile.readline()\n line = line.strip('\\n')\n V.add(line);\n size-=1\n prior = datafile.readline()\n size = datafile.readline()\n size = int(size)\n condprob = {}\n condprobnot = {}\n while size>0:\n key = datafile.readline().strip('\\n')\n value1 = datafile.readline().strip('\\n')\n value1 = float(value1)\n value2 = datafile.readline().strip('\\n')\n value2 = float(value2)\n condprob[key]=value1\n condprobnot[key]=value2\n size-=1\n return V,prior,condprob,condprobnot\n\ndef getscore(filename):\n global V,prior,condprob,condprobnot\n evalfile = open(filename,\"r\")\n W = set()\n while True:\n try:\n line = evalfile.readline().strip('\\n')\n if not line:\n break\n terms = jieba.cut(line)\n for term in terms:\n if term not in rules and (not re.match(\"^[0-9]\\d*$\",term)):\n if term in V:\n W.add(term)\n except UnicodeDecodeError:\n print(\"Error:\"+filename)\n return 0\n score = math.log(float(prior),10)\n for t in W:\n # print(t+\":\"+str(condprob[t]/condprobnot[t]))\n if t in condprob:\n score += math.log(condprob[t]/condprobnot[t],10)\n # print(score)\n return score\n\nV,prior,condprob,condprobnot = getdata()\n# filename = \"./doc/evalueset/1/全国大学生物联网设计竞赛简介.txt\"\nevaldir = \"doc/evalueset/\"\necfilelist = []\nefilelist = []\nlosscount = 0\nfor filename in os.listdir(evaldir+\"1/\"):\n ecfilelist.append(evaldir+\"1/\"+filename)\nfor ecfile in ecfilelist:\n score = getscore(ecfile)\n if score < 1:\n print(ecfile+\":\"+str(score))\n losscount+=1\nfor filename in os.listdir(evaldir):\n if os.path.isdir(evaldir+filename):\n continue\n efilelist.append(evaldir+filename)\nfor efile in efilelist:\n score = 
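# The scorer assembled here is a log-likelihood-ratio Naive Bayes test: a document scores
# log10(prior) plus, for each vocabulary term it contains, log10(P(t|class)/P(t|not class)),
# and is accepted when the total crosses a threshold. Self-contained toy version:
import math

def nb_score(terms, prior, condprob, condprobnot, vocab):
    score = math.log(prior, 10)
    for t in set(terms) & vocab:         # each vocabulary term contributes once
        score += math.log(condprob[t] / condprobnot[t], 10)
    return score

vocab = {"竞赛", "物联网"}
condprob = {"竞赛": 0.05, "物联网": 0.08}      # toy P(term | relevant)
condprobnot = {"竞赛": 0.01, "物联网": 0.02}   # toy P(term | irrelevant)
print(nb_score(["物联网", "竞赛", "物联网"], 0.5, condprob, condprobnot, vocab))  # ~1.0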
getscore(efile)\n if score > 1 :\n print(efile+\":\"+str(score))\n losscount+=1\nprint(\"Accuracy:\"+str(1-(losscount/(len(ecfilelist)+len(efilelist)))))\n","repo_name":"microwu/InfoRetrieval","sub_path":"exp5/nbatest.py","file_name":"nbatest.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34061730990","text":"import numpy as np\r\nimport torch\r\nfrom scipy.optimize import minimize\r\n\r\n\r\n\"\"\"n*n网络,正问题\"\"\"\r\n\r\n\r\ndef conductance_to_kirchhoff_to_response(conductance_row, conductance_column):\r\n \"\"\"\r\n 根据给定的电导,计算基尔霍夫矩阵和响应矩阵。电阻网络的规模(n*n)由给定的电导(n*(n+1)和(n+1)*n)决定。\r\n :param conductance_row: numpy.ndarray. 行电导,大小为n*(n+1)。\r\n :param conductance_column: numpy.ndarray. 列电导,大小为(n+1)*n。\r\n :return: numpy.ndarray, numpy.ndarray. 基尔霍夫矩阵,大小为(n^2+4n)*(n^2+4n);响应矩阵,大小为4n*4n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = conductance_row.shape[0] # 网格维数,n*n\r\n base = n + 2 # 基数,网格坐标为 n+2 进制的数\r\n coordinate = np.array(range(base ** 2)).reshape(base, base) # 坐标序号对应矩阵\r\n\r\n \"\"\"初始化基尔霍夫矩阵并赋值\"\"\"\r\n kirchhoff = np.zeros((base ** 2, base ** 2))\r\n for i in range(n):\r\n for j in range(base - 1):\r\n k1 = coordinate[i + 1, j]\r\n kirchhoff[k1, k1 + 1] = kirchhoff[k1 + 1, k1] = - conductance_row[i, j]\r\n k2 = coordinate[j, i + 1]\r\n kirchhoff[k2, k2 + base] = kirchhoff[k2 + base, k2] = - conductance_column[j, i]\r\n\r\n \"\"\"为了方便计算响应矩阵,将点分为边界点和内部点,并重新排列\"\"\"\r\n boundary1 = list(coordinate[1: - 1, 0])\r\n boundary2 = list(coordinate[- 1, 1: - 1])\r\n boundary3 = list(coordinate[- 2: 0:- 1, - 1])\r\n boundary4 = list(coordinate[0, - 2: 0: -1])\r\n boundary = boundary1 + boundary2 + boundary3 + boundary4\r\n interior = list(coordinate[1: n + 1, 1: n + 1].reshape(n ** 2))\r\n rearrange = boundary + interior\r\n kirchhoff = kirchhoff[np.ix_(rearrange, rearrange)]\r\n\r\n \"\"\"对角线为行(列)和\"\"\"\r\n kirchhoff[np.diag_indices_from(kirchhoff)] = - np.sum(kirchhoff, axis=1)\r\n\r\n \"\"\"给定基尔霍夫矩阵,计算响应矩阵\"\"\"\r\n num_node = n * (n + 4)\r\n num_boundary = len(boundary)\r\n kirchhoff_boundary_to_boundary = kirchhoff[0: num_boundary, 0: num_boundary]\r\n kirchhoff_boundary_to_interior = kirchhoff[0: num_boundary, num_boundary: num_node]\r\n kirchhoff_interior_to_interior = kirchhoff[num_boundary: num_node, num_boundary: num_node]\r\n kirchhoff_interior_to_interior_inverse = np.linalg.inv(kirchhoff_interior_to_interior)\r\n response = kirchhoff_boundary_to_boundary - np.dot(kirchhoff_boundary_to_interior,\r\n np.dot(kirchhoff_interior_to_interior_inverse,\r\n kirchhoff_boundary_to_interior.T))\r\n\r\n return kirchhoff, response\r\n\r\n\r\ndef conductance_to_kirchhoff_to_response_torch(conductance_row, conductance_column):\r\n \"\"\"\r\n 根据给定的电导,计算基尔霍夫矩阵和响应矩阵,GPU版本。电阻网络的规模(n*n)由给定的电导(n*(n+1)和(n+1)*n)决定。\r\n :param conductance_row: torch.Tensor. 行电导,大小为n*(n+1)。\r\n :param conductance_column: torch.Tensor. 列电导,大小为(n+1)*n。\r\n :return: torch.Tensor, torch.Tensor. 
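# The response matrix computed throughout this file is the Schur complement of the interior
# block of the Kirchhoff (weighted graph Laplacian) matrix: R = K_bb - K_bi K_ii^{-1} K_ib.
# Minimal check on a 3-node path b0 - i - b1 with unit conductances: eliminating the
# interior node must leave the series conductance 1/2 between the two boundary nodes.
import numpy as np

K = np.array([[ 1.,  0., -1.],
              [ 0.,  1., -1.],
              [-1., -1.,  2.]])          # node order: (b0, b1, interior)
nb = 2
K_bb, K_bi, K_ii = K[:nb, :nb], K[:nb, nb:], K[nb:, nb:]
R = K_bb - K_bi @ np.linalg.inv(K_ii) @ K_bi.T
assert np.allclose(R, [[0.5, -0.5], [-0.5, 0.5]])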
基尔霍夫矩阵,大小为(n^2+4n)*(n^2+4n);响应矩阵,大小为4n*4n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = conductance_row.shape[0] # 网格维数,n*n\r\n base = n + 2 # 基数,网格坐标为 n+2 进制的数\r\n coordinate = np.array(range(base ** 2)).reshape(base, base) # 坐标序号对应矩阵\r\n\r\n \"\"\"初始化基尔霍夫矩阵并赋值\"\"\"\r\n kirchhoff = torch.zeros(base ** 2, base ** 2, device=torch.device('cuda') if torch.cuda.is_available() else 'cpu')\r\n index_row_1_1, index_row_1_2, index_row_2_1, index_row_2_2 = [], [], [], []\r\n index_col_1_1, index_col_1_2, index_col_2_1, index_col_2_2 = [], [], [], []\r\n\r\n for i in range(n):\r\n for j in range(base - 1):\r\n k1 = coordinate[i + 1, j]\r\n index_row_1_1.append(k1)\r\n index_row_1_2.append(k1 + 1)\r\n index_row_2_1.append(k1 + 1)\r\n index_row_2_2.append(k1)\r\n\r\n k2 = coordinate[j, i + 1]\r\n index_col_1_1.append(k2)\r\n index_col_1_2.append(k2 + base)\r\n index_col_2_1.append(k2 + base)\r\n index_col_2_2.append(k2)\r\n\r\n kirchhoff[index_row_1_1, index_row_1_2] = - torch.reshape(conductance_row, (-1,))\r\n kirchhoff[index_row_2_1, index_row_2_2] = - torch.reshape(conductance_row, (-1,))\r\n kirchhoff[index_col_1_1, index_col_1_2] = - torch.reshape(conductance_column.t(), (-1,))\r\n kirchhoff[index_col_2_1, index_col_2_2] = - torch.reshape(conductance_column.t(), (-1,))\r\n\r\n \"\"\"为了方便计算响应矩阵,将点分为边界点和内部点,并重新排列\"\"\"\r\n boundary1 = list(coordinate[1: - 1, 0])\r\n boundary2 = list(coordinate[- 1, 1: - 1])\r\n boundary3 = list(coordinate[- 2: 0:- 1, - 1])\r\n boundary4 = list(coordinate[0, - 2: 0: -1])\r\n boundary = boundary1 + boundary2 + boundary3 + boundary4\r\n interior = list(coordinate[1: n + 1, 1: n + 1].reshape(n ** 2))\r\n rearrange = boundary + interior\r\n kirchhoff = kirchhoff[np.ix_(rearrange, rearrange)]\r\n\r\n \"\"\"对角线为行(列)和\"\"\"\r\n kirchhoff[np.diag_indices_from(kirchhoff)] = - torch.sum(kirchhoff, 1)\r\n\r\n \"\"\"给定基尔霍夫矩阵,计算响应矩阵\"\"\"\r\n num_node = n * (n + 4)\r\n num_boundary = len(boundary)\r\n kirchhoff_boundary_to_boundary = kirchhoff[0: num_boundary, 0: num_boundary]\r\n kirchhoff_boundary_to_interior = kirchhoff[0: num_boundary, num_boundary: num_node]\r\n kirchhoff_interior_to_interior = kirchhoff[num_boundary: num_node, num_boundary: num_node]\r\n kirchhoff_interior_to_interior_inverse = torch.linalg.inv(kirchhoff_interior_to_interior)\r\n response = kirchhoff_boundary_to_boundary - torch.mm(kirchhoff_boundary_to_interior,\r\n torch.mm(kirchhoff_interior_to_interior_inverse,\r\n kirchhoff_boundary_to_interior.t()))\r\n\r\n return kirchhoff, response\r\n\r\n\r\n\"\"\"n*n网络,反问题\"\"\"\r\n\r\n\r\ndef response_to_conductance(response):\r\n \"\"\"\r\n 根据给定的响应矩阵,计算电导。基本算法。\r\n :param response: numpy.ndarray. 响应矩阵,大小为4n*4n。\r\n :return: numpy.ndarray, numpy.ndarray. 
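# Two quick invariants of any response matrix produced by the forward solver above (they
# hold for every connected resistor network): it is symmetric (reciprocity), and its rows
# sum to zero because a constant boundary potential drives no current. Usage sketch,
# assuming conductance_to_kirchhoff_to_response from above is in scope:
import numpy as np

row = 1 / np.random.uniform(20, 30, (1, 2))    # 1x1 network: two row resistors
col = 1 / np.random.uniform(20, 30, (2, 1))    # and two column resistors
_, R = conductance_to_kirchhoff_to_response(row, col)
assert np.allclose(R, R.T)
assert np.allclose(R.sum(axis=1), 0)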
行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = int(response.shape[0] / 4) # 网络维数\r\n n1 = 3 * n\r\n n2 = n1 - 1\r\n n3 = 4 * n - 1\r\n\r\n \"\"\"初始化电导计算值矩阵\"\"\"\r\n conductance_row = np.zeros([n, n + 1])\r\n conductance_column = np.zeros([n + 1, n])\r\n\r\n \"\"\"对边界点顺序的置换\"\"\"\r\n permutation = np.append(np.arange(2 * n, 4 * n), np.arange(0, 2 * n))\r\n\r\n \"\"\"计算电导值左上一半和右下一半\"\"\"\r\n for m in range(2):\r\n\r\n \"\"\"每一半计算n层电导\"\"\"\r\n for i in range(n):\r\n\r\n \"\"\"计算边界未知电势\"\"\"\r\n potential_boundary_unknown = - np.dot(np.linalg.inv(response[n2 - i: n1, 0: i + 1]),\r\n response[n2 - i: n1, n3 - i])\r\n\r\n \"\"\"计算边界未知电流,从所有非0电势位置,到待求电流位置\"\"\"\r\n position = np.append(np.arange(0, i + 1), [n3 - i])\r\n current_column = np.dot(response[np.ix_(range(n3, n3 - i, - 1), position)],\r\n np.append(potential_boundary_unknown, 1))\r\n current_row = np.dot(response[np.ix_(range(1, i), position)], np.append(potential_boundary_unknown, 1))\r\n current_flow = np.dot(response[i, position], np.append(potential_boundary_unknown, 1))\r\n current_flow_end = np.dot(response[n3 - i, position], np.append(potential_boundary_unknown, 1))\r\n\r\n \"\"\"初始化电势,计算电导的节点电势,行电流算出的内部点电势,每算一层更新一次\"\"\"\r\n potential_start = potential_boundary_unknown[- 1]\r\n potential_row = potential_boundary_unknown[1: i] - current_row / conductance_row[1: i, 0]\r\n\r\n \"\"\"列电流算出的内部点电势\"\"\"\r\n potential_column = - current_column / conductance_column[0, 0: i]\r\n\r\n for j in range(i):\r\n conductance_row[i - j, j] = current_flow / potential_start\r\n potential_temp = np.append(potential_column[j], potential_row)\r\n potential_end = potential_start = potential_temp[- 1]\r\n conductance_column[i - j, j] = - current_flow / potential_end\r\n if j is not (i - 1):\r\n potential_temp_diff = potential_temp[0: - 1] - potential_row\r\n current_temp = potential_temp_diff * conductance_column[1: i - j, j]\r\n current_flow = current_flow + current_row[- 1] + current_temp[- 1]\r\n current_temp_diff = current_temp[0: - 1] - current_temp[1:]\r\n current_row = current_row[0: - 1] + current_temp_diff\r\n potential_row = potential_row[0: - 1] - current_row / conductance_row[1: i - j - 1, j + 1]\r\n\r\n conductance_row[0, i] = - current_flow_end / potential_start\r\n conductance_column[0, i] = current_flow_end\r\n\r\n \"\"\"交换响应矩阵的行和列,并旋转电导计算值矩阵180度\"\"\"\r\n response = response[permutation, :]\r\n response = response[:, permutation]\r\n conductance_row = np.rot90(conductance_row, 2)\r\n conductance_column = np.rot90(conductance_column, 2)\r\n\r\n return conductance_row, conductance_column\r\n\r\n\r\ndef response_to_conductance_sub(response):\r\n \"\"\"\r\n 根据给定的响应矩阵,计算电导。子图算法。\r\n :param response: numpy.ndarray. 响应矩阵,大小为4n*4n。\r\n :return: numpy.ndarray, numpy.ndarray. 
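# np.ix_, used above to reorder the Kirchhoff matrix, builds an open mesh from two index
# lists so that K[np.ix_(rows, cols)] extracts the (len(rows), len(cols)) submatrix with
# both axes permuted in one shot:
import numpy as np

K = np.arange(16).reshape(4, 4)
sub = K[np.ix_([2, 0], [2, 0])]
assert (sub == [[10, 8], [2, 0]]).all()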
行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = int(response.shape[0] / 4) # 网络维数\r\n base = n + 2 # 基数,网格坐标为 n+2 进制的数\r\n num_node_extra = base ** 2 # 所有节点个数,包括额外的四个角\r\n coordinate = np.array(range(num_node_extra)).reshape(base, base) # 坐标序号对应矩阵\r\n kirchhoff_extend = np.zeros((num_node_extra, num_node_extra)) # 扩展的基尔霍夫矩阵初始化,包括额外的四个角\r\n\r\n \"\"\"内点分组,左上ul,右下lr\"\"\"\r\n group_interior_ul = [[]]\r\n group_interior_lr = [[]]\r\n for i in range(n):\r\n group_interior_ul.append(coordinate[range(1, 2 + i), range(1 + i, 0, - 1)])\r\n group_interior_lr.append(coordinate[range(n, n - i - 1, - 1), range(n - i, n + 1)])\r\n group_interior = [group_interior_ul, group_interior_lr]\r\n\r\n \"\"\"节点分层,左上ul,右下lr\"\"\"\r\n layer_ul = []\r\n layer_lr = []\r\n for i in range(1, n + 1):\r\n layer_ul.append(coordinate[0, i])\r\n layer_lr.append(coordinate[base - 1, base - 1 - i])\r\n for j in range(1, i + 1):\r\n layer_ul[- 1] = np.append(layer_ul[- 1], coordinate[j, i - j: i - j + 2][::- 1])\r\n layer_lr[- 1] = np.append(layer_lr[- 1], coordinate[base - 1 - j, base - i + j - 2: base - i + j])\r\n layer = [layer_ul, layer_lr]\r\n\r\n \"\"\"用得到的序号\"\"\"\r\n n1 = 3 * n\r\n n2 = n1 - 1\r\n n3 = 4 * n - 1\r\n\r\n \"\"\"对边界点顺序的置换\"\"\"\r\n permutation = np.append(np.arange(2 * n, 4 * n), np.arange(0, 2 * n))\r\n\r\n \"\"\"计算电导值左上一半和右下一半\"\"\"\r\n for m in range(2):\r\n\r\n \"\"\"初始化子图内点\"\"\"\r\n sub_interior = np.array([])\r\n\r\n \"\"\"每一半计算n层电导\"\"\"\r\n for i in range(n):\r\n\r\n \"\"\"计算边界未知电势\"\"\"\r\n potential_boundary_unknown = - np.dot(np.linalg.inv(response[n2 - i: n1, 0: i + 1]),\r\n response[n2 - i: n1, n3 - i])\r\n\r\n \"\"\"计算边界未知电流,从所有非0电势位置,到待求电流位置\"\"\"\r\n position = np.append(np.arange(0, i + 1), [n3 - i])\r\n current_boundary_unknown = np.dot(response[np.ix_(range(0, i), position)],\r\n np.append(potential_boundary_unknown, 1))\r\n current_flow = np.dot(response[n3 - i, position], np.append(potential_boundary_unknown, 1))\r\n\r\n \"\"\"初始化节点电势序列,以及电流更新序列\"\"\"\r\n potential_sequence = np.array([1, potential_boundary_unknown[- 1]])\r\n current_update = np.array([0])\r\n\r\n \"\"\"计算子图的基尔霍夫矩阵和响应矩阵\"\"\"\r\n if i != 0:\r\n\r\n \"\"\"子图的边界点,内点,基尔霍夫矩阵\"\"\"\r\n sub_boundary = np.concatenate((coordinate[1: i + 1, 0], group_interior[m][i], coordinate[0, 1: i + 1]))\r\n sub_interior = np.append(sub_interior, group_interior[m][i - 1])\r\n sub = np.append(sub_boundary, sub_interior).astype(int)\r\n num_sub = len(sub)\r\n kirchhoff_sub = kirchhoff_extend[np.ix_(sub, sub)]\r\n kirchhoff_sub[range(num_sub), range(num_sub)] = - np.sum(kirchhoff_sub, axis=1)\r\n\r\n \"\"\"子图的响应矩阵\"\"\"\r\n response_sub = kirchhoff_sub\r\n if i != 1:\r\n num_sub_boundary = len(sub_boundary)\r\n kirchhoff_sub_boundary_to_boundary = kirchhoff_sub[0: num_sub_boundary, 0: num_sub_boundary]\r\n kirchhoff_sub_boundary_to_interior = kirchhoff_sub[0: num_sub_boundary, num_sub_boundary: num_sub]\r\n kirchhoff_sub_interior_to_interior = kirchhoff_sub[\r\n num_sub_boundary: num_sub, num_sub_boundary: num_sub\r\n ]\r\n kirchhoff_sub_interior_to_interior_inverse = np.linalg.inv(kirchhoff_sub_interior_to_interior)\r\n temp = np.dot(kirchhoff_sub_interior_to_interior_inverse, kirchhoff_sub_boundary_to_interior.T)\r\n response_sub = kirchhoff_sub_boundary_to_boundary - np.dot(kirchhoff_sub_boundary_to_interior, temp)\r\n\r\n \"\"\"计算子图边界未知电势电流\"\"\"\r\n potential_sub_boundary_known = np.dot(np.linalg.inv(response_sub[0: i, i: 2 * i]),\r\n current_boundary_unknown - np.dot(response_sub[0: i, 0: i],\r\n 
potential_boundary_unknown[\r\n 0: - 1]))\r\n current_sub_boundary_unknown = np.dot(response_sub[i: 2 * i, 0: 2 * i],\r\n np.append(potential_boundary_unknown[0: - 1],\r\n potential_sub_boundary_known))\r\n\r\n \"\"\"写出节点电势序列,以及电流更新序列\"\"\"\r\n potential_sequence = np.insert(potential_sequence, 1, potential_sub_boundary_known)\r\n current_update = np.append(current_update, current_sub_boundary_unknown)\r\n\r\n \"\"\"给扩展的基尔霍夫矩阵赋值\"\"\"\r\n order_node = layer[m][i]\r\n for j in range(i + 1):\r\n current_flow = current_flow - current_update[j]\r\n j2 = j * 2\r\n kirchhoff_extend[[order_node[j2], order_node[j2 + 1]],\r\n [order_node[j2 + 1], order_node[j2]]] = - current_flow / potential_sequence[j]\r\n kirchhoff_extend[[order_node[j2 + 1], order_node[j2 + 2]],\r\n [order_node[j2 + 2], order_node[j2 + 1]]] = current_flow / potential_sequence[j + 1]\r\n\r\n \"\"\"交换响应矩阵的行和列,并旋转坐标序号对应矩阵180度\"\"\"\r\n response = response[permutation, :]\r\n response = response[:, permutation]\r\n coordinate = np.rot90(coordinate, 2)\r\n\r\n \"\"\"扩展的基尔霍夫矩阵转成电导行列矩阵\"\"\"\r\n conductance_row = np.zeros([n, n + 1])\r\n conductance_column = np.zeros([n + 1, n])\r\n for i in range(n + 1):\r\n conductance_row[:, i] = - kirchhoff_extend[coordinate[1: - 1, i], coordinate[1: - 1, i + 1]]\r\n conductance_column[i, :] = - kirchhoff_extend[coordinate[i, 1: - 1], coordinate[i + 1, 1: - 1]]\r\n\r\n return conductance_row, conductance_column\r\n\r\n\r\ndef response_to_conductance_improved(response):\r\n \"\"\"\r\n 根据给定的响应矩阵,计算电导。基本改进算法。\r\n :param response: numpy.ndarray. 响应矩阵,大小为4n*4n。\r\n :return: numpy.ndarray, numpy.ndarray. 行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = int(response.shape[0] / 4) # 网络维数\r\n n1 = 3 * n\r\n n2 = n1 - 1\r\n n3 = 4 * n - 1\r\n\r\n \"\"\"初始化电导计算值矩阵\"\"\"\r\n conductance_row_calculate = np.zeros([n, n + 1])\r\n conductance_column_calculate = np.zeros([n + 1, n])\r\n\r\n \"\"\"对边界点顺序的置换\"\"\"\r\n permutation = np.append(np.arange(2 * n, 4 * n), np.arange(0, 2 * n))\r\n\r\n \"\"\"计算电导值左上一半和右下一半\"\"\"\r\n for m in range(2):\r\n\r\n \"\"\"每一半计算n层电导\"\"\"\r\n for i in range(n):\r\n\r\n \"\"\"计算边界未知电势\"\"\"\r\n potential_boundary_unknown = - np.dot(\r\n np.linalg.inv(response[n2 - i: n1, 0: i + 1]),\r\n response[n2 - i: n1, n3 - i]\r\n )\r\n\r\n \"\"\"计算边界未知电流,从所有非0电势位置,到待求电流\"\"\"\r\n position = np.append(np.arange(0, i + 1), [n3 - i])\r\n current_left = np.dot(\r\n response[np.ix_(range(0, i), position)],\r\n np.append(potential_boundary_unknown, 1)\r\n )\r\n current_up = np.dot(\r\n response[np.ix_(range(n3, n3 - i, - 1), position)],\r\n np.append(potential_boundary_unknown, 1)\r\n )\r\n current_flow_left = np.dot(\r\n response[i, position],\r\n np.append(potential_boundary_unknown, 1)\r\n )\r\n current_flow_up = np.dot(\r\n response[n3 - i, position],\r\n np.append(potential_boundary_unknown, 1)\r\n )\r\n\r\n \"\"\"计算内部节点电势\"\"\"\r\n potential_left = potential_boundary_unknown[0: i] - current_left / conductance_row_calculate[0: i, 0]\r\n potential_up = - current_up / conductance_column_calculate[0, 0: i]\r\n potential_left_start = potential_boundary_unknown[- 1]\r\n potential_up_start = 1\r\n\r\n for j in range((i+1)//2):\r\n\r\n \"\"\"从左边计算\"\"\"\r\n conductance_row_calculate[i-j, j] = current_flow_left / potential_left_start\r\n potential_left_end = potential_left_start = potential_left[-1]\r\n conductance_column_calculate[i-j, j] = - current_flow_left / potential_left_end\r\n\r\n \"\"\"从上边计算\"\"\"\r\n conductance_column_calculate[j, i-j] = current_flow_up / 
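# Each layer step above forms np.linalg.inv(...) and multiplies; when only the product is
# needed, np.linalg.solve is the numerically safer equivalent because it factorizes the
# system instead of explicitly inverting it:
import numpy as np

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
assert np.allclose(np.linalg.solve(A, b), np.linalg.inv(A) @ b)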
potential_up_start\r\n potential_up_end = potential_up_start = potential_up[-1]\r\n conductance_row_calculate[j, i-j] = - current_flow_up / potential_up_end\r\n\r\n if j is not ((i+1)//2-1):\r\n\r\n \"\"\"从左边计算\"\"\"\r\n potential_left_diff = potential_left[0: -1] - potential_left[1:]\r\n current_left_temp = potential_left_diff * conductance_column_calculate[1+j: i-j, j]\r\n current_flow_left = current_flow_left + current_left[-1] + current_left_temp[-1]\r\n current_left = current_left_temp[0: -1] - current_left_temp[1:] + current_left[1: -1]\r\n potential_left = potential_left[1: -1] - current_left / conductance_row_calculate[1+j: i-1-j, j+1]\r\n\r\n \"\"\"从上边计算\"\"\"\r\n potential_up_diff = potential_up[0: -1] - potential_up[1:]\r\n current_up_temp = potential_up_diff * conductance_row_calculate[j, 1+j: i-j]\r\n current_flow_up = current_flow_up + current_up[-1] + current_up_temp[-1]\r\n current_up = current_up_temp[0: -1] - current_up_temp[1:] + current_up[1: -1]\r\n potential_up = potential_up[1: -1] - current_up / conductance_column_calculate[j+1, 1+j: i-1-j]\r\n\r\n if (i % 2) == 0:\r\n\r\n k = int(i / 2)\r\n\r\n if i != 0:\r\n\r\n \"\"\"从左边计算\"\"\"\r\n potential_left_diff = potential_left[0] - potential_left[-1]\r\n current_left_temp = potential_left_diff * conductance_column_calculate[k, k-1]\r\n current_flow_left = current_flow_left + current_left[-1] + current_left_temp\r\n\r\n \"\"\"从上边计算\"\"\"\r\n potential_up_diff = potential_up[0] - potential_up[-1]\r\n current_up_temp = potential_up_diff * conductance_row_calculate[k-1, k]\r\n current_flow_up = current_flow_up + current_up[-1] + current_up_temp\r\n\r\n \"\"\"从左边计算\"\"\"\r\n conductance_row_calculate[k, k] = current_flow_left / potential_left_start\r\n\r\n \"\"\"从上边计算\"\"\"\r\n conductance_column_calculate[k, k] = current_flow_up / potential_up_start\r\n\r\n \"\"\"交换响应矩阵的行和列,并旋转电导计算值矩阵180度\"\"\"\r\n response = response[permutation, :]\r\n response = response[:, permutation]\r\n conductance_row_calculate = np.rot90(conductance_row_calculate, 2)\r\n conductance_column_calculate = np.rot90(conductance_column_calculate, 2)\r\n\r\n return conductance_row_calculate, conductance_column_calculate\r\n\r\n\r\ndef response_to_conductance_optimize(response):\r\n \"\"\"\r\n 根据给定的响应矩阵,计算电导。优化算法。\r\n :param response: numpy.ndarray. 响应矩阵,大小为4n*4n。\r\n :return: numpy.ndarray, numpy.ndarray. 行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = int(response.shape[0] / 4) # 网络维数\r\n m = n * (n + 1) # 电阻个数\r\n\r\n def vector_to_matrix(v):\r\n \"\"\"\r\n 根据给定电导向量,写出行电导和列电导。\r\n :param v: numpy.ndarray. 电阻向量,大小为2*m。\r\n :return: numpy.float64, numpy.float64. 行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n nonlocal n, m\r\n ma = v[:m].reshape(n, n + 1)\r\n mb = v[m:].reshape(n + 1, n)\r\n\r\n return ma, mb\r\n\r\n def vector_to_response(conductance):\r\n \"\"\"\r\n 根据给定电阻向量,计算响应矩阵。\r\n :param conductance: numpy.ndarray. 电阻向量,大小为2*m。\r\n :return: numpy.float64. 
损失函数值,两矩阵之差的Frobenius范数平方。\r\n \"\"\"\r\n nonlocal response, n, m\r\n conductance_row_tmp, conductance_column_tmp = vector_to_matrix(conductance)\r\n _, response_cal = conductance_to_kirchhoff_to_response(conductance_row_tmp, conductance_column_tmp)\r\n response_diff = response - response_cal # 矩阵之差\r\n loss = np.linalg.norm(response_diff) ** 2 # 损失为Frobenius范数平方\r\n\r\n return loss\r\n\r\n \"\"\"优化\"\"\"\r\n x0 = 1 / (np.ones(2 * m) * 25) # 初始解\r\n bnds = tuple([(1 / 30, 1 / 20) for _ in range(2 * m)]) # 边界\r\n conductance_opt = minimize(vector_to_response, x0, method='L-BFGS-B', bounds=bnds) # 优化计算\r\n conductance_row, conductance_column = vector_to_matrix(conductance_opt.x)\r\n\r\n return conductance_row, conductance_column\r\n\r\n\r\ndef response_to_conductance_optimize_torch(response):\r\n \"\"\"\r\n 根据给定的响应矩阵,计算电导。优化算法GPU版本。\r\n :param response: numpy.ndarray. 响应矩阵,大小为4n*4n。\r\n :return: numpy.ndarray, numpy.ndarray. 行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n \"\"\"基本定义\"\"\"\r\n n = int(response.shape[0] / 4) # 网络维数\r\n m = n * (n + 1) # 电阻个数\r\n\r\n \"\"\"numpy转torch\"\"\"\r\n response = torch.tensor(response, device=torch.device('cuda') if torch.cuda.is_available() else 'cpu')\r\n\r\n def vector_to_matrix(v):\r\n \"\"\"\r\n 根据给定电导向量,写出行电导和列电导。\r\n :param v: numpy.ndarray. 电导向量,大小为2*m。\r\n :return: numpy.float64, numpy.float64. 行电导,大小为n*(n+1);列电导,大小为(n+1)*n。\r\n \"\"\"\r\n\r\n nonlocal n, m\r\n ma = v[:m].reshape(n, n + 1)\r\n mb = v[m:].reshape(n + 1, n)\r\n\r\n return ma, mb\r\n\r\n def vector_to_response(conductance):\r\n \"\"\"\r\n 根据给定电阻向量,计算响应矩阵。\r\n :param conductance: torch.Tensor. 电导向量,大小为2*m。\r\n :return: torch.Tensor. 损失函数值,两矩阵之差的Frobenius范数平方。\r\n \"\"\"\r\n\r\n nonlocal response, n, m\r\n conductance_row_tmp, conductance_column_tmp = vector_to_matrix(conductance)\r\n _, response_cal = conductance_to_kirchhoff_to_response_torch(conductance_row_tmp, conductance_column_tmp)\r\n response_diff = response - response_cal # 矩阵之差\r\n loss = torch.norm(response_diff) ** 2 # 损失为Frobenius范数平方\r\n\r\n return loss\r\n\r\n \"\"\"优化\"\"\"\r\n x0 = 1 / (torch.ones([2 * m], device=torch.device('cuda') if torch.cuda.is_available() else 'cpu') * 25) # 初始解\r\n x0.requires_grad = True # 可求梯度\r\n optimizer = torch.optim.Adam([x0], lr=1e-3) # 优化器\r\n for _ in range(1000):\r\n y = vector_to_response(x0)\r\n optimizer.zero_grad()\r\n y.backward()\r\n optimizer.step()\r\n if y <= 1e-10:\r\n break\r\n conductance_row, conductance_column = vector_to_matrix(x0.detach().cpu().numpy())\r\n\r\n return conductance_row, conductance_column\r\n\r\n\r\n\"\"\"n*n网络,还原误差\"\"\"\r\n\r\n\r\ndef resistance_diff(conductance_a_row, conductance_a_col, conductance_b_row, conductance_b_col):\r\n \"\"\"\r\n 计算电阻向量之差。\r\n :param conductance_a_row: numpy.ndarray. 行电导a,大小为n*(n+1)。\r\n :param conductance_a_col: numpy.ndarray. 列电导a,大小为(n+1)*n。\r\n :param conductance_b_row: numpy.ndarray. 行电导b,大小为n*(n+1)。\r\n :param conductance_b_col: numpy.ndarray. 列电导b,大小为(n+1)*n。\r\n :return: numpy.float64, numpy.float64, numpy.float64. 
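# The classical-optimizer variant above wraps the forward solver in a scalar Frobenius loss
# and hands it to scipy's L-BFGS-B with box bounds on every conductance. The same pattern in
# isolation, on a toy quadratic whose minimum lies inside the (1/30, 1/20) bounds:
import numpy as np
from scipy.optimize import minimize

target = np.array([0.04, 0.045])
res = minimize(lambda g: np.linalg.norm(g - target) ** 2,
               x0=np.full(2, 1 / 25), method="L-BFGS-B",
               bounds=[(1 / 30, 1 / 20)] * 2)
assert np.allclose(res.x, target, atol=1e-4)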
矩阵之差绝对值的总和,最大值,均值。\r\n \"\"\"\r\n\r\n conductance_a = np.concatenate((np.ravel(conductance_a_row), np.ravel(conductance_a_col)))\r\n conductance_b = np.concatenate((np.ravel(conductance_b_row), np.ravel(conductance_b_col)))\r\n diff = np.abs(1 / conductance_a - 1 / conductance_b)\r\n diff_sum = np.sum(diff)\r\n diff_max = np.max(diff)\r\n diff_mean = np.mean(diff)\r\n\r\n return diff_sum, diff_max, diff_mean\r\n\r\n\r\n\"\"\"自定义网络,正问题\"\"\"\r\n\r\n\r\ndef conductance_to_kirchhoff_to_response_customize(num_boundary, num_interior, edge, conductance):\r\n \"\"\"\r\n 根据自定义的顶点、边和电导,计算基尔霍夫矩阵和响应矩阵。\r\n :param num_boundary: int. 边界点个数。\r\n :param num_interior: int. 内点个数.\r\n :param edge: list. 边拓扑。\r\n :param conductance: list. 边电导。\r\n :return: numpy.ndarray, numpy.ndarray. 基尔霍夫矩阵,大小为(num_boundary+num_interior)*(num_boundary+num_interior);\r\n 响应矩阵,大小为num_boundary*num_boundary。\r\n \"\"\"\r\n\r\n num_node = num_boundary + num_interior # 顶点个数\r\n\r\n \"\"\"计算基尔霍夫矩阵\"\"\"\r\n kirchhoff = np.zeros((num_node, num_node))\r\n for i in range(len(edge)):\r\n j, k = edge[i]\r\n kirchhoff[j, k] = kirchhoff[k, j] = - conductance[i]\r\n kirchhoff[np.diag_indices_from(kirchhoff)] = - np.sum(kirchhoff, axis=1)\r\n\r\n \"\"\"计算响应矩阵\"\"\"\r\n kirchhoff_boundary_to_boundary = kirchhoff[0: num_boundary, 0: num_boundary]\r\n kirchhoff_boundary_to_interior = kirchhoff[0: num_boundary, num_boundary: num_node]\r\n kirchhoff_interior_to_interior = kirchhoff[num_boundary: num_node, num_boundary: num_node]\r\n kirchhoff_interior_to_interior_inverse = np.linalg.inv(kirchhoff_interior_to_interior)\r\n response = kirchhoff_boundary_to_boundary - np.dot(kirchhoff_boundary_to_interior,\r\n np.dot(kirchhoff_interior_to_interior_inverse,\r\n kirchhoff_boundary_to_interior.T))\r\n\r\n return kirchhoff, response\r\n\r\n\r\ndef conductance_to_kirchhoff_to_response_customize_torch(num_boundary, num_interior, edge, conductance):\r\n \"\"\"\r\n 根据自定义的顶点、边和电导,计算基尔霍夫矩阵和响应矩阵,GPU版本。\r\n :param num_boundary: int. 边界点个数。\r\n :param num_interior: int. 内点个数.\r\n :param edge: list. 边拓扑。\r\n :param conductance: torch.Tensor. 边电导。\r\n :return: torch.Tensor, torch.Tensor. 
基尔霍夫矩阵,大小为(num_boundary+num_interior)*(num_boundary+num_interior);\r\n 响应矩阵,大小为num_boundary*num_boundary。\r\n \"\"\"\r\n\r\n num_node = num_boundary + num_interior # 顶点个数\r\n\r\n \"\"\"计算基尔霍夫矩阵\"\"\"\r\n kirchhoff = torch.zeros(num_node, num_node, device=torch.device('cuda') if torch.cuda.is_available() else 'cpu')\r\n index_1 = list(map(lambda x: x[0], edge))\r\n index_2 = list(map(lambda x: x[1], edge))\r\n\r\n kirchhoff[index_1, index_2] = - conductance\r\n kirchhoff[index_2, index_1] = - conductance\r\n\r\n kirchhoff[np.diag_indices_from(kirchhoff)] = - torch.sum(kirchhoff, 1)\r\n\r\n \"\"\"计算响应矩阵\"\"\"\r\n kirchhoff_boundary_to_boundary = kirchhoff[0: num_boundary, 0: num_boundary]\r\n kirchhoff_boundary_to_interior = kirchhoff[0: num_boundary, num_boundary: num_node]\r\n kirchhoff_interior_to_interior = kirchhoff[num_boundary: num_node, num_boundary: num_node]\r\n kirchhoff_interior_to_interior_inverse = torch.linalg.inv(kirchhoff_interior_to_interior)\r\n response = kirchhoff_boundary_to_boundary - torch.mm(kirchhoff_boundary_to_interior,\r\n torch.mm(kirchhoff_interior_to_interior_inverse,\r\n kirchhoff_boundary_to_interior.t()))\r\n\r\n return kirchhoff, response\r\n\r\n\r\n\"\"\"自定义网络,反问题\"\"\"\r\n\r\n\r\ndef response_to_conductance_customize_optimize_torch(num_boundary, num_interior, edge, response):\r\n \"\"\"\r\n 根据自定义的顶点、边和电导,以及给定的响应矩阵,计算电导。优化算法GPU版本。\r\n :param num_boundary: int. 边界点个数。\r\n :param num_interior: int. 内点个数。\r\n :param edge: list. 边拓扑。\r\n :param response: numpy.ndarray. 响应矩阵。\r\n :return: numpy.ndarray. 电导\r\n \"\"\"\r\n\r\n \"\"\"numpy转torch\"\"\"\r\n response = torch.tensor(response, device=torch.device('cuda') if torch.cuda.is_available() else 'cpu')\r\n\r\n def vector_to_response(conductance):\r\n \"\"\"\r\n 根据给定电阻向量,计算响应矩阵。\r\n :param conductance: torch.Tensor. 电导向量。\r\n :return: torch.Tensor. 损失函数值,两矩阵之差的Frobenius范数平方。\r\n \"\"\"\r\n\r\n nonlocal num_boundary, num_interior, edge, response\r\n _, response_cal = conductance_to_kirchhoff_to_response_customize_torch(\r\n num_boundary, num_interior, edge, conductance\r\n )\r\n response_diff = response - response_cal # 矩阵之差\r\n loss = torch.norm(response_diff) ** 2 # 损失为Frobenius范数平方\r\n\r\n return loss\r\n\r\n \"\"\"优化\"\"\"\r\n x0 = 1 / (torch.ones([len(edge)], device=torch.device('cuda') if torch.cuda.is_available() else 'cpu') * 25) # 初始解\r\n x0.requires_grad = True # 可求梯度\r\n optimizer = torch.optim.Adam([x0], lr=1e-3) # 优化器\r\n for _ in range(1000):\r\n y = vector_to_response(x0)\r\n optimizer.zero_grad()\r\n y.backward()\r\n optimizer.step()\r\n # if y <= 1e-10:\r\n # break\r\n\r\n return x0.detach().cpu().numpy()\r\n\r\n\r\n\"\"\"自定义网络,还原误差\"\"\"\r\n\r\n\r\ndef resistance_diff_customize(conductance_a, conductance_b):\r\n \"\"\"\r\n 计算电阻向量之差。\r\n :param conductance_a: numpy.ndarray. 电导a。\r\n :param conductance_b: numpy.ndarray. 电导b。\r\n :return: numpy.float64, numpy.float64, numpy.float64. 
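# Hedged sketch of the gradient-based inversion above: keep the conductances in a tensor
# with requires_grad, rebuild the response each step, and let Adam minimize the squared
# Frobenius mismatch. Toy single-resistor "network" (true conductance 1/25 S) for brevity:
import torch

target = torch.tensor([[0.04, -0.04], [-0.04, 0.04]])   # measured response
g = torch.full((1,), 1 / 30.0, requires_grad=True)
opt = torch.optim.Adam([g], lr=1e-3)
for _ in range(2000):
    response = torch.stack([torch.cat([g, -g]), torch.cat([-g, g])])
    loss = torch.norm(target - response) ** 2
    opt.zero_grad()
    loss.backward()
    opt.step()
print(float(g))   # approaches 0.04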
向量之差绝对值的总和,最大值,均值。\r\n \"\"\"\r\n\r\n diff = np.abs(1 / conductance_a - 1 / conductance_b)\r\n diff_sum = np.sum(diff)\r\n diff_max = np.max(diff)\r\n diff_mean = np.mean(diff)\r\n\r\n return diff_sum, diff_max, diff_mean\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n \"\"\"n*n网络\"\"\"\r\n \"\"\"随机生成电导\"\"\"\r\n # row = 1 / np.random.uniform(20, 30, (4, 5))\r\n # col = 1 / np.random.uniform(20, 30, (5, 4))\r\n # _, re = conductance_to_kirchhoff_to_response(row, col)\r\n # re = re + np.random.normal(0, 0.000000001, (16, 16))\r\n \"\"\"依次测试还原算法\"\"\"\r\n # row_cal, col_cal = response_to_conductance(re) # 基本算法\r\n # print(resistance_diff(row, col, row_cal, col_cal))\r\n # row_cal, col_cal = response_to_conductance_sub(re) # 子图算法\r\n # print(resistance_diff(row, col, row_cal, col_cal))\r\n # row_cal, col_cal = response_to_conductance_improved(re) # 改进基本算法\r\n # print(resistance_diff(row, col, row_cal, col_cal))\r\n # row_cal, col_cal = response_to_conductance_optimize(re) # 优化算法\r\n # print(resistance_diff(row, col, row_cal, col_cal))\r\n\r\n \"\"\"六芒星网络\"\"\"\r\n # connect = [\r\n # [0, 1], [1, 2], [2, 10], [10, 3], [3, 4], [4, 5],\r\n # [5, 11], [11, 6], [6, 7], [7, 8], [8, 9], [9, 0],\r\n # [1, 10], [10, 4], [4, 11], [11, 7], [7, 9], [9, 1],\r\n # [1, 12], [10, 12], [4, 12], [11, 12], [7, 12], [9, 12]\r\n # ]\r\n \"\"\"随机生成电导\"\"\"\r\n # con = 1 / np.random.uniform(20, 30, 24)\r\n # _, re = conductance_to_kirchhoff_to_response_customize(9, 4, connect, con)\r\n \"\"\"测试还原算法\"\"\"\r\n # con_cal = response_to_conductance_customize_optimize_torch(9, 4, connect, re)\r\n # print(resistance_diff_customize(con, con_cal))\r\n","repo_name":"AlexNovus/LeiZu2","sub_path":"resistance_recovery/elecnet.py","file_name":"elecnet.py","file_ext":"py","file_size_in_byte":34579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72174982897","text":"from scripts.pgengine import ImgsManager, ObjsManager, Obj, Window, Camera, Clock, ParticleManager, SoundManager, load_map\nfrom scripts.objs.player import Player\nfrom scripts.objs.cactus1 import Cactus1\nfrom scripts.objs.shot import Shot\nfrom scripts.objs.lifebar import LifeBar\n\ndef reset():\n sound_mng.stop_music()\n particles_mng.particles = []\n camera.shake_ticks = 0\n\n#WINDOW\nwindow = Window(900, 600, 'CactusBoss')\ndisplay = window.display\n\n#CAMERA\ncamera = Camera(window)\ncamera.delay_x = 20\ncamera.delay_y = 20\n\n#CLOCK\nclock = Clock()\nclock.fps = 144\n\n#TILE\nTILE_SIZE = 16\n\n#SOUNDS\nsound_mng = SoundManager()\nsound_mng.add_sounds_from_past('assets/sounds/sfx/')\nsound_mng.add_sounds_from_past('assets/sounds/musics/', 'mp3')\nsound_mng.sounds['jump'].set_volume(0.5)\nsound_mng.sounds['shot'].set_volume(0.5)\nsound_mng.sounds['shot2'].set_volume(0.5)\nsound_mng.sounds['cactus_powerup'].set_volume(0.3)\nsound_mng.sounds['explosion'].set_volume(0.4)\n\n#IMAGES\nimgs_path = 'assets/images/'\nimgs_mng = ImgsManager()\nimgs_mng.add_imgs_from_past(imgs_path)\nimgs_mng.add_animations_from_past(imgs_path + 'animations/')\nIMGS = imgs_mng.imgs\nANIMATIONS = imgs_mng.animations\n\n#OBJS\nobjs_mng = ObjsManager()\nobjs_mng.add_obj('mountains1', Obj(IMGS['background1']))\nobjs_mng.add_obj('mountains2', Obj(IMGS['background2']))\nobjs_mng.add_obj('tile_center', Obj(IMGS['tile1']))\nobjs_mng.add_obj('tile_ground', Obj(IMGS['tile2']))\nobjs_mng.add_obj('tile_left', Obj(IMGS['tile3']))\nobjs_mng.add_obj('tile_right', Obj(IMGS['tile4']))\nobjs_mng.add_obj('tile_f_center', 
Obj(IMGS['tile5']))\nobjs_mng.add_obj('tile_f_left', Obj(IMGS['tile6']))\nobjs_mng.add_obj('tile_f_right', Obj(IMGS['tile7']))\nobjs_mng.add_obj('player', Player(ANIMATIONS['player_idle'][0]))\nobjs_mng.add_obj('cactus1', Cactus1(ANIMATIONS['cactus_idle'][0]))\nobjs_mng.add_obj('thorn', Shot(IMGS['thorn']))\nobjs_mng.add_obj('lifebar', LifeBar(IMGS['lifebar_b']))\nobjs_mng.add_obj('liferect_r', Obj(IMGS['liferect_r']))\nobjs_mng.add_obj('liferect_g', Obj(IMGS['liferect_g']))\nobjs_mng.add_obj('player_life', Obj(IMGS['player_life']))\n\n#OBJS CONFIG\nOBJS = objs_mng.objs.copy()\n #bg\nOBJS['mountains1'].y = 140\nOBJS['mountains1'].x = display.get_width() / 2\nOBJS['mountains1'].height = 150\nOBJS['mountains1'].width = int(display.get_width() * 1.5)\nOBJS['mountains2'].y = 90\nOBJS['mountains2'].x = display.get_width() / 2\nOBJS['mountains2'].height = 140\nOBJS['mountains2'].width = int(display.get_width() * 1.5)\n\n #player\nOBJS['player'].add_imgs_data(ANIMATIONS['player_idle'], 'idle', [15, 15])\nOBJS['player'].add_imgs_data(ANIMATIONS['player_run'], 'run', [7, 7])\nOBJS['player'].add_imgs_data(ANIMATIONS['player_damage'], 'damage', [2, 10])\n\n #cactuslifebar\nOBJS['lifebar'].y = 10\nOBJS['lifebar'].x = display.get_width() / 2\nOBJS['lifebar'].height = 5\nOBJS['lifebar'].width = 200\nOBJS['lifebar'].add_liferect(OBJS['liferect_r'])\nOBJS['lifebar'].add_liferect(OBJS['liferect_g'])\n #cactus1\nOBJS['cactus1'].add_imgs_data(ANIMATIONS['cactus_idle'], 'idle', [10, 10, 10, 10])\nOBJS['cactus1'].add_imgs_data(ANIMATIONS['cactus_atack'], 'atack', [10, 10, 10, 10])\nOBJS['cactus1'].add_imgs_data(ANIMATIONS['cactus_damage'], 'damage', [3, 3, 3, 3])\nOBJS['cactus1'].add_imgs_data(ANIMATIONS['cactus_a_idle'], 'a_idle', [10, 10, 10, 10])\nOBJS['cactus1'].add_imgs_data(ANIMATIONS['cactus_a_atack'], 'a_atack', [10, 10, 10, 10])\n\n#PARTICLES\nparticles_mng = ParticleManager()\n\nMAPS_PATH = {\n 'tutorial' : 'assets/maps/tutorial_map.txt',\n 'level1' : 'assets/maps/map1.txt',\n 'level2' : 'assets/maps/map2.txt'\n}\n","repo_name":"EuReinoso/CactusBoss","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28701903371","text":"OUTPUT_PATTERNS = {\n 0: \"abcefg\",\n 1: \"cf\",\n 2: \"acdeg\",\n 3: \"acdfg\",\n 4: \"bcdf\",\n 5: \"abdfg\",\n 6: \"abdefg\",\n 7: \"acf\",\n 8: \"abcdefg\",\n 9: \"abcdfg\",\n}\n\nwith open(\"input.txt\") as input_file:\n lines = input_file.read().splitlines()\n\n signal_patterns = []\n ouput_values = []\n for l in lines:\n sp, ov = l.split(\"|\")\n signal_patterns.extend(sp.split())\n ouput_values.extend(ov.split())\n\n count = {i: 0 for i in OUTPUT_PATTERNS.keys()}\n for o in ouput_values:\n for k, v in OUTPUT_PATTERNS.items():\n if len(v) == len(o):\n count[k] += 1\n print(count)\n print(count[1] + count[4] + count[7] + count[8])\n","repo_name":"niklasschloegel/advent-of-code","sub_path":"2021/day-08/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23118299831","text":"\"\"\"\nThe arraypad module contains a group of functions to pad values onto the edges\nof an n-dimensional array.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.core.overrides import array_function_dispatch\n\n\n__all__ = 
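# A minimal sketch (hypothetical helper, pygame assumed) of the directory-driven loading
# pattern behind the managers above: scan a folder and key each asset by its file stem so
# game objects can refer to images and sounds by name.
import os
import pygame

def load_assets(path, loader, ext=".png"):
    assets = {}
    for name in sorted(os.listdir(path)):
        if name.endswith(ext):
            assets[name[:-len(ext)]] = loader(os.path.join(path, name))
    return assets

# usage (paths assumed to exist):
# IMGS = load_assets('assets/images/', pygame.image.load)
# SFX = load_assets('assets/sounds/sfx/', pygame.mixer.Sound, ext='.wav')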
['pad']\n\n\n###############################################################################\n# Private utility functions.\n\n\ndef _arange_ndarray(arr, shape, axis, reverse=False):\n \"\"\"\n Create an ndarray of `shape` with increments along specified `axis`\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n shape : tuple of ints\n Shape of desired array. Should be equivalent to `arr.shape` except\n `shape[axis]` which may have any positive value.\n axis : int\n Axis to increment along.\n reverse : bool\n If False, increment in a positive fashion from 1 to `shape[axis]`,\n inclusive. If True, the bounds are the same but the order reversed.\n\n Returns\n -------\n padarr : ndarray\n Output array sized to pad `arr` along `axis`, with linear range from\n 1 to `shape[axis]` along specified `axis`.\n\n Notes\n -----\n The range is deliberately 1-indexed for this specific use case. Think of\n this algorithm as broadcasting `np.arange` to a single `axis` of an\n arbitrarily shaped ndarray.\n\n \"\"\"\n initshape = tuple(1 if i != axis else shape[axis]\n for (i, x) in enumerate(arr.shape))\n if not reverse:\n padarr = np.arange(1, shape[axis] + 1)\n else:\n padarr = np.arange(shape[axis], 0, -1)\n padarr = padarr.reshape(initshape)\n for i, dim in enumerate(shape):\n if padarr.shape[i] != dim:\n padarr = padarr.repeat(dim, axis=i)\n return padarr\n\n\ndef _round_ifneeded(arr, dtype):\n \"\"\"\n Rounds arr inplace if destination dtype is integer.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dtype : dtype\n The dtype of the destination array.\n\n \"\"\"\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)\n\n\ndef _slice_at_axis(shape, sl, axis):\n \"\"\"\n Construct a slice tuple the length of shape, with sl at the specified axis\n \"\"\"\n slice_tup = (slice(None),)\n return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)\n\n\ndef _slice_first(shape, n, axis):\n \"\"\" Construct a slice tuple to take the first n elements along axis \"\"\"\n return _slice_at_axis(shape, slice(0, n), axis=axis)\n\n\ndef _slice_last(shape, n, axis):\n \"\"\" Construct a slice tuple to take the last n elements along axis \"\"\"\n dim = shape[axis] # doing this explicitly makes n=0 work\n return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)\n\n\ndef _do_prepend(arr, pad_chunk, axis):\n return np.concatenate(\n (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)\n\n\ndef _do_append(arr, pad_chunk, axis):\n return np.concatenate(\n (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)\n\n\ndef _prepend_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Prepend constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n val : scalar\n Constant value to use. 
For best results should be of type `arr.dtype`;\n if not, it will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` prepended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)\n\n\ndef _append_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Append constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n val : scalar\n Constant value to use. For best results should be of type `arr.dtype`;\n if not, it will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` appended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)\n\n\n\ndef _prepend_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Prepend `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values prepended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = _slice_first(arr.shape, 1, axis=axis)\n edge_arr = arr[edge_slice]\n return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)\n\n\ndef _append_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Append `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values appended along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = _slice_last(arr.shape, 1, axis=axis)\n edge_arr = arr[edge_slice]\n return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)\n\n\ndef _prepend_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Prepend linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n end : scalar\n Constant value to use. For best results should be of type `arr.dtype`;\n if not, it will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Slice a chunk from the edge to calculate stats on and extract edge\n edge_slice = _slice_first(arr.shape, 1, axis=axis)\n edge = arr[edge_slice]\n\n ramp_arr = np.linspace(\n start=end,\n stop=edge.squeeze(axis),\n num=pad_amt,\n endpoint=False,\n dtype=arr.dtype,\n axis=axis\n )\n\n return _do_prepend(arr, ramp_arr, axis)\n\n\ndef _append_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Append linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n end : scalar\n Constant value to use. For best results should be of type `arr.dtype`;\n if not, it will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Slice a chunk from the edge to calculate stats on and extract edge\n edge_slice = _slice_last(arr.shape, 1, axis=axis)\n edge = arr[edge_slice]\n\n ramp_arr = np.linspace(\n start=end,\n stop=edge.squeeze(axis),\n num=pad_amt,\n endpoint=False,\n dtype=arr.dtype,\n axis=axis\n )\n # Reverse linear space in appropriate dimension\n ramp_arr = ramp_arr[\n _slice_at_axis(ramp_arr.shape, slice(None, None, -1), axis)\n ]\n\n return _do_append(arr, ramp_arr, axis)\n\n\ndef _prepend_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` maximum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. The\n prepended region is the maximum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n max_slice = _slice_first(arr.shape, num, axis=axis)\n\n # Extract slice, calculate max\n max_chunk = arr[max_slice].max(axis=axis, keepdims=True)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)\n\n\ndef _append_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Pad one `axis` of `arr` with the maximum of the last `num` elements.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
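For example, appending with `pad_amt=2` and `num=2` to `[1, 5, 3]` appends two copies of `max([5, 3]) == 5`. 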
The\n appended region is the maximum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n if num is not None:\n max_slice = _slice_last(arr.shape, num, axis=axis)\n else:\n max_slice = tuple(slice(None) for x in arr.shape)\n\n # Extract slice, calculate max\n max_chunk = arr[max_slice].max(axis=axis, keepdims=True)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)\n\n\ndef _prepend_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. The\n prepended region is the mean of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n mean_slice = _slice_first(arr.shape, num, axis=axis)\n\n # Extract slice, calculate mean\n mean_chunk = arr[mean_slice].mean(axis, keepdims=True)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _append_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
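For example, appending with `pad_amt=2` and `num=2` to `[1, 2, 3]` appends two copies of `mean([2, 3]) == 2.5`, rounded when the dtype is integral. 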
The\n appended region is the mean of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n if num is not None:\n mean_slice = _slice_last(arr.shape, num, axis=axis)\n else:\n mean_slice = tuple(slice(None) for x in arr.shape)\n\n # Extract slice, calculate mean\n mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _prepend_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. The\n prepended region is the median of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n med_slice = _slice_first(arr.shape, num, axis=axis)\n\n # Extract slice, calculate median\n med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _append_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
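For example, appending with `pad_amt=1` and `num=3` to `[1, 9, 2]` appends one copy of `median([1, 9, 2]) == 2`. 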
The\n appended region is the median of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n if num is not None:\n med_slice = _slice_last(arr.shape, num, axis=axis)\n else:\n med_slice = tuple(slice(None) for x in arr.shape)\n\n # Extract slice, calculate median\n med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _prepend_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` minimum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. The\n prepended region is the minimum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n min_slice = _slice_first(arr.shape, num, axis=axis)\n\n # Extract slice, calculate min\n min_chunk = arr[min_slice].min(axis=axis, keepdims=True)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _append_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` minimum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
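For example, appending with `pad_amt=2` and `num=2` to `[3, 1, 4]` appends two copies of `min([1, 4]) == 1`. 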
The\n appended region is the minimum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n if num is not None:\n min_slice = _slice_last(arr.shape, num, axis=axis)\n else:\n min_slice = tuple(slice(None) for x in arr.shape)\n\n # Extract slice, calculate min\n min_chunk = arr[min_slice].min(axis=axis, keepdims=True)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)\n\n\ndef _pad_ref(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by reflection.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of reflection; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with reflected\n values from the original array.\n\n Notes\n -----\n This algorithm does not pad with repetition, i.e. the edges are not\n repeated in the reflection. For that behavior, use `mode='symmetric'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)\n\n ref_chunk1 = arr[ref_slice]\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = _slice_first(arr.shape, 1, axis=axis)\n edge_chunk = arr[edge_slice1]\n ref_chunk1 = 2 * edge_chunk - ref_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n start = arr.shape[axis] - pad_amt[1] - 1\n end = arr.shape[axis] - 1\n ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)\n rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)\n ref_chunk2 = arr[ref_slice][rev_idx]\n\n if 'odd' in method:\n edge_slice2 = _slice_last(arr.shape, 1, axis=axis)\n edge_chunk = arr[edge_slice2]\n ref_chunk2 = 2 * edge_chunk - ref_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)\n\n\ndef _pad_sym(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by symmetry.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of symmetry; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n 
-------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with symmetric\n values from the original array.\n\n Notes\n -----\n This algorithm DOES pad with repetition, i.e. the edges are repeated.\n For padding without repeated edges, use `mode='reflect'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)\n rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)\n sym_chunk1 = arr[sym_slice][rev_idx]\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = _slice_first(arr.shape, 1, axis=axis)\n edge_chunk = arr[edge_slice1]\n sym_chunk1 = 2 * edge_chunk - sym_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)\n sym_chunk2 = arr[sym_slice][rev_idx]\n\n if 'odd' in method:\n edge_slice2 = _slice_last(arr.shape, 1, axis=axis)\n edge_chunk = arr[edge_slice2]\n sym_chunk2 = 2 * edge_chunk - sym_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)\n\n\ndef _pad_wrap(arr, pad_amt, axis=-1):\n \"\"\"\n Pad `axis` of `arr` via wrapping.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. 
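For example, wrapping `[1, 2, 3]` with `pad_amt=(2, 2)` gives `[2, 3, 1, 2, 3, 1, 2]`. 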
Both regions are padded with wrapped values\n from the opposite end of `axis`.\n\n Notes\n -----\n This method of padding is also known as 'tile' or 'tiling'.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)\n wrap_chunk1 = arr[wrap_slice]\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)\n wrap_chunk2 = arr[wrap_slice]\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)\n\n\ndef _as_pairs(x, ndim, as_index=False):\n \"\"\"\n Broadcast `x` to an array with the shape (`ndim`, 2).\n\n A helper function for `pad` that prepares and validates arguments like\n `pad_width` for iteration in pairs.\n\n Parameters\n ----------\n x : {None, scalar, array-like}\n The object to broadcast to the shape (`ndim`, 2).\n ndim : int\n Number of pairs the broadcasted `x` will have.\n as_index : bool, optional\n If `x` is not None, try to round each element of `x` to an integer\n (dtype `np.intp`) and ensure every element is positive.\n\n Returns\n -------\n pairs : nested iterables, shape (`ndim`, 2)\n The broadcasted version of `x`.\n\n Raises\n ------\n ValueError\n If `as_index` is True and `x` contains negative elements.\n Or if `x` is not broadcastable to the shape (`ndim`, 2).\n \"\"\"\n if x is None:\n # Pass through None as a special case, otherwise np.round(x) fails\n # with an AttributeError\n return ((None, None),) * ndim\n\n x = np.array(x)\n if as_index:\n x = np.round(x).astype(np.intp, copy=False)\n\n if x.ndim < 3:\n # Optimization: Possibly use faster paths for cases where `x` has\n # only 1 or 2 elements. `np.broadcast_to` could handle these as well\n # but is currently slower\n\n if x.size == 1:\n # x was supplied as a single value\n x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2\n if as_index and x < 0:\n raise ValueError(\"index can't contain negative values\")\n return ((x[0], x[0]),) * ndim\n\n if x.size == 2 and x.shape != (2, 1):\n # x was supplied with a single value for each side,\n # except the case when each dimension has a single value\n # which should be broadcasted to a pair,\n # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]\n x = x.ravel() # Ensure x[0], x[1] works\n if as_index and (x[0] < 0 or x[1] < 0):\n raise ValueError(\"index can't contain negative values\")\n return ((x[0], x[1]),) * ndim\n\n if as_index and x.min() < 0:\n raise ValueError(\"index can't contain negative values\")\n\n # Converting the array with `tolist` seems to improve performance\n # when iterating and indexing the result (see usage in `pad`)\n return np.broadcast_to(x, (ndim, 2)).tolist()\n\n\n###############################################################################\n# Public functions\n\n\ndef _pad_dispatcher(array, pad_width, mode, **kwargs):\n return (array,)\n\n\n@array_function_dispatch(_pad_dispatcher, module='numpy')\ndef pad(array, pad_width, mode, **kwargs):\n \"\"\"\n Pads an array.\n\n Parameters\n ----------\n array : array_like of rank N\n Input array\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ((before_1, after_1), ... (before_N, after_N)) unique pad widths\n for each axis.\n ((before, after),) yields same before and after pad for each axis.\n (pad,) or int is a shortcut for before = after = pad width for all\n axes.\n mode : str or function\n One of the following string values or a user supplied function.\n\n 'constant'\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n \n Padding function, see Notes.\n stat_length : sequence or int, optional\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n\n ((before_1, after_1), ... (before_N, after_N)) unique statistic\n lengths for each axis.\n\n ((before, after),) yields same before and after statistic lengths\n for each axis.\n\n (stat_length,) or int is a shortcut for before = after = statistic\n length for all axes.\n\n Default is ``None``, to use the entire axis.\n constant_values : sequence or int, optional\n Used in 'constant'. The values to set the padded values for each\n axis.\n\n ((before_1, after_1), ... (before_N, after_N)) unique pad constants\n for each axis.\n\n ((before, after),) yields same before and after constants for each\n axis.\n\n (constant,) or int is a shortcut for before = after = constant for\n all axes.\n\n Default is 0.\n end_values : sequence or int, optional\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n\n ((before_1, after_1), ... 
(before_N, after_N)) unique end values\n for each axis.\n\n ((before, after),) yields same before and after end values for each\n axis.\n\n (constant,) or int is a shortcut for before = after = end value for\n all axes.\n\n Default is 0.\n reflect_type : {'even', 'odd'}, optional\n Used in 'reflect', and 'symmetric'. The 'even' style is the\n default with an unaltered reflection around the edge value. For\n the 'odd' style, the extended part of the array is created by\n subtracting the reflected values from two times the edge value.\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n For an array with rank greater than 1, some of the padding of later\n axes is calculated from padding of previous axes. This is easiest to\n think about with a rank 2 array where the corners of the padded array\n are calculated by using padded values from the first axis.\n\n The padding function, if used, should return a rank 1 array equal in\n length to the vector argument with padded values replaced. It has the\n following signature::\n\n padding_func(vector, iaxis_pad_width, iaxis, kwargs)\n\n where\n\n vector : ndarray\n A rank 1 array already padded with zeros. Padded values are\n vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].\n iaxis_pad_width : tuple\n A 2-tuple of ints, iaxis_pad_width[0] represents the number of\n values padded at the beginning of vector where\n iaxis_pad_width[1] represents the number of values padded at\n the end of vector.\n iaxis : int\n The axis currently being calculated.\n kwargs : dict\n Any keyword arguments the function requires.\n\n Examples\n --------\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6))\n array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])\n\n >>> np.pad(a, (2, 3), 'edge')\n array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])\n\n >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))\n array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])\n\n >>> np.pad(a, (2,), 'maximum')\n array([5, 5, 1, 2, 3, 4, 5, 5, 5])\n\n >>> np.pad(a, (2,), 'mean')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> np.pad(a, (2,), 'median')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> a = [[1, 2], [3, 4]]\n >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')\n array([[1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [3, 3, 3, 4, 3, 3, 3],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1]])\n\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2, 3), 'reflect')\n array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])\n\n >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')\n array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n\n >>> np.pad(a, (2, 3), 'symmetric')\n array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])\n\n >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')\n array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])\n\n >>> np.pad(a, (2, 3), 'wrap')\n array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])\n\n >>> def pad_with(vector, pad_width, iaxis, kwargs):\n ... pad_value = kwargs.get('padder', 10)\n ... vector[:pad_width[0]] = pad_value\n ... vector[-pad_width[1]:] = pad_value\n ... 
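# edits above pad the vector in place; returning it keeps np.apply_along_axis happy\n ... 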
return vector\n >>> a = np.arange(6)\n >>> a = a.reshape((2, 3))\n >>> np.pad(a, 2, pad_with)\n array([[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]])\n >>> np.pad(a, 2, pad_with, padder=100)\n array([[100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100],\n [100, 100, 0, 1, 2, 100, 100],\n [100, 100, 3, 4, 5, 100, 100],\n [100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100]])\n \"\"\"\n if not np.asarray(pad_width).dtype.kind == 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n narray = np.array(array)\n pad_width = _as_pairs(pad_width, narray.ndim, as_index=True)\n\n allowedkwargs = {\n 'constant': ['constant_values'],\n 'edge': [],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n 'wrap': [],\n }\n\n kwdefaults = {\n 'stat_length': None,\n 'constant_values': 0,\n 'end_values': 0,\n 'reflect_type': 'even',\n }\n\n if isinstance(mode, np.compat.basestring):\n # Make sure have allowed kwargs appropriate for mode\n for key in kwargs:\n if key not in allowedkwargs[mode]:\n raise ValueError('%s keyword not in allowed keywords %s' %\n (key, allowedkwargs[mode]))\n\n # Set kwarg defaults\n for kw in allowedkwargs[mode]:\n kwargs.setdefault(kw, kwdefaults[kw])\n\n # Need to only normalize particular keywords.\n for i in kwargs:\n if i == 'stat_length':\n kwargs[i] = _as_pairs(kwargs[i], narray.ndim, as_index=True)\n if i in ['end_values', 'constant_values']:\n kwargs[i] = _as_pairs(kwargs[i], narray.ndim)\n else:\n # Drop back to old, slower np.apply_along_axis mode for user-supplied\n # vector function\n function = mode\n\n # Create a new padded array\n rank = list(range(narray.ndim))\n total_dim_increase = [np.sum(pad_width[i]) for i in rank]\n offset_slices = tuple(\n slice(pad_width[i][0], pad_width[i][0] + narray.shape[i])\n for i in rank)\n new_shape = np.array(narray.shape) + total_dim_increase\n newmat = np.zeros(new_shape, narray.dtype)\n\n # Insert the original array into the padded array\n newmat[offset_slices] = narray\n\n # This is the core of pad ...\n for iaxis in rank:\n np.apply_along_axis(function,\n iaxis,\n newmat,\n pad_width[iaxis],\n iaxis,\n kwargs)\n return newmat\n\n # If we get here, use new padding method\n newmat = narray.copy()\n\n # API preserved, but completely new algorithm which pads by building the\n # entire block to pad before/after `arr` with in one step, for each axis.\n if mode == 'constant':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['constant_values'])):\n newmat = _prepend_const(newmat, pad_before, before_val, axis)\n newmat = _append_const(newmat, pad_after, after_val, axis)\n\n elif mode == 'edge':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n\n elif mode == 'linear_ramp':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['end_values'])):\n newmat = _prepend_ramp(newmat, pad_before, before_val, axis)\n newmat = _append_ramp(newmat, pad_after, after_val, axis)\n\n elif mode == 'maximum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in 
enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_max(newmat, pad_before, chunk_before, axis)\n newmat = _append_max(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'mean':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)\n newmat = _append_mean(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'median':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_med(newmat, pad_before, chunk_before, axis)\n newmat = _append_med(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'minimum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_min(newmat, pad_before, chunk_before, axis)\n newmat = _append_min(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'reflect':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n if narray.shape[axis] == 0:\n # Axes with non-zero padding cannot be empty.\n if pad_before > 0 or pad_after > 0:\n raise ValueError(\"There aren't any elements to reflect\"\n \" in axis {} of `array`\".format(axis))\n # Skip zero padding on empty axes.\n continue\n\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n if ((pad_before > 0) or\n (pad_after > 0)) and newmat.shape[axis] == 1:\n # Extending singleton dimension for 'reflect' is legacy\n # behavior; it really should raise an error.\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n continue\n\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis] - 1\n while ((pad_before > safe_pad) or (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_ref(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'symmetric':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_sym(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'wrap':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. 
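For instance, wrapping an axis of length 3 by 8 proceeds in chunks bounded by the current, already-grown length. 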
We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)\n\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)\n\n return newmat\n","repo_name":"catboost/catboost","sub_path":"contrib/python/numpy/py2/numpy/lib/arraypad.py","file_name":"arraypad.py","file_ext":"py","file_size_in_byte":44316,"program_lang":"python","lang":"en","doc_type":"code","stars":7463,"dataset":"github-code","pt":"57"} +{"seq_id":"34625242891","text":"\"\"\"this defines the urlconf of the ig application\"\"\"\nfrom django.urls import path\n# from django.views.generic import TemplateView\n# from django.contrib.auth.decorators import login_required, permission_required\n\n# local imports\nfrom ig_app.views import HomeView, RegisterView, LoginView, LogoutView, PostView, ProfileView, LikeView\n\napp_name = 'igapp' # application namespace\nurlpatterns = [\n path('', HomeView.as_view(), name='index'),\n path('register/', RegisterView.as_view(), name='register'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n path('newpost/', PostView.as_view(), name='newpost'),\n path('profile//', ProfileView.as_view(), name='profile'),\n path('like/', LikeView.as_view(), name='like'),\n]\n","repo_name":"Willbeckh/iglive","sub_path":"ig_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36137579786","text":"from turtle import *\r\nfrom time import *\r\nfrom calendar import *\r\n\r\n\r\ndef draw_pointer(pointer,deg,length): \r\n pointer.ht()\r\n pointer.width(4)\r\n pointer.color(\"white\", \"white\")\r\n pointer.seth(deg)\r\n pointer.fd(length)\r\n pointer.begin_fill()\r\n pointer.left(90)\r\n pointer.fd(10)\r\n pointer.left(240)\r\n pointer.fd(20)\r\n pointer.left(240)\r\n pointer.fd(20)\r\n pointer.left(240)\r\n pointer.fd(10)\r\n pointer.end_fill()\r\n\r\n\r\ndef draw_clock():\r\n color(\"orange\", \"#FFBB00\")\r\n goto(0, -250)\r\n begin_fill()\r\n width(50)\r\n circle(250)\r\n end_fill()\r\n color(\"white\")\r\n pu()\r\n width(5)\r\n for i in range(1,61):\r\n home()\r\n seth(-i/5*30+90)\r\n fd(248)\r\n if int(i/5) == i/5:\r\n seth(-90)\r\n fd(14)\r\n write(str(int(i/5)), False, 'center', ('Arial', 22, 'normal'))\r\n else:\r\n pd()\r\n fd(1)\r\n pu()\r\n\r\n goto(0,0)\r\n dot(20, \"#FFFFFF\")\r\n ht()\r\n\r\ndef loop():\r\n tracer(0)\r\n draw_txt.reset()\r\n hour_pointer.reset()\r\n min_pointer.reset()\r\n sec_pointer.reset()\r\n h=localtime().tm_hour\r\n m=localtime().tm_min\r\n s=localtime().tm_sec\r\n hour_deg = -360/(12*60)*(60*h+m)+90\r\n draw_pointer(hour_pointer,hour_deg,160)\r\n min_deg = -6*m+90+s/6\r\n draw_pointer(min_pointer,min_deg,210)\r\n sec_deg = -6*s+90\r\n draw_pointer(sec_pointer, sec_deg, 240)\r\n draw_txt.pu()\r\n draw_txt.color(\"#FFFFFF\")\r\n draw_txt.ht()\r\n draw_txt.goto(0,-120)\r\n draw_txt.write(wdays[localtime().tm_wday]+'\\n\\n\\n\\n\\n\\n' +\r\n str(strftime(\"%Y-%m-%d\",localtime())),False,'center',('Arial',22,'normal'))\r\n tracer(1)\r\n 
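# re-arm the timer: ontimer() schedules the next loop() call in 500 ms\r\n    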
ontimer(loop,500)\r\n\r\n\r\ntracer(0)\r\nprint(month(localtime().tm_year,localtime().tm_mon))\r\nhour_pointer = Turtle()\r\nmin_pointer=Turtle()\r\nsec_pointer=Turtle()\r\ndraw_txt = Turtle()\r\nwdays = [' Monday ', ' Tuesday ', 'Wednesday',\r\n ' Thursday ', ' Friday ', ' Saturday', ' Sunday ']\r\ndraw_clock()\r\nloop()\r\ndone()\r\n","repo_name":"JackyLu0520/py","sub_path":"时钟.py","file_name":"时钟.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34485005254","text":"from datetime import timedelta\nfrom pathlib import Path\n\nimport click\nfrom discord import AllowedMentions, Color, Embed, File\n\nfrom juniorguru.cli.sync import main as cli\nfrom juniorguru.lib import discord_sync, loggers\nfrom juniorguru.lib.discord_club import (\n ClubClient,\n is_message_over_period_ago,\n parse_channel,\n)\nfrom juniorguru.lib.mutations import mutating_discord\nfrom juniorguru.models.base import db\nfrom juniorguru.models.club import ClubMessage\nfrom juniorguru.models.partner import Partnership\n\n\nIMAGES_DIR = Path(\"juniorguru/images\")\n\n\nlogger = loggers.from_path(__file__)\n\n\n@cli.sync_command(dependencies=[\"club-content\", \"partners\"])\n@click.option(\"--channel\", \"channel_id\", default=\"partners_list\", type=parse_channel)\n@click.option(\n \"--recreate-interval\",\n \"recreate_interval_days\",\n default=30,\n type=int,\n help=\"In days.\",\n)\ndef main(channel_id: int, recreate_interval_days: int):\n discord_sync.run(recreate_archive, channel_id, recreate_interval_days)\n\n\n@db.connection_context()\nasync def recreate_archive(\n client: ClubClient, channel_id: int, recreate_interval_days: int\n):\n partnerships = list(Partnership.active_listing())\n messages = ClubMessage.channel_listing(channel_id, by_bot=True)\n try:\n last_message = messages[-1]\n except IndexError:\n logger.info(\"No messages in the channel\")\n else:\n if is_message_over_period_ago(\n last_message, timedelta(days=recreate_interval_days)\n ):\n logger.info(\"Channel content is too old\")\n else:\n logger.info(\"Channel content is recent, skipping\")\n return\n\n channel = await client.fetch_channel(channel_id)\n with mutating_discord(channel) as proxy:\n await proxy.purge(limit=None)\n with mutating_discord(channel) as proxy:\n await proxy.send(\n (\n \"# Seznam partnerských firem\\n\\n\"\n \"Tyto firmy se podílejí na financování junior.guru. \"\n \"Můžeš se tady prokliknout na jejich stránky. \"\n \"Partnerství neznamená, že junior.guru doporučuje konkrétní kurzy, nebo že na ně nemáš psát recenze v klubu. \"\n \"\\n\\n\"\n \"Když sem partnerské firmy pošlou lidi, tak ti dostanou roli <@&837316268142493736> a k tomu ještě i roli pro konkrétní firmu, například <@&938306918097747968>. \"\n \"Role využívej a firmu označ, pokud po ní něco potřebuješ. \"\n \"Seznam firem je tady seřazený podle počtu lidí v klubu. 
\"\n ),\n suppress=True,\n allowed_mentions=AllowedMentions.none(),\n )\n partners = sorted(\n [partnership.partner for partnership in partnerships],\n key=lambda partner: (len(partner.list_members), partner.name),\n reverse=True,\n )\n for partner in partners:\n logger.info(f\"Posting {partner.name!r}\")\n embed = Embed(\n title=partner.name,\n url=partner.url,\n color=Color.dark_grey(),\n description=f\"Role: <@&{partner.role_id}>\\nČlenů: {len(partner.list_members)}\",\n )\n embed.set_thumbnail(url=f\"attachment://{Path(partner.poster_path).name}\")\n file = File(IMAGES_DIR / partner.poster_path)\n with mutating_discord(channel) as proxy:\n await proxy.send(\n embed=embed,\n file=file,\n allowed_mentions=AllowedMentions.none(),\n )\n","repo_name":"juniorguru/junior.guru","sub_path":"juniorguru/sync/club_partners_list.py","file_name":"club_partners_list.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"57"} +{"seq_id":"326244625","text":"import fnmatch\nimport os\nimport subprocess\nimport sys\nimport json\nfrom enum import Enum\nfrom chkbit import hashfile, hashtext\n\nVERSION = 2 # index version\nINDEX = \".chkbit\"\nIGNORE = \".chkbitignore\"\n\n\nclass Stat(Enum):\n ERR_DMG = \"DMG\"\n ERR_BITROT = \"DMG\" # legacy\n ERR_IDX = \"EIX\"\n WARN_OLD = \"old\"\n NEW = \"new\"\n UPDATE = \"upd\"\n OK = \"ok \"\n SKIP = \"skp\"\n INTERNALEXCEPTION = \"EXC\"\n FLAG_MOD = \"fmod\"\n\n\nclass Index:\n def __init__(self, path, files, *, log=None):\n self.path = path\n self.files = files\n self.old = {}\n self.new = {}\n self.ignore = []\n self.load_ignore()\n self.updates = []\n self.modified = True\n self.log = log\n\n @property\n def ignore_file(self):\n return os.path.join(self.path, IGNORE)\n\n @property\n def idx_file(self):\n return os.path.join(self.path, INDEX)\n\n def should_ignore(self, name):\n for ignore in self.ignore:\n if fnmatch.fnmatch(name, ignore):\n return True\n return False\n\n def _setmod(self):\n self.modified = True\n\n def _log(self, stat, name):\n if self.log:\n self.log(stat, os.path.join(self.path, name))\n\n # calc new hashes for this index\n def update(self, context):\n for name in self.files:\n if self.should_ignore(name):\n self._log(Stat.SKIP, name)\n continue\n\n a = context.hash_algo\n # check previously used hash\n if name in self.old:\n old = self.old[name]\n if \"md5\" in old:\n a = \"md5\" # legacy structure\n self.old[name] = {\"mod\": old[\"mod\"], \"a\": a, \"h\": old[\"md5\"]}\n elif \"a\" in old:\n a = old[\"a\"]\n self.new[name] = self._calc_file(name, a)\n\n # check/update the index (old vs new)\n def check_fix(self, force):\n for name in self.new.keys():\n if not name in self.old:\n self._log(Stat.NEW, name)\n self._setmod()\n continue\n\n a = self.old[name]\n b = self.new[name]\n amod = a[\"mod\"]\n bmod = b[\"mod\"]\n if a[\"h\"] == b[\"h\"]:\n # ok, if the content stays the same the mod time does not matter\n self._log(Stat.OK, name)\n if amod != bmod:\n self._setmod()\n continue\n\n if amod == bmod:\n # damage detected\n self._log(Stat.ERR_DMG, name)\n # replace with old so we don't loose the information on the next run\n # unless force is set\n if not force:\n self.new[name] = a\n else:\n self._setmod()\n elif amod < bmod:\n # ok, the file was updated\n self._log(Stat.UPDATE, name)\n self._setmod()\n elif amod > bmod:\n self._log(Stat.WARN_OLD, name)\n self._setmod()\n\n def _calc_file(self, name, a):\n path = os.path.join(self.path, name)\n info = 
os.stat(path)\n mtime = int(info.st_mtime * 1000)\n return {\"mod\": mtime, \"a\": a, \"h\": hashfile(path, a)}\n\n def save(self):\n if self.modified:\n data = {\"v\": VERSION, \"idx\": self.new}\n text = json.dumps(self.new, separators=(\",\", \":\"))\n data[\"idx_hash\"] = hashtext(text)\n\n with open(self.idx_file, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, separators=(\",\", \":\"))\n self.modified = False\n return True\n else:\n return False\n\n def load(self):\n if not os.path.exists(self.idx_file):\n return False\n self.modified = False\n with open(self.idx_file, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n if \"data\" in data:\n # extract old format from js version\n for item in json.loads(data[\"data\"]):\n self.old[item[\"name\"]] = {\n \"mod\": item[\"mod\"],\n \"a\": \"md5\",\n \"h\": item[\"md5\"],\n }\n elif \"idx\" in data:\n self.old = data[\"idx\"]\n text = json.dumps(self.old, separators=(\",\", \":\"))\n if data.get(\"idx_hash\") != hashtext(text):\n self.modified = True\n self._log(Stat.ERR_IDX, self.idx_file)\n return True\n\n def load_ignore(self):\n if not os.path.exists(self.ignore_file):\n return\n with open(self.ignore_file, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n\n self.ignore = list(\n filter(\n lambda x: x and x[0] != \"#\" and len(x.strip()) > 0, text.splitlines()\n )\n )\n","repo_name":"laktak/chkbit-py","sub_path":"chkbit/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"57"} +{"seq_id":"24636592315","text":"import logging\nimport os\n\nfrom opentelemetry.sdk.metrics import MeterProvider\n\nfrom splunk_otel.util import _is_truthy\n\nlogger = logging.getLogger(__name__)\n\n\ndef start_metrics() -> MeterProvider:\n # pylint: disable=import-outside-toplevel\n from opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor\n\n enabled = os.environ.get(\"OTEL_METRICS_ENABLED\", True)\n if not _is_truthy(enabled):\n logger.info(\"metering has been disabled with OTEL_METRICS_ENABLED=%s\", enabled)\n return None\n\n try:\n meter_provider = _configure_metrics()\n system_metrics = SystemMetricsInstrumentor()\n system_metrics.instrument(meter_provider=meter_provider)\n logger.debug(\"Instrumented runtime metrics\")\n return meter_provider\n except Exception as exc: # pylint:disable=broad-except\n logger.exception(\"Instrumenting of runtime metrics failed\")\n raise exc\n\n\ndef _configure_metrics() -> MeterProvider:\n # pylint: disable=import-outside-toplevel\n from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (\n OTLPMetricExporter,\n )\n from opentelemetry.metrics import set_meter_provider\n from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader\n\n metrics_exporter = OTLPMetricExporter()\n meter_provider = MeterProvider([PeriodicExportingMetricReader(metrics_exporter)])\n set_meter_provider(meter_provider)\n return meter_provider\n","repo_name":"signalfx/splunk-otel-python","sub_path":"splunk_otel/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"57"} +{"seq_id":"27300668919","text":"\"\"\"\nFlask web app connects to Mongo database.\nKeep a simple list of dated memoranda.\n\nRepresentation conventions for dates: \n - We use Arrow objects when we want to manipulate dates, but for all\n storage in database, in session or g objects, or 
anything else that\n needs a text representation. These sort in the\n same order as arrow date objects, and they are easy to convert to and from\n arrow date objects. (For display on screen, we use the 'humanize' filter\n below.) A time zone offset will be included in those strings. \n - User input/output is in local (to the server) time. \n\"\"\"\n\nimport flask\nfrom flask import g\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\n\nimport json\nimport logging\nimport sys\n\n# Date handling \nimport arrow # Replacement for datetime, based on moment.js\n# import datetime # But we may still need time\nfrom dateutil import tz # For interpreting local times\nimport datetime\n\n# Mongo database\nfrom pymongo import MongoClient\nimport secrets.admin_secrets\nimport secrets.client_secrets\nMONGO_CLIENT_URL = \"mongodb://{}:{}@localhost:{}/{}\".format(\n secrets.client_secrets.db_user,\n secrets.client_secrets.db_user_pw,\n secrets.admin_secrets.port, \n secrets.client_secrets.db)\n\n###\n# Globals\n###\nimport CONFIG\napp = flask.Flask(__name__)\napp.secret_key = CONFIG.secret_key\n\n####\n# Database connection per server process\n###\n\ntry: \n dbclient = MongoClient(MONGO_CLIENT_URL)\n db = getattr(dbclient, secrets.client_secrets.db)\n collection = db.dated\n\nexcept:\n print(\"Failure opening database. Is Mongo running? Correct password?\")\n sys.exit(1)\n\n\n\n###\n# Pages\n###\n\n@app.route(\"/\", methods=['POST'])\n@app.route('/index', methods=['POST'])\ndef addremove_Memo():\n if request.form['options'] == \"Add\":\n \t record = { \"type\" : \"dated_memo\",\n \t \"date\" : arrow.get(request.form['memoDate'], \"YYYY/M/D\", tzinfo=tz.tzlocal()).naive,\n \"text\" : request.form['memoMake']}\n \t collection.insert(record)\n if request.form['options'] == \"Remove\":\n db = request.form['memoMake']\n collection.remove( { \"text\" : db } )\n\n return index()\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n app.logger.debug(\"Main page entry\")\n g.memos = get_memos()\n for memo in g.memos: \n app.logger.debug(\"Memo: \" + str(memo))\n return flask.render_template('index.html')\n\n\n\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n app.logger.debug(\"Page not found\")\n return flask.render_template('page_not_found.html',\n badurl=request.base_url,\n linkback=url_for(\"index\")), 404\n\n#################\n#\n# Functions used within the templates\n#\n#################\n\n\n@app.template_filter( 'humanize' )\ndef humanize_arrow_date( date ):\n \"\"\"\n Date is internal UTC ISO format string.\n Output should be \"today\", \"yesterday\", \"in 5 days\", etc.\n Arrow will try to humanize down to the minute, so we\n need to catch 'today' as a special case. 
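For example, a memo dated two days from now should render as \"in 2 days\", a memo for tomorrow as \"Tomorrow\", and yesterday's memo as \"Yesterday\".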
\n \"\"\"\n try:\n now = arrow.utcnow().to('local').replace(hour=1, minute=0, second=0, microsecond=0) #the current date, but not time\n then = arrow.get(date)\n\n if then.tzinfo==None:\n then.to('local').replace(hour=1, minute=0, second=0, microsecond=0, tzinfo=tz.tzlocal()) #the current date, in proper tz\n else:\n then.replace(hour=1,minute=0, second=0, microsecond=0) #if the tz was read as local (from html form)\n\n tomorrow = now.replace(days=+1) #cases for rounding humanized dates\n yesterday = now.replace(days=-1) \n \n if then.date() == now.date():\n human = \"Today\"\n elif then.date() == tomorrow.date():\n human = \"Tomorrow\"\n elif then.date() == yesterday.date():\n human = \"Yesterday\"\n else:\n human = then.humanize(now)\n if human == \"in a day\":\n human = \"Tomorrow\"\n\n except: \n human = date\n return human\n\n############\n#\n# Functions for testing purposes\n#\n############\n\ndef add_memo(db, mem):\n \"\"\"\n Adds a memo to our collection\n \"\"\"\n db.insert(mem)\n\ndef remove_memo(db, txt):\n \"\"\"\n Removes a memo containing the text\n \"\"\"\n db.remove( { \"text\" : txt } ) \n\n\n#############\n#\n# Functions available to the page code above\n#\n##############\ndef get_memos():\n \"\"\"\n Returns all memos in the database, in a form that\n can be inserted directly in the 'session' object.\n \"\"\"\n records = [ ]\n for record in collection.find( { \"type\": \"dated_memo\" } ):\n record['date'] = arrow.get(record['date']).isoformat()\n del record['_id']\n records.append(record)\n return sorted(records, key=lambda entry : entry['date']) #sorts by date\n\n\nif __name__ == \"__main__\":\n app.debug=CONFIG.DEBUG\n app.logger.setLevel(logging.DEBUG)\n app.run(port=CONFIG.PORT,host=\"0.0.0.0\")\n\n \n","repo_name":"zenranda/proj6-mongod","sub_path":"flask_main.py","file_name":"flask_main.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10321956439","text":"\"\"\"\nSlack Events Blueprint\n\nThis module provides routes for handling incoming Slack events and interactive actions.\n\"\"\"\n\nimport json\n\nfrom flask import Blueprint, request, Response\nfrom slack_sdk.signature import SignatureVerifier\n\nfrom src.environment import env\nfrom src.services.events_service.events_service import handle_events\nfrom src.services.interactive_service.interactive_service import handle_actions\n\nslack_events_blueprint = Blueprint(\"slack_events\", __name__)\nsignature_verifier = SignatureVerifier(env.get_signing_secret())\n\n\n@slack_events_blueprint.route(\"/slack/events\", methods=[\"POST\"])\ndef handle_slack_events() -> tuple[Response, int]:\n \"\"\"\n Handle incoming Slack events received at the \"/slack/events\" endpoint.\n\n This function verifies the incoming request, processes the event payload,\n and delegates handling to appropriate event and action handlers.\n\n Returns:\n tuple[Response, int]: A tuple containing a response object and an HTTP status code.\n \"\"\"\n if not signature_verifier.is_valid_request(request.get_data(), request.headers):\n return Response(), 403\n\n data = request.get_json()\n\n if data.get(\"event\") is not None:\n response = handle_events(data[\"event\"])\n if response is not None:\n return Response(), 200\n\n return Response(), 200\n\n\n@slack_events_blueprint.route(\"/interactive-endpoint\", methods=[\"POST\"])\ndef interactive_endpoint() -> tuple[Response, int]:\n \"\"\"\n Handle incoming interactive actions received at the 
\"/interactive-endpoint\" endpoint.\n\n This function processes interactive action payloads, and\n delegates handling to appropriate action.\n\n Returns:\n tuple[Response, int]: A tuple containing a response object and an HTTP status code.\n \"\"\"\n\n try:\n payload = json.loads(request.form[\"payload\"])\n except Exception as ex:\n print(ex)\n payload = request.get_json()[\"payload\"]\n\n print(json.dumps(payload, indent=4))\n\n if not payload[\"user\"]:\n return Response(), 200\n\n # Actions flow\n if payload.get(\"actions\") is not None:\n result = handle_actions(payload)\n if result is not None:\n return Response(), 200\n\n return Response(), 200\n","repo_name":"Gushono/bot-slack","sub_path":"src/controller/slack_events_controller.py","file_name":"slack_events_controller.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7239533933","text":"\"\"\"genre in book changed\n\nRevision ID: 3035567744cb\nRevises: 80047b60ec81\nCreate Date: 2019-07-01 15:26:45.592426\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '3035567744cb'\ndown_revision = '80047b60ec81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('books', 'genre')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('books', sa.Column('genre', postgresql.ENUM('Comedy', 'Drama', 'Romance', 'Social', 'Religious', 'Historical', name='genre'), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","repo_name":"msdmazarei/book-store-backend","sub_path":"alembic/old_versions/3035567744cb_genre_in_book_changed.py","file_name":"3035567744cb_genre_in_book_changed.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"70635780338","text":"import argparse, os, random, torch\nfrom omegaconf import OmegaConf\nfrom taming_comb.modules.style_encoder.network import *\nfrom taming_comb.modules.diffusionmodules.model import * \nfrom taming_comb.models.cond_transformer import * \nfrom dataset import dataset_single_enc_sty\nfrom utils import get_rand_input, get_coord_idx, sample_gen, save_tensor\n\n\ntorch.cuda.empty_cache()\n\nif __name__==\"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", default='0',\n help=\"specify the GPU(s)\",\n type=str)\n\n parser.add_argument(\"--root_dir\", default='/eva_data0/dataset/summer2winter_yosemite',\n help=\"dataset path\",\n type=str)\n\n parser.add_argument(\"--dataset\", default='summer2winter_yosemite',\n help=\"dataset directory name\",\n type=str)\n\n parser.add_argument(\"--first_stage_model\", default='/eva_data7/VQ-I2I/summer2winter_yosemite_512_512_settingc_256_final_test/settingc_latest.pt',\n help=\"first stage model\",\n type=str)\n\n parser.add_argument(\"--transformer_model\", default='/eva_data7/VQ-I2I/summer2winter_yosemite_512_512_transformer_final_test/n_700.pt',\n help=\"transformer model (second stage model)\",\n type=str)\n\n parser.add_argument(\"--save_name\", default='./summer2winter_yosemite_completion',\n help=\"save directory name\",\n type=str)\n\n parser.add_argument(\"--sample_num\", default=5,\n help=\"the total generation number\",\n 
type=int)\n\n parser.add_argument(\"--input_domain\", default='B',\n choices=['A', 'B'],\n help=\"the input image domain\",\n type=str)\n\n parser.add_argument(\"--sty_domain\", default='A',\n choices=['A', 'B'],\n help=\"the generated image domain\",\n type=str)\n\n parser.add_argument(\"--pure_completion\", default=True,\n help=\"set True to only complete the input domain image without translation\",\n type=str)\n\n parser.add_argument(\"--partial_input\", default='top-left',\n choices=['top-left', 'left-half', 'top-half'],\n help=\"top-left: given 1/4 of top-left corner image\",\n type=str)\n\n parser.add_argument(\"--ne\", default=512,\n help=\"the number of embedding\",\n type=int)\n\n parser.add_argument(\"--ed\", default=512,\n help=\"embedding dimension\",\n type=int)\n\n parser.add_argument(\"--z_channel\",default=256,\n help=\"z channel\",\n type=int)\n\n\n args = parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n print('device: ', device)\n\n # load first stage + second stage model\n transformer_config = OmegaConf.load('transformer.yaml')\n transformer_config.model.params.f_path = args.first_stage_model\n transformer_config.model.params.first_stage_model_config.params.embed_dim = args.ed\n transformer_config.model.params.transformer_config.params.vocab_size = args.ne\n transformer_config.model.params.transformer_config.params.n_embd = args.ne\n transformer_config.model.params.cond_stage_config.params.n_embed = args.ne\n transformer_config.model.params.first_stage_model_config.params.n_embed = args.ne\n transformer_config.model.params.first_stage_model_config.params.ddconfig.z_channels = args.z_channel\n transformer_config.model.params.device = str(device)\n model = instantiate_from_config(transformer_config.model)\n if(os.path.isfile(args.transformer_model)):\n print('load ' + args.transformer_model)\n ck = torch.load( args.transformer_model, map_location=device)\n model.load_state_dict(ck['model_state_dict'], strict=False)\n model = model.to(device)\n model.eval()\n print('Finish Loading!')\n \n os.makedirs(args.save_name, exist_ok=True)\n coord_idx = get_coord_idx(model, device)\n content_set = dataset_single_enc_sty(args.root_dir, 'test', args.input_domain, model.first_stage_model, device)\n style_set = dataset_single_enc_sty(args.root_dir, 'test', args.sty_domain, model.first_stage_model, device)\n if args.partial_input == 'top-left':\n div_w, div_h = 2, 2\n elif args.partial_input == 'left-half':\n div_w, div_h = 2, 1\n else: # top-half\n div_w, div_h = 1, 2\n\n\n ## load test images\n for i in range(len(content_set)):\n if i == args.sample_num:\n break\n\n print(i)\n img = content_set[i]\n\n _, z_indices, _ = model.encode_to_z(img['image'], img['label']) # [1, 256]\n \n # new_idx contains z_indices + random indices\n new_idx = get_rand_input(device, z_indices, h=16, w=16, div_w=div_w, div_h=div_h, codebook_size=args.ne)\n\n content_idx = sample_gen(new_idx, coord_idx, model, original_h=16//div_h, original_w=16//div_w,\n z_code_shape=(1, args.ne, 16, 16))\n \n if args.pure_completion:\n test_samples = model.decode_to_img(content_idx, \n (1, args.ne, content_idx.shape[1], content_idx.shape[2]),\n img['style'], img['label'])\n else:\n style_img = style_set[random.randint(0, len(style_set)-1)]\n test_samples = model.decode_to_img(content_idx, \n (1, args.ne, content_idx.shape[1], content_idx.shape[2]),\n style_img['style'], style_img['label'])\n\n save_tensor(test_samples, 
args.save_name, 'inpaint_{}_{}'.format(args.partial_input, img['img_name']))\n save_tensor(img['image'], args.save_name, 'input_{}'.format( img['img_name']))","repo_name":"cyj407/VQ-I2I","sub_path":"save_completion.py","file_name":"save_completion.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"57"} +{"seq_id":"71820647857","text":"from brownie import AdvancedCollectible\nfrom scripts.helpful_scripts import fund_with_link, get_account\nfrom web3 import Web3\n\n\ndef main():\n account = get_account()\n advanced_collectible = AdvancedCollectible[-1]\n fund_with_link(advanced_collectible.address, amount=Web3.toWei(0.1, \"ether\"))\n creation_txn = advanced_collectible.createCollectible({\"from\": account})\n creation_txn.wait(1)\n print(\"Collectible created!\")\n","repo_name":"spo0ds/Journey-to-become-a-Blockchain-Engineer","sub_path":"Day19/Code/Advanced NFT/scripts/create_collectible.py","file_name":"create_collectible.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":748,"dataset":"github-code","pt":"57"} +{"seq_id":"41172503959","text":"nota1 = float(input())\nnota2 = float(input())\nnota3 = float(input())\n\naprovado = \"aprovado\"\nreprovado = \"reprovado\"\nfinal = \"prova final\"\n\nmedia = (nota1 + nota2 + nota3) / 3\n\nif media >= 7:\n print(aprovado)\nelif media >= 3 and media < 7:\n print(final)\nelse:\n print(reprovado)","repo_name":"AlmirOliveira77/Python","sub_path":"Exercicios_TheHuxley - IFPB/Nível 1/Aprovado.py","file_name":"Aprovado.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2946065280","text":"import json\nimport requests\nfrom track import Track\n\nclass SpotifyClient:\n\n def __init__(self, authorization_token):\n\n self.authorization_token = authorization_token\n\n def get_playlists_tracks(self, playlist_id, offset):\n\n url = f'https://api.spotify.com/v1/playlists/{playlist_id}/tracks?offset={offset}'\n response = self._place_get_api_request(url)\n response_json = response.json()\n\n tracks = [Track(track[\"track\"][\"name\"], track[\"track\"][\"id\"], track[\"track\"][\"artists\"][0][\"name\"]) for track in\n response_json[\"items\"]]\n\n return tracks\n\n def _place_get_api_request(self, url):\n response = requests.get(\n url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {self.authorization_token}\",\n }\n )\n return response\n","repo_name":"Vgal189/getYourSong","sub_path":"pythonProject/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1636301521","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author: wangye(Wayne) \n@license: Apache Licence \n@file: Describe the Painting.py \n@time: 2021/07/26\n@contact: wang121ye@hotmail.com\n@site: \n@software: PyCharm \n\n# code is far away from bugs.\n\"\"\"\n\nfrom typing import *\nimport collections\n\n\nclass Solution:\n def splitPainting(self, segments: List[List[int]]) -> List[List[int]]:\n res = collections.defaultdict(int)\n for s, e, c in segments:\n res[s] += c\n res[e] -= c\n # print(res)\n ret = []\n pre, c = None, 0\n for r in sorted(res):\n # print(r)\n if pre and c != 0:\n ret.append([pre, r, c])\n c += res[r]\n pre = r\n return ret\n\n\nso = 
Solution()\nprint(so.splitPainting(segments=[[1, 4, 5], [4, 7, 7], [1, 7, 9]]))\n","repo_name":"wangyendt/LeetCode","sub_path":"Biweekly Contests/51-100/biweek 57/1943. Describe the Painting/Describe the Painting.py","file_name":"Describe the Painting.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"12718907781","text":"a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\na = set(a)\nb = set(b)\n\nresult = []\n\nfor elem in b:\n if elem in a:\n result.append(elem)\n\nprint('Overlaped numbe rs: ' + str(result))","repo_name":"piotrszacilowski/practice-python","sub_path":"05-list-overlap/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17463373821","text":"import glob, os, json\n\nreports_dir = 'C:/Users/Kuan/Desktop/malgan_exp*'\ndirs = glob.glob(reports_dir)\nfor d in dirs:\n loadpath = os.path.join(d, 'reports', 'report.json')\n if os.path.exists(loadpath):\n with open(loadpath, 'r') as f:\n data = json.load(f)\n name = data['target']['file']['name']\n print(name)\n if 'behavior' in data:\n if 'apistats' in data['behavior']:\n data = data['behavior']['apistats']\n apistats = {}\n for apistat in data.values():\n apistats = dict(apistats, **apistat)\n savejson = {'name': name, 'apistats': apistats, 'class': 'benign'}\n savepath = os.path.join('../apistats', name[:-3]+'json')\n with open(savepath, 'w') as s:\n json.dump(savejson, s, ensure_ascii=False)","repo_name":"yanminglai/Malware-GAN","sub_path":"extract_apistats.py","file_name":"extract_apistats.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"57"} +{"seq_id":"5012630752","text":"import csv\r\n\r\nPROLOG = '''\r\n\r\n \r\n \r\n \r\n \r\n \r\n Word Book\r\n \r\n \r\n \t
<h1>英単リスト</h1>\r\n  <table>\r\n'''\r\nHEADER = '''    <tr><th>%s</th><th>%s</th><th>%s</th></tr>\r\n'''\r\nEPILOG = '''  </table>\r\n </body>\r\n</html>
\r\n\r\n \r\n'''\r\nRECORD = ' %s%s%s\\n'\r\n\r\ndef read_csv(filename):\r\n with open(filename, encoding='shift_jis') as f:\r\n reader = csv.reader(f)\r\n data = []\r\n for i, row in enumerate(reader):\r\n if i == 0:\r\n header = row[:3]\r\n else:\r\n data.append(row)\r\n return header, data\r\n\r\ndef write_table(header, data, html):\r\n with open(html, 'w', encoding='utf-8') as f:\r\n f.write(PROLOG)\r\n f.write(HEADER % (header[0], header[1], header[2]))\r\n for record in data:\r\n f.write(RECORD % (record[0], record[1], record[2]))\r\n f.write(EPILOG)\r\n\r\n\r\nif __name__ == '__main__':\r\n header, data = read_csv('output.csv')\r\n write_table(header, data, 'word.html')\r\n","repo_name":"Imai-k-0117/make_eitanlist","sub_path":"make_html.py","file_name":"make_html.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1635677441","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author: wangye(Wayne) \n@license: Apache Licence \n@file: Minimum Jumps to Reach Home.py \n@time: 2020/11/15\n@contact: wang121ye@hotmail.com\n@site: \n@software: PyCharm \n\n# code is far away from bugs.\n\"\"\"\n\nfrom typing import *\n\nimport collections\n\n\nclass Solution:\n def minimumJumps(self, forbidden: List[int], a: int, b: int, x: int) -> int:\n max_val = max([x] + forbidden) + a + b\n\n jumps = [0] + [float('inf')] * max_val\n for pos in forbidden: jumps[pos] = -1\n queue = collections.deque([0])\n\n while queue:\n pos = queue.popleft()\n if pos + a <= max_val and jumps[pos + a] > jumps[pos] + 1:\n queue.append(pos + a)\n jumps[pos + a] = jumps[pos] + 1\n if pos - b > 0 and jumps[pos - b] > jumps[pos] + 1:\n jumps[pos - b] = jumps[pos] + 1\n if pos - b + a <= max_val and jumps[pos - b + a] > jumps[pos] + 2:\n queue.append(pos - b + a)\n jumps[pos - b + a] = jumps[pos] + 2\n\n return jumps[x] if jumps[x] < float('inf') else -1\n","repo_name":"wangyendt/LeetCode","sub_path":"Biweekly Contests/0-50/biweek 39/1654. Minimum Jumps to Reach Home/Minimum Jumps to Reach Home.py","file_name":"Minimum Jumps to Reach Home.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"11428436903","text":"\"\"\"\nProject: Zed camera management\nAuthor: Juan Carlos Miranda\nDate: August 2021\nDescription:\nThis is an wrapper to use Zed functions as a thread process\nThis class is used when we need to launch process from loops as\na remote client. 
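A minimal usage sketch (added for illustration; the output path and the
plain sl.InitParameters() below are placeholder values, not project defaults):

    config = sl.InitParameters()
    job = JobThreadZed(device_config_param=config,
                       f_path_output_param='/tmp/zed_recordings')
    job.start()               # Thread.start() invokes run(), which records an SVO file
    # ... record for as long as needed ...
    job.shutdown_flag.set()   # request a clean stop of the capture loop
    job.join()
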
Methods are similar to zed_manager.py\n\nCode converted to OOP and adapted from original functions made by https://github.com/Jordi-Gene-Mola\n\nUse:\n\"\"\"\nimport threading\nimport logging\nimport os\nimport pyzed.sl as sl\nimport cv2 as cv2\nfrom datetime import datetime\n\n\nclass JobThreadZed(threading.Thread):\n # ZED camera device\n _zed_device = None\n zed_device_config = None\n zed_runtime_config = None\n _f_path = None\n\n # todo: add depth functions\n\n def __init__(self, device_config_param=None, f_path_output_param=None):\n if device_config_param is None:\n logging.debug(\"Default config loaded\")\n # Load config by default\n self.zed_device_config = sl.InitParameters() # default config\n else:\n logging.debug('External config loaded')\n self.zed_device_config = device_config_param\n\n self.zed_runtime_config = sl.RuntimeParameters() # todo check this in SDK help\n self._f_path = f_path_output_param\n logging.debug(\"__init__(self): - Initialize loading Zed Manager\")\n\n threading.Thread.__init__(self)\n self.shutdown_flag = threading.Event()\n\n def __del__(self):\n print('__DEL__')\n logging.debug(\"__del__(self): - Finalize Zed Manager\")\n\n def initialize_sensor(self):\n logging.debug(\"Initialize_sensor()\")\n self._zed_device = sl.Camera()\n if not self._zed_device.is_opened():\n print(\"Opening ZED Camera...\")\n status = self._zed_device.open(self.zed_device_config)\n if status != sl.ERROR_CODE.SUCCESS:\n print(repr(status))\n exit(1)\n # todo: raise and exception to manage this\n\n def finalize_sensor(self):\n logging.debug(\"finalize_sensor()\")\n self._zed_device.close()\n\n def get_svo_file_name(self):\n print(\"Recording ZED data\")\n resolutionp = \"_\"\n if self.zed_device_config.camera_resolution == sl.RESOLUTION.HD720:\n resolutionp = \"720\"\n if self.zed_device_config.camera_resolution == sl.RESOLUTION.HD1080:\n resolutionp = \"1080\"\n if self.zed_device_config.camera_resolution == sl.RESOLUTION.HD2K:\n resolutionp = \"HD2K\"\n\n utc_time_now = datetime.utcnow()\n date_string = utc_time_now.strftime(\"%Y%m%d_%H%M%S\")\n f_extension = \".svo\"\n f_name = date_string + \"_\" + resolutionp + f_extension\n f_path_name = os.path.join(self._f_path, f_name)\n logging.info(f\"CREATING_FILE {f_path_name}\")\n\n return f_path_name\n\n def run(self):\n # todo: check this function\n # todo: based on S01-camera_control.py\n self.initialize_sensor()\n print(\"Recording data\")\n f_path_name = self.get_svo_file_name()\n print(\"Recording... 
Creating file.\")\n runtime = sl.RuntimeParameters()\n #image_mat = sl.Mat()\n record_param = sl.RecordingParameters(f_path_name)\n vid = self._zed_device.enable_recording(record_param)\n\n # #####################\n # # RECORD loop\n # #####################\n vid = sl.ERROR_CODE.FAILURE\n out = False\n while vid != sl.ERROR_CODE.SUCCESS and not out:\n vid = self._zed_device.enable_recording(record_param)\n print(repr(vid))\n if vid == sl.ERROR_CODE.SUCCESS:\n print(\"Recording started...\")\n frames_recorded = 0\n out = True\n try:\n print('RECORDING-STARTED #%s' % self.ident)\n logging.info(f'RECORDING-STARTED #{self.ident}')\n start_video = datetime.utcnow()\n while not self.shutdown_flag.is_set():\n err = self._zed_device.grab(runtime)\n if err == sl.ERROR_CODE.SUCCESS:\n frames_recorded += 1\n #logging.info(f'frames_recorded -- {frames_recorded}')\n stop_video = datetime.utcnow()\n time_recorded = stop_video - start_video\n logging.info(f\"RECORDING- {time_recorded} recorded time, {frames_recorded} frames written\")\n\n except KeyboardInterrupt:\n print(\"(Ctrl-C) pressed!!\")\n print(f\"{frames_recorded} frames written.\")\n logging.info(f\"{frames_recorded} frames written.\")\n else:\n print(\"There is a problem with the SVO file!\")\n print(\"Recording not started.!\")\n self._zed_device.disable_recording()\n # ... Clean shutdown code here ...\n print(f'RECORDING-STOPPED #{self.ident}')\n logging.info(f'RECORDING-STOPPED #{self.ident}')\n # close sensor sensor\n self._zed_device.disable_recording()\n self.finalize_sensor()\n\n def run_real_time(self):\n # todo: review this method\n print('RECORDING-STARTED #%s' % self.ident)\n self.initialize_sensor()\n while not self.shutdown_flag.is_set():\n image_mat = sl.Mat()\n err_flag = self._zed_device.grab(self.zed_device_config)\n if err_flag == sl.ERROR_CODE.SUCCESS:\n self._zed_device.retrieve_image(image_mat, sl.VIEW.LEFT)\n cv2.imshow(\"Real time data\", image_mat.get_data())\n else:\n print(\"err_flag->\", err_flag)\n break\n key = cv2.waitKey(10)\n if key == ord('q'):\n cv2.destroyAllWindows()\n image_mat.free(memory_type=sl.MEM.CPU)\n break\n print(f'RECORDING-STOPPED #{self.ident}')\n logging.info(f'RECORDING-STOPPED #{self.ident}')\n self.finalize_sensor()\n\n# todo: def show_a_capture(self):\n# todo: def get_a_capture(self):\n","repo_name":"GRAP-UdL-AT/ak_acquisition_system","sub_path":"remote_client_zed/src/camera_classes/job_thread_zed.py","file_name":"job_thread_zed.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"72141422899","text":"# --------------------------------------------------------------------------\r\n# Function vovelHistogram\r\n# --------------------------------------------------------------------------\r\n# Read a file and get the character frequency \r\n# Histogram of characters\r\n# --------------------------------------------------------------------------\r\n# @param fname : File path/name\r\n# @param caseSensitive : True / False \r\n# Default=True \r\n# @param return : dictionary of vovel frequency, \r\n# else None if file not found\r\n# --------------------------------------------------------------------------\r\ndef vovelHistogram(fname, caseSensitive=True) :\r\n # Read file : Read Only mode\r\n try :\r\n fhand = open(fname, 'r')\r\n except :\r\n # Quit\r\n return None\r\n VOVELS = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\r\n # Initialize a char dictionary\r\n chardict = dict()\r\n # 
Initalize a list for storing words\r\n wordlist = list()\r\n # Read file & Loop over lines\r\n for line in fhand :\r\n # Strip whitespaces\r\n if caseSensitive is False :\r\n no_ws_line = line.upper().strip()\r\n else :\r\n no_ws_line = line.strip()\r\n # Split into words\r\n wordlist = no_ws_line.split()\r\n # For each word, get characters, create dictionary\r\n for word in wordlist :\r\n for ch in word :\r\n # Check if character is alnum or not\r\n if ch in VOVELS:\r\n \"\"\"\r\n # Use if-else to check 'key' existence\r\n if chr not in chardict :\r\n chardict[ch] = 1\r\n else :\r\n chardict[ch] = chardict[ch] + 1\r\n \"\"\"\r\n # OR use default value of key using get() method as 0 if not present\r\n chardict[ch] = chardict.get(ch, 0) + 1\r\n else:\r\n # Don't add to dictionary\r\n continue\r\n # Return dictionary\r\n return chardict\r\n # End of Function","repo_name":"cuongdv1/Practice-Python","sub_path":"Python3/text_processing/vovel_histogram.py","file_name":"vovel_histogram.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26777396537","text":"import pytest\nimport numpy as np\nfrom easydict import EasyDict\nfrom dizoo.gym_anytrading.envs import StocksEnv\n\n\n@pytest.mark.envtest\nclass TestStocksEnv:\n\n def test_naive(self):\n env = StocksEnv(EasyDict({\"env_id\": 'stocks-v0', \"eps_length\": 300,\\\n \"window_size\": 20, \"train_range\": None, \"test_range\": None, \"stocks_data_filename\": 'STOCKS_GOOGL'}))\n env.seed(314, dynamic_seed=False)\n assert env._seed == 314\n obs = env.reset()\n assert obs.shape == (62, )\n for _ in range(5):\n env.reset()\n np.random.seed(314)\n print('=' * 60)\n for i in range(10):\n # Both ``env.random_action()``, and utilizing ``np.random`` as well as action space,\n # can generate legal random action.\n if i < 5:\n random_action = np.array([env.action_space.sample()])\n else:\n random_action = env.random_action()\n timestep = env.step(random_action)\n print(timestep)\n assert isinstance(timestep.obs, np.ndarray)\n assert isinstance(timestep.done, bool)\n assert timestep.obs.shape == (62, )\n assert timestep.reward.shape == (1, )\n assert timestep.reward >= env.reward_space.low\n assert timestep.reward <= env.reward_space.high\n print(env.observation_space, env.action_space, env.reward_space)\n env.close()\n","repo_name":"opendilab/DI-engine","sub_path":"dizoo/gym_anytrading/envs/test_stocks_env.py","file_name":"test_stocks_env.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":2963,"dataset":"github-code","pt":"57"} +{"seq_id":"11866637767","text":"#! 
./venv/bin python\nimport requests\nfrom bs4 import BeautifulSoup\nimport praw\nimport os\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\nimport re\n\n\nreddit_links = []\nhackernews_links = []\nlobster_links = []\nmanga_links = [] \n\ndef soupify(link):\n r = requests.get(link)\n return BeautifulSoup(r.content, \"html.parser\")\n\n# Code for Reddit\nreddit = praw.Reddit(client_id=os.getenv('CLIENT_ID'),\n client_secret=os.getenv('CLIENT_SECRET'),\n user_agent=os.getenv('USER_AGENT'),\n username=os.getenv('REDDIT_USERNAME'),\n password=os.getenv('REDDIT_PASSWORD'))\nhot_news_posts = reddit.subreddit('news').hot(limit=15)\nfor post in hot_news_posts:\n reddit_links.append((post.title, post.url))\n\n# Manga from reddit\nmanga = [\"One Piece\",\n \"Black Clover\",\n \"Berserk\",\n \"Kingdom\",\n \"My Hero Academia\",\n \"Boku no Hero Academia\",\n \"Fire Force\", \n \"Attack on Titan\",\n \"Solo Leveling\"]\n\nmangas = ''\n\n# Form regex\nfor i in range(len(manga)):\n mangas += \"(\" + manga[i] + \")|\" \n\n\nmanga_regex = re.compile(mangas[:-1], re.IGNORECASE)\n\nmanga_posts = reddit.subreddit('manga').hot(limit=100)\n\nfor post in manga_posts:\n check = manga_regex.search(post.title)\n if(check != None):\n manga_links.append((post.title, post.url))\n\n# Code for Hacker News\nhacker_news = soupify(\"https://news.ycombinator.com/\")\nhack_cells = hacker_news.find_all(\"tr\", class_=\"athing\")\n\n# Code for lobste.rs\nlobsters = soupify(\"https://lobste.rs/\")\nlob_cells = lobsters.find_all(\"li\", class_=\"story\")\n\nfor i in range(15):\n hack_link = hack_cells[i].find('a', class_=\"storylink\") # title with the link to the article\n hackernews_links.append((hack_link.text,hack_link.get('href')))\n \n lob_link = lob_cells[i].find('a', class_=\"u-url\")\n lobster_links.append((lob_link.text, lob_link.get('href')))\n\nbody = \"Reddit\\n\\n\"\nfor cell in reddit_links:\n body += \"Title: {}\\nLink: {}\\n\\n\".format(cell[0],cell[1])\nbody += \"\\nHackernews\\n\\n\"\nfor cell in hackernews_links:\n body += \"Title: {}\\nLink: {}\\n\\n\".format(cell[0],cell[1])\nbody += \"\\nLobste.rs\\n\\n\"\nfor cell in lobster_links:\n body += \"Title: {}\\nLink: {}\\n\\n\".format(cell[0],cell[1])\nbody += \"\\nManga\\n\\n\"\nfor cell in manga_links:\n body += \"Title: {}\\nLink: {}\\n\\n\".format(cell[0],cell[1])\n\n\n\n\n#############################################################\n#### Email ####\n#############################################################\nsmtp = smtplib.SMTP(\"smtp.gmail.com\", 587)\n\n# Required to setup the server\nsmtp.ehlo()\nsmtp.starttls()\nuser = os.getenv(\"EMAIL_USERNAME\")\n# For Gmail this is probably an app password\npswd = os.getenv(\"EMAIL_PASSWORD\")\n\n# Authenticates the user\nsmtp.login(user, pswd)\n\ntarget = os.getenv(\"TARGET\") \n\nsource = user \n\nmsg = MIMEText(body, _charset=\"UTF-8\")\nmsg.set_charset('utf8')\nmsg['FROM'] = source\nmsg['To'] = target\nmsg['Subject'] = Header(\"Personal RSS\",'UTF-8').encode()\n\n# _attach = MIMEText(body.encode('utf-8'), 'html', 'UTF-8')\n# msg.attach(_attach)\n\n# For some reason there could not be a space between the \\n and the next letter\n# Could probably be better by just using triple quote messages\nsmtp.sendmail(source, target, msg.as_string())\n\n# Closes the 
server\nsmtp.quit()\n","repo_name":"VVoruganti/personal-rss","sub_path":"rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"34458142841","text":"# Project: Image Encrypt and Decrypt\n# Contributers: Abraham Medina, Ethan Ward, Bret Stine\n# Class: CST 205-02 Spring 2017\n# Date: March 16, 2017\n# Abstract: This program allows the user to upload an image to a webpage and allows them to encrypt of decrypt the image.\n# Contribution: Ethan and Bret both coded lines 47-79 of proj2Team230.py and Abraham worked on the rest of proj2Team230.py and the other files.\n# Github: https://github.com/abrahamleyva/CST_205_ImageEncryption\n\n# Importing libraries\nimport flask\nfrom flask import *\nimport os\nfrom PIL import Image\nimport math\nimport time\n\napp = flask.Flask(__name__) # Define flask app\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n# Defining routes so programs knows what html pages to display\n@app.route('/')\ndef index():\n return flask.render_template(\"upload.html\")\n@app.route('/decision')\ndef decision():\n return flask.render_template(\"decision.html\")\n@app.route('/encryptDecrypt')\ndef encryptDecrypt():\n return flask.render_template(\"encryptDecrypt.html\")\n \n# When upload.html redirects to decision.html this method exicutes first\n@app.route(\"/decision\", methods = ['POST'])\ndef upload():\n target = os.path.join(APP_ROOT, 'static/')\n print(target)\n \n # If static folder is not preset it is created\n if not os.path.isdir(target):\n os.mkdir(target)\n \n # Obtains a list of images that will be uploaded and saves them\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, 'image.png'])\n print(destination)\n file.save(destination)\n # Redirects to decision.html\n return render_template(\"decision.html\")\n\n# When decision.html redirects to encryptDecrypt.html this method exicutes first \n@app.route(\"/encryptDecrypt\", methods = ['POST'])\ndef runEncrypt():\n start_timer = time.time()\n \n img = Image.open(\"static/image.png\")\n \n arr = img.load() #pixel data stored in this 2D array\n \n def rot(A, n, x1, y1): #this is the function which rotates a given block\n temple = []\n for i in range(n):\n temple.append([])\n for j in range(n):\n temple[i].append(arr[x1+i, y1+j])\n for i in range(n):\n for j in range(n):\n arr[x1+i,y1+j] = temple[n-1-i][n-1-j]\n \n \n xres, yres = img.size\n BLKSZ = 50 #blocksize\n \n for i in range(2, BLKSZ+1):\n for j in range(int(math.floor(float(xres)/float(i)))):\n for k in range(int(math.floor(float(yres)/float(i)))):\n rot(arr, i, j*i, k*i)\n for i in range(3, BLKSZ+1):\n for j in range(int(math.floor(float(xres)/float(BLKSZ+2-i)))):\n for k in range(int(math.floor(float(yres)/float(BLKSZ+2-i)))):\n rot(arr, BLKSZ+2-i, j*(BLKSZ+2-i), k*(BLKSZ+2-i))\n \n img.save(\"static/finalImage.png\")\n \n return render_template(\"encryptDecrypt.html\")\n\n# This actually runs the flask app on port 8080\nif __name__ == \"__main__\": \n app.run(\n port = int(os.getenv('PORT', 8080)),\n host = os.getenv('IP', '0.0.0.0')\n )\n\napp.run(\n port = int(os.getenv('PORT', 8080)),\n host = os.getenv('IP', 
'0.0.0.0')\n)","repo_name":"abrahamleyva/CST_205_ImageEncryption","sub_path":"proj2Team230.py","file_name":"proj2Team230.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26775572717","text":"import pytest\n\nfrom ding.utils.loader import mapping, MappingError, mpfilter, mpkeys, mpvalues, mpitems, item, item_or, is_type, \\\n optional\n\n\n@pytest.mark.unittest\nclass TestConfigLoaderMapping:\n\n def test_mapping(self):\n _loader = mapping(str, optional(is_type(int) | float))\n assert _loader({'sdfjk': 1}) == {'sdfjk': 1}\n assert _loader({'a': 1, 'b': 2.4, 'c': None}) == {'a': 1, 'b': 2.4, 'c': None}\n with pytest.raises(MappingError) as ei:\n _loader({'a': 1, 345: 'sdjfhk', 'b': [], None: 389450})\n err = ei.value\n assert len(err.key_errors()) == 2\n assert len(err.value_errors()) == 2\n assert len(err.errors()) == 4\n assert {key for key, _ in err.key_errors()} == {345, None}\n assert {key for key, _ in err.value_errors()} == {345, 'b'}\n\n with pytest.raises(TypeError):\n _loader(1)\n with pytest.raises(TypeError):\n _loader([])\n\n def test_mpfilter(self):\n _loader = mpfilter(lambda k, v: k in {'a', 'b', 'sum'})\n assert _loader({'a': 1, 'b': 2, 'sum': 3, 'sdk': 4}) == {'a': 1, 'b': 2, 'sum': 3}\n\n def test_mpkeys(self):\n _loader = mpkeys()\n assert _loader({'a': 1, 'b': 2, 'sum': 3, 'sdk': 4}) == {'a', 'b', 'sum', 'sdk'}\n\n def test_mpvalues(self):\n _loader = mpvalues()\n assert _loader({'a': 1, 'b': 2, 'sum': 3, 'sdk': 4}) == {1, 2, 3, 4}\n\n def test_mpitems(self):\n _loader = mpitems()\n assert _loader({'a': 1, 'b': 2, 'sum': 3, 'sdk': 4}) == {('a', 1), ('b', 2), ('sum', 3), ('sdk', 4)}\n\n def test_item(self):\n _loader = item('a') | item('b')\n assert _loader({'a': 1}) == 1\n assert _loader({'b': 2}) == 2\n assert _loader({'a': 3, 'b': -2}) == 3\n\n def test_item_or(self):\n _loader = item_or('a', 0)\n assert _loader({'a': 1}) == 1\n assert _loader({'b': 2}) == 0\n","repo_name":"opendilab/DI-engine","sub_path":"ding/utils/loader/tests/loader/test_mapping.py","file_name":"test_mapping.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":2963,"dataset":"github-code","pt":"57"} +{"seq_id":"28241133271","text":"\"\"\"\nНапишите функцию которая:\na)\tпринимает список с произвольными значениями\nb)\tдобавляет к нему произвольное значение\nc)\tвозвращает результирующий список\n\"\"\"\n\n\ndef add_value_to_list(list):\n\n import random\n x = random.randint(0, 100)\n list.append(x)\n\n return list1\n\n\nlist1 = [1, 2, 3, 'abc']\nprint(add_value_to_list(list1))\n","repo_name":"UsovDmitrii/Python_course_2023","sub_path":"hw_task_1/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74623498098","text":"import json\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, Generator, List, Optional\n\nimport typer\nfrom click import BadArgumentUsage\n\nfrom ..config import cfg\nfrom ..role import SystemRole\nfrom .handler import Handler\n\nCHAT_CACHE_LENGTH = int(cfg.get(\"CHAT_CACHE_LENGTH\"))\nCHAT_CACHE_PATH = Path(cfg.get(\"CHAT_CACHE_PATH\"))\n\n\nclass ChatSession:\n \"\"\"\n This class is used as a decorator for OpenAI chat API requests.\n The ChatSession class caches chat messages and keeps track of the\n conversation history. 
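A short sketch of the intended use (the decorated generator and the
chat id "demo" below are illustrative, not part of the module):

        session = ChatSession(length=100, storage_path=Path("/tmp/chats"))

        @session
        def completion(messages, **kwargs):
            yield "response text"  # words streamed from the model

        # With a chat_id, the running message list is loaded from and
        # persisted to storage_path / chat_id as JSON.
        for word in completion(messages=[{"role": "user", "content": "hi"}],
                               chat_id="demo"):
            print(word, end="")
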
It is designed to store cached messages\n in a specified directory and in JSON format.\n \"\"\"\n\n def __init__(self, length: int, storage_path: Path):\n \"\"\"\n Initialize the ChatSession decorator.\n\n :param length: Integer, maximum number of cached messages to keep.\n \"\"\"\n self.length = length\n self.storage_path = storage_path\n self.storage_path.mkdir(parents=True, exist_ok=True)\n\n def __call__(self, func: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"\n The Cache decorator.\n\n :param func: The chat function to cache.\n :return: Wrapped function with chat caching.\n \"\"\"\n\n def wrapper(*args: Any, **kwargs: Any) -> Generator[str, None, None]:\n chat_id = kwargs.pop(\"chat_id\", None)\n messages = kwargs[\"messages\"]\n if not chat_id:\n yield from func(*args, **kwargs)\n return\n old_messages = self._read(chat_id)\n for message in messages:\n old_messages.append(message)\n kwargs[\"messages\"] = old_messages\n response_text = \"\"\n for word in func(*args, **kwargs):\n response_text += word\n yield word\n old_messages.append({\"role\": \"assistant\", \"content\": response_text})\n self._write(kwargs[\"messages\"], chat_id)\n\n return wrapper\n\n def _read(self, chat_id: str) -> List[Dict[str, str]]:\n file_path = self.storage_path / chat_id\n if not file_path.exists():\n return []\n parsed_cache = json.loads(file_path.read_text())\n return parsed_cache if isinstance(parsed_cache, list) else []\n\n def _write(self, messages: List[Dict[str, str]], chat_id: str) -> None:\n file_path = self.storage_path / chat_id\n json.dump(messages[-self.length :], file_path.open(\"w\"))\n\n def invalidate(self, chat_id: str) -> None:\n file_path = self.storage_path / chat_id\n file_path.unlink(missing_ok=True)\n\n def get_messages(self, chat_id: str) -> List[str]:\n messages = self._read(chat_id)\n return [f\"{message['role']}: {message['content']}\" for message in messages]\n\n def exists(self, chat_id: Optional[str]) -> bool:\n return bool(chat_id and bool(self._read(chat_id)))\n\n def list(self) -> List[Path]:\n # Get all files in the folder.\n files = self.storage_path.glob(\"*\")\n # Sort files by last modification time in ascending order.\n return sorted(files, key=lambda f: f.stat().st_mtime)\n\n\nclass ChatHandler(Handler):\n chat_session = ChatSession(CHAT_CACHE_LENGTH, CHAT_CACHE_PATH)\n\n def __init__(self, chat_id: str, role: SystemRole) -> None:\n super().__init__(role)\n self.chat_id = chat_id\n self.role = role\n\n if chat_id == \"temp\":\n # If the chat id is \"temp\", we don't want to save the chat session.\n self.chat_session.invalidate(chat_id)\n\n self.validate()\n\n @classmethod\n def list_ids(cls, value: str) -> None:\n if not value:\n return\n # Prints all existing chat IDs to the console.\n for chat_id in cls.chat_session.list():\n typer.echo(chat_id)\n raise typer.Exit()\n\n @property\n def initiated(self) -> bool:\n return self.chat_session.exists(self.chat_id)\n\n @property\n def initial_message(self) -> str:\n chat_history = self.chat_session.get_messages(self.chat_id)\n index = 1 if cfg.get(\"SYSTEM_ROLES\") == \"true\" else 0\n return chat_history[index] if chat_history else \"\"\n\n @property\n def is_same_role(self) -> bool:\n # TODO: Should be optimized for REPL mode.\n return self.role.same_role(self.initial_message)\n\n @classmethod\n def show_messages_callback(cls, chat_id: str) -> None:\n if not chat_id:\n return\n cls.show_messages(chat_id)\n raise typer.Exit()\n\n @classmethod\n def show_messages(cls, chat_id: str) -> None:\n # Prints all 
messages from a specified chat ID to the console.\n for index, message in enumerate(cls.chat_session.get_messages(chat_id)):\n # Remove output type from the message, e.g. \"text\\nCommand:\" -> \"text\"\n if message.startswith(\"user:\"):\n message = \"\\n\".join(message.splitlines()[:-1])\n color = \"magenta\" if index % 2 == 0 else \"green\"\n typer.secho(message, fg=color)\n\n def validate(self) -> None:\n if self.initiated:\n # print(\"initial message:\", self.initial_message)\n chat_role_name = self.role.get_role_name(self.initial_message)\n if not chat_role_name:\n raise BadArgumentUsage(\n f'Could not determine chat role of \"{self.chat_id}\"'\n )\n if self.role.name == \"default\":\n # If user didn't pass chat mode, we will use the one that was used to initiate the chat.\n self.role = SystemRole.get(chat_role_name)\n else:\n if not self.is_same_role:\n raise BadArgumentUsage(\n f'Cant change chat role to \"{self.role.name}\" '\n f'since it was initiated as \"{chat_role_name}\" chat.'\n )\n\n def make_prompt(self, prompt: str) -> str:\n prompt = prompt.strip()\n return self.role.make_prompt(prompt, not self.initiated)\n\n def make_messages(self, prompt: str) -> List[Dict[str, str]]:\n messages = []\n if not self.initiated and cfg.get(\"SYSTEM_ROLES\") == \"true\":\n messages.append({\"role\": \"system\", \"content\": self.role.role})\n messages.append({\"role\": \"user\", \"content\": prompt})\n return messages\n\n @chat_session\n def get_completion(\n self,\n **kwargs: Any,\n ) -> Generator[str, None, None]:\n yield from super().get_completion(**kwargs)\n","repo_name":"TheR1D/shell_gpt","sub_path":"sgpt/handlers/chat_handler.py","file_name":"chat_handler.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","stars":6479,"dataset":"github-code","pt":"57"} +{"seq_id":"17886849765","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout\n\n\nclass MyQWidget(QWidget):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n topButton = QPushButton(\"Top\")\n bottomButton = QPushButton(\"Bottom\")\n\n layout = QVBoxLayout()\n layout.addWidget(topButton)\n layout.addWidget(bottomButton)\n self.setLayout(layout)\n\n #self.setGeometry(300, 300, 350, 200)\n #self.setWindowTitle(\"QVBoxLayout\")\n self.show()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n mqw = MyQWidget()\n sys.exit(app.exec_())\n\n","repo_name":"12ff806/learning","sub_path":"pyqt5/pyqt_examples_2021/03_QVBoxLayout_PyQt5.py","file_name":"03_QVBoxLayout_PyQt5.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1536190479","text":"# Desarrollar una función para reemplazar todas las apariciones de una palabra por\n# otra en una cadena de caracteres y devolver la cadena obtenida y un entero con la\n# cantidad de reemplazos realizados. Tener en cuenta que sólo deben reemplazarse\n# palabras completas, y no fragmentos de palabras. Escribir también un programa\n# para verificar el comportamiento de la función. 
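# (Added illustration) Only whole words are replaced, so a word embedded in a
# longer word must stay untouched. Expected behaviour, with made-up values:
#
#     reemplazar_palabra("el sol y el solsticio", "sol", "mar")
#     returns ("el mar y el solsticio", 1)   # "solsticio" is not replaced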
\n\ndef reemplazar_palabra(cadena, palabra_original, palabra_nueva):\n # Dividimos la cadena en palabras\n palabras = cadena.split()\n \n # Contador de reemplazos\n reemplazos = 0\n \n # Recorremos las palabras y reemplazamos si es necesario\n for i, palabra in enumerate(palabras):\n if palabra == palabra_original:\n palabras[i] = palabra_nueva\n reemplazos += 1\n \n # Unimos las palabras en una cadena con un espacio entre cada una\n cadena_modificada = ' '.join(palabras)\n \n return cadena_modificada, reemplazos\n\ndef main():\n cadena = input(\"Ingrese una cadena de caracteres: \")\n palabra_original = input(\"Ingrese la palabra a reemplazar: \")\n palabra_nueva = input(\"Ingrese la palabra nueva: \")\n \n resultado, reemplazos = reemplazar_palabra(cadena, palabra_original, palabra_nueva)\n print(resultado, reemplazos)\n \nmain()\n","repo_name":"torrresagus/UADE-Algoritmos-y-Estructura-de-Datos-I","sub_path":"Guia de Trabajos Practicos 2021/Trabajo Práctico 4 - Cadenas de caracteres/Ejercicio 10.py","file_name":"Ejercicio 10.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"515545239","text":"from time import sleep\nimport tm1637\nimport _thread\nfrom machine import Pin\nfrom neopixel import NeoPixel\nimport socket\nimport ujson, network\n\n# 定义RGB控制对象\npin = 15\nrgb_num = 1\nrgb_led = NeoPixel(Pin(pin, Pin.OUT), rgb_num)\n\n# 定义RGB颜色\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nYELLOW = (255, 155, 0)\nCOLORS = [RED, GREEN, YELLOW]\n\nsmg = tm1637.TM1637(clk=Pin(17), dio=Pin(16))\n\n\ndef show_single_color(color, duration):\n for i in range(rgb_num):\n rgb_led[i] = color\n rgb_led.write()\n\n for i in range(duration, 0, -1):\n smg.show(\"%04d\" % i)\n sleep(1)\n\n\ndef set_color(color):\n for i in range(rgb_num):\n rgb_led[i] = color\n rgb_led.write()\n\n\ndef traffic_light(seconds, led_color):\n if led_color == 'red':\n show_single_color(RED, seconds)\n elif led_color == 'green':\n show_single_color(GREEN, seconds)\n elif led_color == 'yellow':\n show_single_color(YELLOW, seconds)\n else:\n print('无效的颜色')\n\n\ndef receive_and_parse():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('0.0.0.0', 8080))\n\n while True:\n print('等待连接...')\n # 接收数据\n data, addr = sock.recvfrom(1024)\n print('收到数据:', data)\n\n try:\n # 解析JSON数据\n parsed_data = ujson.loads(data.decode())\n\n # 获取解析后的数据\n seconds = parsed_data.get('seconds')\n led_color = parsed_data.get('led')\n\n # 控制红绿灯\n traffic_light(seconds, led_color)\n\n except ValueError:\n print('无效的JSON数据')\n\n except KeyError:\n print('缺少关键字')\n\n\n# 执行数据接收和控制函数\ndef do_connect():\n wlan = network.WLAN(network.STA_IF)\n wlan.active(True)\n if not wlan.isconnected():\n print('connecting to network...')\n # wlan.ifconfig(('192.168.1.9', '255.255.255.0', '192.168.1.2', '192.168.1.2'))\n wlan.connect('ChinaNet-PEs9', '13789569653')\n # wlan.connect('AP', '1234561017')\n i = 1\n led.value(1)\n while not wlan.isconnected():\n print(\"正在链接...{}\".format(i))\n led.value(1)\n i += 1\n time.sleep(1)\n led.value(0)\n print('network config:', wlan.ifconfig())\n\n\ndo_connect()\nreceive_and_parse()\n\n","repo_name":"3469134108/flaskProject","sub_path":"单片机/hld.py","file_name":"hld.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"40642191978","text":"# if statemenfs\n# list = [\"hey\",\"no\",\"run\", \"random\"]\n# for value in list:\n# 
\tif value == 'no':\n# \t\tprint(value.upper())\n# \telse:\n# \t\tprint(value.title())\n\t\t\n# comoaring things regardless of case\n# car = \"YOU\"\n# if car.lower()==\"you\":\n# \tprint(\"You is not you\")\n\n# cars = [\"hey\", \"something\", \"no\"]\n# if 'hey' in cars:\n# \tprint(\"flush yourself niglet\")\n# if 'me' not in cars:\n# \tprint(\"pepesadge\")\n\nage = 12\nif age < 4:\n\tvalue = 0\nelif age>= 4 and age < 18:\n\tvalue = 12\nelse:\n\tvalue = 15\nprint(f\"Your total uh... cost, I think, is {value}\")\n","repo_name":"mtsoyer/Python","sub_path":"TheBeginningPython/ifstatements.py","file_name":"ifstatements.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23327721770","text":"#!/usr/bin/env python\nimport argparse\nfrom common import Example, Fact, Rule, Theory, TheoryAssertionRepresentationWithLabel\nimport json\n\nimport problog\nfrom problog.program import PrologString\nfrom problog.core import ProbLog\nfrom problog import get_evaluatable\nfrom problog.formula import LogicFormula, LogicDAG\nfrom problog.sdd_formula import SDD\nfrom problog.engine import NonGroundProbabilisticClause, UnknownClause\nfrom problog.engine_stack import NegativeCycle\n\nimport re\nimport time\n\nfrom utils import parse_statement\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\nruletaker_variable_nl_to_variable_format = {\"someone\": \"X\", \"something\": \"Y\"}\n\n\nclass Metrics:\n \"\"\"Class to store accuracy and timing related metrics when running an entire theories dataset\n through a theorem proving engine.\"\"\"\n\n def __init__(self):\n self.num_examples = 0\n self.num_true = 0\n self.num_false = 0\n self.num_correct_true = 0\n self.num_correct_false = 0\n self.num_correct = 0\n self.total_elapsed_millisecs = 0\n self.num_true_with_exception = 0\n self.num_false_with_exception = 0\n self.num_correct_true_with_exception = 0\n self.num_correct_false_with_exception = 0\n self.num_incorrect_true_no_exception = 0\n self.num_incorrect_false_no_exception = 0\n self.num_no_gold_label = 0\n self.exception_num_failures = dict()\n\n def update(self, gold_label, engine_label, engine_exception, elapsed_millisecs):\n \"\"\"Update metrics. 
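For example (illustrative values), after

            m = Metrics()
            m.update(gold_label=True, engine_label=True,
                     engine_exception=None, elapsed_millisecs=12)
            m.update(gold_label=False, engine_label=True,
                     engine_exception=None, elapsed_millisecs=9)

        the counters read num_correct_true == 1, num_correct == 1 and
        num_incorrect_false_no_exception == 1.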
To be called after processing each example from the dataset.\"\"\"\n self.num_examples += 1\n self.total_elapsed_millisecs += elapsed_millisecs\n if gold_label is None:\n self.num_no_gold_label += 1\n else:\n engine_label_correct = gold_label == engine_label\n if not engine_label_correct:\n exception_msg = \"No Exception\"\n if engine_exception is not None:\n exception_msg = engine_exception\n if engine_exception not in self.exception_num_failures:\n self.exception_num_failures[engine_exception] = 0\n self.exception_num_failures[engine_exception] += 1\n if gold_label:\n self.num_true += 1\n if engine_label:\n self.num_correct_true += 1\n if engine_exception is not None:\n self.num_true_with_exception += 1\n if engine_label:\n self.num_correct_true_with_exception += 1\n else:\n if not engine_label:\n self.num_incorrect_true_no_exception += 1\n else:\n self.num_false += 1\n if not engine_label:\n self.num_correct_false += 1\n if engine_exception is not None:\n self.num_false_with_exception += 1\n if not engine_label:\n self.num_correct_false_with_exception += 1\n else:\n if engine_label:\n self.num_incorrect_false_no_exception += 1\n self.num_correct = self.num_correct_true + self.num_correct_false\n\n def report(self):\n \"\"\"Report summarizing the overall accuracy, and breakdown by True and False (gold)\n labels. Also reports the number of examples that result in exceptions from the\n underlying engine, and timing information.\"\"\"\n if self.num_examples > 0:\n avg_elapsed_secs = (self.total_elapsed_millisecs / self.num_examples) / 1000\n print(f\"Total no. of examples: {self.num_examples}\")\n if self.num_no_gold_label > 0:\n print(f\"Found {self.num_no_gold_label} examples without a gold label\")\n else:\n total_no_of_exceptions = (\n self.num_true_with_exception + self.num_false_with_exception\n )\n print(f\" No. true: {self.num_true}\")\n print(f\" No. correct: {self.num_correct_true}\")\n print(f\" No. of exceptions: {self.num_true_with_exception}\")\n print(\n f\" No. correct with exceptions: {self.num_correct_true_with_exception}\"\n )\n print(\n f\" No. incorrect without exception: {self.num_incorrect_true_no_exception}\"\n )\n print(f\" No. false: {self.num_false}\")\n print(f\" No. correct: {self.num_correct_false}\")\n print(f\" No. of exceptions: {self.num_false_with_exception}\")\n print(\n f\" No. correct with exceptions: {self.num_correct_false_with_exception}\"\n )\n print(\n f\" No. incorrect without exception: {self.num_incorrect_false_no_exception}\"\n )\n print(f\"Total no. correct: {self.num_correct}\")\n print(f\"Total no. 
with exceptions: {total_no_of_exceptions}\")\n print(f\"Accuracy: {(self.num_correct * 100.0) / self.num_examples}\")\n if total_no_of_exceptions > 0:\n print(\"\\nFailure Breakdown by Exception:\")\n for exception in self.exception_num_failures:\n print(\n f\" {exception}: {self.exception_num_failures[exception]}\"\n )\n print(\n f\"\\nAverage theorem proving time per example: {avg_elapsed_secs} secs\\n\\n\"\n )\n\n\ndef format_argument(arg_as_str):\n \"\"\"Function that takes a string representing a predicate argument and formats it appropriately\n depending on whether it is a constatn or a variable.\n \"\"\"\n arg_as_str = arg_as_str.lower()\n if arg_as_str in ruletaker_variable_nl_to_variable_format.keys():\n # If it's in the mapping, it is a variable, so return an appropriately formatted variable.\n return ruletaker_variable_nl_to_variable_format[arg_as_str]\n # If it's not in the mapping, it is a constant, so return a lower-cased string.\n return arg_as_str\n\n\ndef parse_triple_representation(triple_rep):\n \"\"\"Function that takes string containing a triple representation in RuleTaker format and creates\n a Fact. E.g. input:\n (\\\"cow\\\" \\\"needs\\\" \\\"bear\\\" \\\"+\\\")\n \"\"\"\n fact = None\n triple_rep = triple_rep.strip()\n # Remove enclosing parens ()\n triple_txt = triple_rep[1:-1]\n\n # Extract the parts of the triple by looking for quotes.\n # Replace spaces in predicate/args with underscores to make them valid terms.\n triple_parts = []\n for m in re.finditer(r'\"([^\"]+)\"', triple_txt):\n triple_part = m.group(1).replace(\" \", \"_\")\n triple_parts.append(triple_part)\n\n if len(triple_parts) == 4:\n arg1 = format_argument(triple_parts[0])\n predicate = triple_parts[1]\n arg2 = format_argument(triple_parts[2])\n polarity = triple_parts[3]\n if predicate == \"is\":\n predicate = f\"{predicate}_{arg2}\"\n fact = Fact(polarity, predicate, [arg1])\n else:\n fact = Fact(polarity, predicate, [arg1, arg2])\n return fact\n\n\ndef parse_rule_representation(rule_rep):\n \"\"\"Function that takes string containing a rule in RuleTaker format and creates\n a Rule. E.g. input:\n (((\\\"something\\\" \\\"needs\\\" \\\"cow\\\" \\\"+\\\")) -> (\\\"something\\\" \\\"is\\\" \\\"red\\\" \\\"+\\\"))\n \"\"\"\n rule = None\n rule_rep = rule_rep.strip()\n # Remove enclosing parens ()\n rule_txt = rule_rep[1:-1]\n rule_parts = rule_txt.split(\"->\")\n if len(rule_parts) == 2:\n # LHS is enclosed in parens. Remove ().\n lhs = rule_parts[0].strip()[1:-1]\n rhs = rule_parts[1]\n lhs_facts = []\n lhs_parts = []\n for m in re.finditer(r\"\\([^()]+\\)\", lhs):\n lhs_part = m.group(0)\n lhs_fact = parse_triple_representation(lhs_part)\n if lhs_fact is not None:\n lhs_facts.append(lhs_fact)\n rhs_fact = parse_triple_representation(rhs)\n rule = Rule(lhs_facts, rhs_fact)\n return rule\n\n\ndef call_theorem_prover(\n theorem_prover, instance_id, question_id, theory, assertion, gold_label\n):\n \"\"\"Function that takes a single theory/assertion example and runs it through the theorem prover\n to obtain a label. 
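A typical call, mirroring how run_theorem_prover below invokes it (the
    ids and the gold label here are placeholders):

        label, millis, exc = call_theorem_prover(
            "problog", "1", "Q1", theory, assertion, gold_label=True)
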
Returns the obtained label, elapsed time to solve it, and exception returned\n by the engine, if any.\n \"\"\"\n obtained_result = False\n millisecs_elapsed = 0\n print(\"=======ORIGINAL THEORY=========\")\n theory_as_txt = theory.program(theorem_prover)\n print(theory_as_txt)\n theory.preprocess(theorem_prover)\n theory_as_txt = theory.program(theorem_prover)\n if theorem_prover == \"problog\":\n assertion_lf = assertion.logical_form(theorem_prover, False)\n assertion_lf = f\"query({assertion_lf}).\"\n program = f\"{theory_as_txt}\\n{assertion_lf}\"\n print(\"=======PROGRAM FROM PREPROCESSED THEORY=========\")\n print(program)\n print(\"=======EXPECTED LABEL=========\")\n print(f\" {gold_label}\")\n start_millisecs = current_milli_time()\n try:\n lf = LogicFormula.create_from(program) # ground the program\n dag = LogicDAG.create_from(lf) # break cycles in the ground program\n sdd = SDD.create_from(dag)\n result = sdd.evaluate()\n end_millisecs = current_milli_time()\n elapsed_millisecs = end_millisecs - start_millisecs\n result_tuples = [(k, v) for k, v in result.items()]\n obtained_result = result_tuples[0][1] != 0.0\n return obtained_result, elapsed_millisecs, None\n except (NegativeCycle, NonGroundProbabilisticClause, UnknownClause) as e:\n end_millisecs = current_milli_time()\n elapsed_millisecs = end_millisecs - start_millisecs\n print(\n f\"!!!Encountered Exception at instance id {instance_id}, question id {question_id}: {e}\"\n )\n obtained_result = assertion.polarity != \"+\"\n exception_name = str(type(e)).lstrip(\"\")\n return obtained_result, elapsed_millisecs, exception_name\n return obtained_result, elapsed_millisecs, None\n\n\ndef run_theorem_prover(theorem_prover, ip, ip_format, op, report_metrics):\n \"\"\"Function that takes an input file, calls the theorem prover on every example and gets a label.\n Results are written to output file. Metrics are tracked and reported if report_metrics is True.\n \"\"\"\n metrics = Metrics()\n if ip_format == \"current\":\n row_ix = 1\n for ix, line in enumerate(ip.readlines()):\n facts = []\n rules = []\n instance_json = json.loads(line)\n instance = TheoryAssertionRepresentationWithLabel.from_json(instance_json)\n if instance is not None:\n for lf_str in instance.theory_statements:\n statement = parse_statement(lf_str)\n if isinstance(statement, Fact):\n facts.append(statement)\n elif isinstance(statement, Rule):\n rules.append(statement)\n else:\n print(\n f\"Unable to parse statement {lf_str} in row {row_ix} of input jsonl file!\"\n )\n assertion = parse_statement(instance.assertion_statement)\n gold_label = instance.label\n theory = Theory(facts, rules)\n ix = str(row_ix)\n (\n engine_label,\n elapsed_millisecs,\n returned_exception,\n ) = call_theorem_prover(\n theorem_prover, ix, ix, theory, assertion, gold_label\n )\n if report_metrics:\n metrics.update(\n gold_label, engine_label, returned_exception, elapsed_millisecs\n )\n instance_json[\"label\"] = engine_label\n json.dump(instance_json, op)\n op.write(\"\\n\")\n else:\n print(f\"Unexpected input file format in line no. 
{row_ix}\")\n row_ix += 1\n else:\n # Ruletaker Legacy Jsonl Format\n for ix, line in enumerate(ip.readlines()):\n facts = []\n rules = []\n instance = json.loads(line)\n triples = instance[\"triples\"]\n ip_rules = instance.get(\"rules\", [])\n questions = instance[\"questions\"]\n for triple_key in triples:\n triple_obj = triples[triple_key]\n triple_rep = triple_obj[\"representation\"]\n fact = parse_triple_representation(triple_rep)\n if fact is not None:\n facts.append(fact)\n for rule_key in ip_rules:\n rule_obj = ip_rules[rule_key]\n rule_rep = rule_obj[\"representation\"]\n rule = parse_rule_representation(rule_rep)\n if rule is not None:\n rules.append(rule)\n theory = Theory(facts, rules)\n for question_key in questions:\n question_obj = questions[question_key]\n question_rep = question_obj[\"representation\"]\n assertion = parse_triple_representation(question_rep)\n gold_label = question_obj.get(\"answer\", None)\n (\n engine_label,\n elapsed_millisecs,\n returned_exception,\n ) = call_theorem_prover(\n theorem_prover, ix, question_key, theory, assertion, gold_label\n )\n if report_metrics:\n metrics.update(\n gold_label, engine_label, returned_exception, elapsed_millisecs\n )\n op_obj = {\n **instance,\n **({f\"{theorem_prover}_label\": engine_label}),\n }\n json.dump(op_obj, op)\n op.write(\"\\n\")\n if report_metrics:\n metrics.report()\n\n\ndef main():\n \"\"\"Tool that takes a collection of theory-assertion examples and runs them through a theorem prover.\n Supported input format 1: Jsonl format with json objects represented as per the\n `TheoryAssertionRepresentationWithLabel` class.\n Sample:\n { \"json_class\": \"TheoryAssertionRepresentation\",\n \"theory_statements\": [\n \"1.0::kind('Fiona').\",\n \"1.0::rough('Dave').\",\n \"1.0::smart('Dave').\",\n \"1.0::quiet('Charlie').\",\n \"1.0::kind('Dave').\",\n \"1.0::white('Erin').\",\n \"1.0::young(X) :- white(X).\",\n \"1.0::smart(X) :- big(X), green(X).\",\n \"1.0::kind(X) :- round(X), smart(X).\",\n \"1.0::kind(X) :- quiet(X), round(X).\",\n \"1.0::rough(X) :- round(X), red(X).\"\n \"1.0::kind(X) :- quiet(X).\", \"1.0::furry(X) :- quiet(X), big(X).\"\n ],\n \"assertion_statement\": \"query(1.0::young('Dave').).\"\n }\n Supported input format 2: Ruletaker's legacy Jsonl format (for AI2's internal use with existing RuleTaker datasets)\n Sample (there are additional fields not relevant and not shown here):\n { \"id\": \"AttNoneg-D3-319\", ...\n \"triples\":{\n \"triple1\":\n \"text\":\"Bob is cold.\",\n \"representation\":\"(\\\"Bob\\\" \\\"is\\\" \\\"cold\\\" \\\"+\\\")\"\n },\n \"triple2\": {\n \"text\":\"Erin is nice.\",\n \"representation\":\"(\\\"Erin\\\" \\\"is\\\" \\\"nice\\\" \\\"+\\\")\"\n },\n \"triple3\":{\n \"text\":\"Gary is nice.\",\n \"representation\":\"(\\\"Gary\\\" \\\"is\\\" \\\"nice\\\" \\\"+\\\")\"\n },\n \"triple4\":{\n \"text\":\"Harry is blue.\",\n \"representation\":\"(\\\"Harry\\\" \\\"is\\\" \\\"blue\\\" \\\"+\\\")\"\n }\n },\n \"rules\":{\n \"rule1\":{\n \"text\":\"Blue people are furry.\",\n \"representation\":\"(((\\\"someone\\\" \\\"is\\\" \\\"blue\\\" \\\"+\\\")) -> (\\\"someone\\\" \\\"is\\\" \\\"furry\\\" \\\"+\\\"))\"\n },\n \"rule2\":{\n \"text\":\"Nice people are furry.\",\n \"representation\":\"(((\\\"someone\\\" \\\"is\\\" \\\"nice\\\" \\\"+\\\")) -> (\\\"someone\\\" \\\"is\\\" \\\"furry\\\" \\\"+\\\"))\"\n },\n \"rule3\":{\n \"text\":\"Blue, big people are nice.\",\n \"representation\":\"(((\\\"someone\\\" \\\"is\\\" \\\"blue\\\" \\\"+\\\") (\\\"someone\\\" \\\"is\\\" \\\"big\\\" 
\\\"+\\\"))\n -> (\\\"someone\\\" \\\"is\\\" \\\"nice\\\" \\\"+\\\"))\"\n },\n \"rule4\":{\n \"text\":\"If someone is cold then they are quiet.\",\n \"representation\":\"(((\\\"someone\\\" \\\"is\\\" \\\"cold\\\" \\\"+\\\"))\n -> (\\\"someone\\\" \\\"is\\\" \\\"quiet\\\" \\\"+\\\"))\"},\n }\n },\n \"questions\":{\n \"Q1\":{\n \"question\":\"Erin is nice.\",\n \"answer\":true,\n ...\n \"representation\":\"(\\\"Erin\\\" \\\"is\\\" \\\"nice\\\" \\\"+\\\")\"\n },\n \"Q2\":{\n \"question\":\"Gary is not nice.\",\n \"answer\":false,\n ...\n \"representation\":\"(\\\"Gary\\\" \\\"is\\\" \\\"nice\\\" \\\"-\\\")\"\n },\n \"Q3\":{\n \"question\":\"Gary is furry.\",\n \"answer\":true,\n \"representation\":\"(\\\"Gary\\\" \\\"is\\\" \\\"furry\\\" \\\"+\\\")\"\n }\n }\n }\n Output jsonl format: Same as above with an additional field \"problog_label\": .\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Tool to run theories through a theorem prover.\"\n )\n parser.add_argument(\n \"--input-file\",\n required=True,\n help=\"Input jsonl file in either the current format or the legacy RuleTaker Jsonl format\",\n )\n parser.add_argument(\n \"--input-format\",\n choices=[\"current\", \"legacy\"],\n default=\"current\",\n help=\"Input file format\",\n )\n parser.add_argument(\n \"--theorem-prover\",\n default=\"problog\",\n help=\"Thorem proving engine to use. Only supported one right now is problog.\",\n )\n parser.add_argument(\n \"--output-file\",\n required=True,\n help=\"Output file containing the theorem prover's output for each theory-assertion instance input. \\\n Output format will be the same as input format, so this will be either a CSV or a jsonl file.\",\n )\n parser.add_argument(\n \"--report-metrics\",\n action=\"store_true\",\n help=\"Flag that will cause metrics (accuracy against gold labels) to be tracked and reported\",\n )\n args = parser.parse_args()\n\n with open(args.input_file, \"r\") as ip, open(args.output_file, \"w\") as op:\n run_theorem_prover(\n args.theorem_prover, ip, args.input_format, op, args.report_metrics\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alexgaskell10/logical_plms","sub_path":"ruletaker/theory_label_generator.py","file_name":"theory_label_generator.py","file_ext":"py","file_size_in_byte":19217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16736094728","text":"# -*- coding: utf-8 -*-\n\nclass ParentTreeNode:\n def __init__(self, val):\n self.val = val\n self.parent, self.left, self.right = None, None, None\n\nclass LowestCommonAncestor2:\n def lowestCommonAncestor2(self, root, A, B):\n dict = {}\n while A is not root:\n dict[A] = True\n A = A.parent\n while B is not root:\n if B in dict:\n return B\n B = B.parent\n return root","repo_name":"jonXue92/PythonGit","sub_path":"Algorithm/chapter5/lowestCommonAncestor2.py","file_name":"lowestCommonAncestor2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72702724977","text":"from pydrive.auth import GoogleAuth\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom pathlib import Path\r\n\r\n# def upload(filename):\r\n# # gdrive upload filename\r\n\r\n\r\ndef openfile():\r\n window = Tk()\r\n print(\"Starting Tkinter Open Window\")\r\n\r\n filetypes = [(\"\", \"*\")]\r\n title = \"Find file to backup\"\r\n initialdir = \"C:\\\\\"\r\n\r\n window.fileName = askopenfilename(filetypes=filetypes, 
initialdir=initialdir, title=title)\r\n window.destroy()\r\n return window.fileName\r\n\r\n# ======================================\r\n# Settings File Load\r\n# ======================================\r\n\r\nsettingspath = Path(\"settings.cfg\")\r\n\r\n# Detect whether the settings file exists; if not, create a fresh one\r\nif not settingspath.exists():\r\n print(\"Creating new settings file\")\r\n settingsfile = open(\"settings.cfg\", \"w\")\r\n settingsfile.write(\"# Setup controls if the program goes into initial start mode, IE it will ask for OAuth from google and location of file\")\r\n settingsfile.write(\"\\n\" + \"# Default file controls the default file path, No need to mess with this just me storing data for later\")\r\n settingsfile.write(\"\\n\" + \"setup = false\")\r\n settingsfile.write(\"\\n\" + \"defaultfile = None\")\r\n settingsfile.close()\r\n\r\nsettingsfile = open(\"settings.cfg\", \"r\")\r\nsettingunparsed = str(settingsfile.read())\r\nsettinglineparse = settingunparsed.split(\"\\n\")\r\n\r\n# Drop the comment lines; filtering into a new list avoids deleting items from the list while iterating over it\r\nsettinglineparse = [line for line in settinglineparse if line[:2] != \"# \"]\r\n\r\n# Assign the final settings to variables\r\nissetup = settinglineparse[0].split(\" = \", 1)[1].lower()\r\ndefaultfile = settinglineparse[1].split(\" = \", 1)[1] \r\n\r\n# ======================================\r\n# Main Base\r\n# ======================================\r\n\r\n# Run Google's OAuth flow via a local webserver\r\ngauth = GoogleAuth()\r\ngauth.LocalWebserverAuth()\r\n\r\nif issetup == \"true\": # compare the string itself: bool(\"false\") would be truthy\r\n print(\"setup = True\")\r\n\r\n\r\n# if file exists:\r\n# read file name\r\n# upload(filename)\r\n# else:\r\n# openfile()\r\n\r\n# print(openfile())\r\n\r\nprint(issetup)\r\nprint(defaultfile)\r\n\r\n# On start upload file\r\n# Every hour it checks if it has uploaded a file\r\n\r\n\r\n","repo_name":"TylerRajotte/AutoPyBackup","sub_path":"AutoPyBackup/AutoPyBackup.py","file_name":"AutoPyBackup.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"35984803862","text":"import re\r\nfrom configuracionLR import *\r\n\r\ndef read_file_content(file_path):\r\n with open(file_path, 'r') as file:\r\n return file.read()\r\n\r\ndef write_to_file(file_path, content):\r\n with open(file_path, 'w') as file:\r\n file.write(content)\r\n\r\ndef verificar_archivo_yalep(nombre_archivo):\r\n errores = []\r\n with open(nombre_archivo) as archivo:\r\n contenido = archivo.read()\r\n \r\n # Check that the file starts with the expected header\r\n if not contenido.startswith(\"/* Configuración del parser para gramática No.1 */\"):\r\n errores.append(\"The file does not start with the expected header.\")\r\n \r\n # Check that the file defines the expected tokens\r\n tokens_esperados = {\"ID\", \"PLUS\", \"TIMES\", \"LPAREN\", \"RPAREN\", \"WS\"}\r\n tokens_definidos = set(re.findall(r\"%token\\s+(\\w+)\", contenido))\r\n if tokens_definidos != tokens_esperados:\r\n errores.append(f\"The file defines the following tokens: {tokens_definidos}. Expected: {tokens_esperados}.\")\r\n \r\n # Check that the expected grammar rule is defined\r\n regla_esperada = \"expression:\\n expression PLUS term\\n | term\\n;\\nterm:\\n term TIMES factor\\n | factor\\n;\\nfactor:\\n LPAREN expression RPAREN\\n | ID\\n;\"\r\n if regla_esperada not in contenido:\r\n errores.append(\"The file does not define the expected grammar rule.\")\r\n \r\n # Return the list of errors (empty if none were found)\r\n return errores\r\n\r\n\r\n# Files to use\r\nyalex_file = 'slr-1.yal'\r\nyalp_file = 'slr-1.yalp'\r\noutput_file = 'funciones.txt'\r\n\r\n# Run the check\r\nerrores = verificar_archivo_yalep(yalp_file)\r\n\r\n# Read the contents of yalex_file\r\nyalex_content = read_file_content(yalex_file)\r\n\r\n# Build the header and trailer\r\nheader_result, trailer_result, file_content, i = apertura_cerradura(yalex_content)\r\n\r\n# Normalize the file content\r\nfile_content = ver_contenido(file_content)\r\n\r\n# Build the regular expressions\r\nregex, errorStack, fin = constructor(file_content, i)\r\n\r\n# Build the tokens\r\nLEXtokens, errorStack = constructor_tokens(file_content, regex, errorStack, fin+1)\r\n\r\n# Parse the yalp file\r\ntokens, productions_dict, errorStack = fin_yalp(yalp_file, errorStack)\r\n\r\n# Check the tokens defined in LEXtokens\r\ngooTokens = []\r\n\r\nfor token in tokens:\r\n for lex_token in LEXtokens:\r\n evald = evalToken(lex_token)\r\n if token == evald:\r\n gooTokens.append(token)\r\n if token not in gooTokens:\r\n errorStack.append(f\"Token {token} not defined in the YALEX\")\r\n\r\nif len(gooTokens) < len(LEXtokens):\r\n errorStack.append(\"Some tokens were left undefined in the YAPAR\")\r\n\r\n# Split each raw production string into a list of symbols (defined before its first use)\r\ndef cambiar_valors(productions):\r\n converted_productions = {}\r\n for key, value in productions.items():\r\n converted_productions[key] = [prod.split() for prod in value]\r\n return converted_productions\r\n\r\n# Convert the productions\r\nconverted_productions = cambiar_valors(productions_dict)\r\n\r\n# Process the productions\r\nstates, transitions = procesados(converted_productions)\r\n\r\n# Plot the states and transitions\r\ngraficar(states, transitions)\r\n\r\n# Obtain the First and Follow sets\r\nconverted_prod = cambiar_valors(productions_dict)\r\nfirst = primera_funcion(converted_prod)\r\nfollow = siguiente_funcion(converted_prod, first)\r\n\r\n# Write the content to the output file\r\noutput_content = \"\\n\"\r\nfor non_terminal, first_set in first.items():\r\n output_content += f\"{non_terminal}: {first_set}\\n\"\r\noutput_content += \"\\n\"\r\nfor non_terminal, follow_set in follow.items():\r\n output_content += f\"{non_terminal}: {follow_set}\\n\"\r\n\r\nwrite_to_file(output_file, output_content)\r\n","repo_name":"KennethGalvez/LabE-LP","sub_path":"newMain.py","file_name":"newMain.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"71478774579","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n if not nums:\n return 0\n \n currentSum = maxSum = nums[0]\n \n for num in nums[1:]:\n currentSum = max(num, currentSum + num)\n maxSum = max(maxSum, currentSum)\n return maxSum\n 
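# --- illustrative usage (annotation; the helper below is a hypothetical,
# self-contained restatement of the Kadane-style scan above, so it runs
# without the LeetCode Solution/List scaffolding) ---
def max_sub_array(nums):
    current_sum = max_sum = nums[0]  # best sum ending at the current element / best seen so far
    for num in nums[1:]:
        current_sum = max(num, current_sum + num)  # extend the running subarray or restart at num
        max_sum = max(max_sum, current_sum)
    return max_sum

assert max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
assert max_sub_array([5, 4, -1, 7, 8]) == 23                # the whole array
assert max_sub_array([-3, -1, -2]) == -1                    # all-negative input keeps the single best element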
","repo_name":"kazziken/LeetCodeProgression","sub_path":"53-maximum-subarray/53-maximum-subarray.py","file_name":"53-maximum-subarray.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34327847379","text":"from traits.api import Any, on_trait_change, DelegatesTo, List\nfrom pyface.tasks.task_layout import TaskLayout, HSplitter\n\nfrom pychron.processing.tasks.analysis_edit.analysis_edit_task import AnalysisEditTask\nfrom pychron.processing.tasks.browser.browser_task import BaseBrowserTask\nfrom pychron.processing.tasks.browser.util import browser_pane_item\nfrom pychron.processing.tasks.repository.panes import RepositoryPane\nfrom pychron.processing.repository.geochron_repo import GeochronRepository\nfrom pychron.processing.repository.igsn import IGSN\n\n\n\n#============= standard library imports ========================\n#============= local library imports ==========================\n\nclass RepositoryTask(AnalysisEditTask):\n name = 'Repository'\n repository = Any\n igsn = Any\n\n igsn_enabled = DelegatesTo('igsn', prefix='enabled')\n repo_enabled = DelegatesTo('repository', prefix='enabled')\n auto_show_unknowns_pane = False\n\n tool_bars = List\n\n def _selected_projects_changed(self, old, new):\n project = ''\n if new:\n project = new[0].name\n\n self.igsn.project = project\n BaseBrowserTask._selected_projects_changed(self, old, new)\n\n def _selected_samples_changed(self, new):\n sample = ''\n if new:\n sample = new[0].name\n self.igsn.sample = sample\n\n def _repository_default(self):\n return GeochronRepository()\n\n def _igsn_default(self):\n return IGSN()\n\n def create_central_pane(self):\n return RepositoryPane(model=self)\n\n def create_dock_panes(self):\n #ps = AnalysisEditTask.create_dock_panes(self)\n #ps.extend([BrowserPane(model=self)])\n ps = [self._create_browser_pane(analyses_defined='0')]\n return ps\n\n def _save_to_db(self):\n \"\"\"\n save the sample igsn to the database\n \"\"\"\n db = self.manager.db\n with db.session_ctx():\n s = self.selected_samples\n p = self.selected_projects\n dbsample = db.get_sample(s.name, project=p.name)\n if dbsample is not None:\n dbsample.igsn = s.igsn\n else:\n msg = 'Sample: {}, Project: {} \\\n not found in database'.format(s.name, p.name)\n self.warning_dialog(msg)\n\n self.info('Sample: {}, Project: {}. 
IGSN set to {}'.format(s.name,\n p.name,\n s.igsn))\n\n #===============================================================================\n # handlers\n #===============================================================================\n @on_trait_change('igsn:new_igsn')\n def _new_igsn(self, new):\n \"\"\"\n associate the new igsn with the current sample and save to the\n database\n \"\"\"\n sample = self.igsn.sample\n project = self.igsn.project\n self.debug('Retrieved new IGSN:{} for project: {} sample: {}'.format(new, sample, project))\n\n self.selected_samples.igsn = new\n self._save_to_db()\n\n # @on_trait_change('igsn:[sample, username, password]')\n # def _update_igsn(self):\n # self.igsn_enabled = all([getattr(self.igsn, a)\n # for a in ('sample', 'username', 'password')])\n #\n # @on_trait_change('repository:enabl')\n # def _update_repo(self):\n # self.repo_enabled = all([getattr(self.repository, a)\n # for a in ('username', 'password')])\n #===============================================================================\n # defaults\n #===============================================================================\n def _default_layout_default(self):\n return TaskLayout(id='pychron.repository',\n left=HSplitter(\n browser_pane_item(),\n )\n # left=HSplitter(\n\n # PaneItem('pychron.browser'),\n # Splitter(\n # Tabbed(\n # PaneItem('pychron.processing.unknowns'),\n # # PaneItem('pychron.processing.figures.plotter_options')\n # ),\n # # Tabbed(\n # # PaneItem('pychron.processing.controls'),\n # # PaneItem('pychron.processing.editor'),\n # # ),\n # orientation='vertical'\n # )\n # ),\n\n )\n\n #============= EOF =============================================\n","repo_name":"INGPAN/pychron","sub_path":"pychron/processing/tasks/repository/respository_task.py","file_name":"respository_task.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74127996339","text":"import os\nimport shutil\nimport subprocess\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input_dir', help='input mp4 video path')\nargs = parser.parse_args()\n\ndef resume(input_dir, parent_dir, format):\n output_dir = os.path.join(parent_dir, 'resume_imgs')\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n files = os.listdir(input_dir)\n files.sort()\n i = 1\n with open(parent_dir + '/timestamp.txt', 'r') as tf:\n for f in files:\n file_name = tf.readline()[:-1]\n\n output_name = file_name + format\n\n input_img = os.path.join(os.path.abspath(input_dir), f)\n output_img = os.path.join(os.path.abspath(output_dir), output_name)\n shutil.copy(input_img, output_img)\n print('{}/{}'.format(i, len(files)))\n i += 1\n\ninput_dir = args.input_dir\nparent_dir = os.path.abspath(os.path.join(input_dir, os.pardir))\ndecompress_dir = os.path.join(parent_dir, 'decompress_imgs')\nformat = '.png'\n\nif not os.path.exists(decompress_dir):\n os.makedirs(decompress_dir)\n\nsubprocess.run(['ffmpeg', '-i', input_dir, decompress_dir + '/%06d' + format])\nresume(decompress_dir, parent_dir, format)\nsubprocess.run(['rm', '-rf', decompress_dir])","repo_name":"liuzexi256/data_process","sub_path":"decompress_img.py","file_name":"decompress_img.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20951171270","text":"# программа, составляющая расписание занятий (45 мин занятие, 10 мин перерыв)\nfrom datetime 
import datetime, timedelta\ntime_start = datetime.strptime(input(), '%H:%M')\ntime_finish = datetime.strptime(input(), '%H:%M')\nresult = []\nclasses = (time_finish - time_start) // timedelta(minutes=45)\nif classes > 0:\n for _ in range(classes):\n finish_class = time_start + timedelta(minutes=45)\n if finish_class <= time_finish:\n result.append([time_start.strftime('%H:%M') + ' - ' + finish_class.strftime('%H:%M')])\n time_start = finish_class + timedelta(minutes=10)\n else:\n break\n for res in result:\n print(*res)\n\n","repo_name":"AvorNika/ProfessionalCourse","sub_path":"Module datetime, time/3.4(22).py","file_name":"3.4(22).py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27016933102","text":"import time\r\n\r\n# initial prompting + title\r\nprint(\"Inflation Calculator\\n\\n\\n\\n\")\r\ntime.sleep(1)\r\n\r\nyear = int(input(\"Please input the year you would like to calculate the personal interest for: \"))\r\npre = year - 1\r\n\r\nexcat = int(input(\"Please input the number of expenditure categories: \"))\r\n\r\n# constants\r\ncount = 0\r\ntocuryearex = 0\r\ntopreyearex = 0\r\n\r\n# input loop\r\nwhile count != excat:\r\n curyearex = int(input(\"Please enter the expenses for %d : \" % year))\r\n preyearex = int(input(\"Please enter the expenses for %d : \" % pre))\r\n count = count + 1\r\n\r\n # current and previous year accumulators\r\n tocuryearex = tocuryearex + curyearex\r\n topreyearex = topreyearex + preyearex\r\n\r\n\r\n# the formula\r\ninflation = ((tocuryearex - topreyearex) / tocuryearex) * 100\r\n\r\n\r\n# making it only have 2 decimal places\r\nfinal = inflation // 1\r\nprint(\"The inflation rate for {} is {}%.\".format(year, final))\r\n\r\n# inflation level\r\nif final < 3:\r\n print(\"Inflation rate level: low\")\r\nelif 3 <= final < 5:\r\n print(\"Inflation rate level: moderate\")\r\nelif 5 <= final < 10:\r\n print(\"Inflation rate level: high\")\r\nelif final >= 10:\r\n print(\"Inflation rate level: hyper\")\r\n\r\n# if something somehow goes wrong\r\nelse:\r\n print(\"Unknown levels of inflation\")\r\n","repo_name":"M4DM4N56/uwo-inflation-calculator","sub_path":"Assign1.py","file_name":"Assign1.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43019404090","text":"class Solution:\n def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:\n res = 0\n intervals.sort()\n prev = intervals[0]\n for i in range(1, len(intervals)):\n if intervals[i][0] < prev[1]:\n res += 1\n if intervals[i][1] < prev[1]:\n prev = intervals[i]\n else:\n prev = intervals[i]\n return res\n","repo_name":"rameshpav1321/daily_leetcode","sub_path":"0435.non_overlapping_intervals.py","file_name":"0435.non_overlapping_intervals.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"369706952","text":"from sympy import symbols\nfrom sympy import integrate\nfrom sympy import *\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import legend\nimport numpy as np\nfrom tkinter import messagebox\n\ndef ecuacionD(i, s, fx, resp):\n\n if(i == \"\"):\n messagebox.showerror(\"Error\", \"Ingrese el limite inferior por favor\")\n elif(s == \"\"):\n messagebox.showerror(\"Error\", \"Ingrese el limite superior por favor\")\n elif(fx == \"\"):\n messagebox.showerror(\"Error\", \"Ingrese 
una integral por favor\")\n \n dx = symbols('x') #Diferencial\n fx = sympify(fx)\n \n print(\"limite inferior: \" + i)\n print(\"limite superior: \" + s)\n pprint(fx)\n\n if(resp == \"Fraccionario.\"):\n inte = integrate(fx, (dx, i, s)) #Fraccionario\n print(\"El resultado de la integral es: \")\n pprint(inte)\n elif(resp == \"Decimales.\"):\n inte = integrate(fx, (dx, i, s)).evalf(3) #Decimal\n print(\"El resultado de la integral es: \")\n pprint(inte)\n else:\n messagebox.showerror(\"Error\", \"Seleccione una opcion por favor\")\n\n return inte\n\ndef graficaES(fx):\n\n fx = sympify(fx)\n s = plot(fx, legend = True, show = False)#Grafica\n s[0].line_color = 'orange'\n s.show()","repo_name":"Code-Jathon/IGS-CALCULATOR","sub_path":"integralesDefinidas.py","file_name":"integralesDefinidas.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41102593484","text":"#-*- coding: utf-8 -*-\n\nimport codecs\nimport json\nimport re\n\nfrom docparser import HTMLParser\nfrom utils import TextUtils\nfrom ner import NERTagger\n\n\nclass DingZengRecord(object):\n def __init__(self, ZengFaDuiXiang,FaXingFangShi,ShuLiang,JinE,ShuoDingQi,RenGouFangShi):\n #增发对象\n self.ZengFaDuiXiang = ZengFaDuiXiang\n #增发数量\n self.ShuLiang = ShuLiang\n #增发金额\n self.JinE = JinE\n #锁定期\n self.ShuoDingQi = ShuoDingQi\n #认购方式\n self.RenGouFangShi = RenGouFangShi\n\n def __str__(self):\n return json.dumps(self.__dict__, ensure_ascii=False)\n\n def normalize_num(self, text):\n coeff = 1.0\n if '亿' in text:\n coeff *= 100000000\n if '万' in text:\n coeff *= 10000\n if '千' in text or '仟' in text:\n coeff *= 1000\n if '百' in text or '佰' in text:\n coeff *= 100\n if '%' in text:\n coeff *= 0.01\n try:\n number = float(TextUtils.extract_number(text))\n number_text = '%.4f' % (number * coeff)\n if number_text.endswith('.0'):\n return number_text[:-2]\n elif number_text.endswith('.00'):\n return number_text[:-3]\n elif number_text.endswith('.000'):\n return number_text[:-4]\n elif number_text.endswith('.0000'):\n return number_text[:-5]\n else:\n if '.' 
in number_text:\n idx = len(number_text)\n while idx > 1 and number_text[idx-1] == '0':\n idx -= 1\n number_text = number_text[:idx]\n return number_text\n except:\n return text\n\n def normalize(self):\n if self.ShuLiang is not None:\n self.ShuLiang = self.normalize_num(self.ShuLiang)\n if self.JinE is not None:\n self.JinE = self.normalize_num(self.JinE)\n\n def to_result(self):\n self.normalize()\n return \"%s\\t%s\\t%s\\t%s\\t%s\" % (\n self.ZengFaDuiXiang if self.ZengFaDuiXiang is not None else '',\n self.ShuLiang if self.ShuLiang is not None else '',\n self.JinE if self.JinE is not None else '',\n self.ShuoDingQi if self.ShuoDingQi is not None else '',\n self.RenGouFangShi if self.RenGouFangShi is not None else '')\n\n\nclass DingZengExtractor(object):\n def __init__(self, config_file_path, ner_model_dir_path, ner_blacklist_file_path):\n self.html_parser = HTMLParser.HTMLParser()\n self.config = None\n self.ner_tagger = NERTagger.NERTagger(ner_model_dir_path, ner_blacklist_file_path)\n self.com_abbr_dict = {}\n self.com_full_dict = {}\n self.com_abbr_ner_dict = {}\n\n self.RenGouFangShi = None\n\n with codecs.open(config_file_path, encoding='utf-8', mode='r') as fp:\n self.config = json.loads(fp.read())\n self.table_dict_field_pattern_dict = {}\n for table_dict_field in self.config['table_dict']['fields']:\n field_name = table_dict_field['fieldName']\n if field_name is None:\n continue\n convert_method = table_dict_field['convertMethod']\n if convert_method is None:\n continue\n pattern = table_dict_field['pattern']\n if pattern is None:\n continue\n col_skip_pattern = None\n if 'colSkipPattern' in table_dict_field:\n col_skip_pattern = table_dict_field['colSkipPattern']\n row_skip_pattern = None\n if 'rowSkipPattern' in table_dict_field:\n row_skip_pattern = table_dict_field['rowSkipPattern']\n self.table_dict_field_pattern_dict[field_name] = \\\n TableDictFieldPattern(field_name=field_name, convert_method=convert_method,\n pattern=pattern, col_skip_pattern=col_skip_pattern,\n row_skip_pattern=row_skip_pattern)\n\n \n\n def extract_from_table_dict(self, table_dict):\n rs = []\n if table_dict is None or len(table_dict) <= 0:\n return rs\n row_length = len(table_dict)\n field_col_dict = {}\n skip_row_set = set()\n # 1. 假定第一行是表头部分则尝试进行规则匹配这一列是哪个类型的字段\n # 必须满足 is_match_pattern is True and is_match_col_skip_pattern is False\n head_row = table_dict[0]\n col_length = len(head_row)\n for i in range(col_length):\n text = head_row[i]\n for (field_name, table_dict_field_pattern) in self.table_dict_field_pattern_dict.items():\n if table_dict_field_pattern.is_match_pattern(text) and \\\n not table_dict_field_pattern.is_match_col_skip_pattern(text):\n if field_name not in field_col_dict:\n field_col_dict[field_name] = i\n # 逐行扫描这个字段的取值,如果满足 row_skip_pattern 则丢弃整行 row\n for j in range(1, row_length):\n try:\n text = table_dict[j][i]\n if table_dict_field_pattern.is_match_row_skip_pattern(text):\n skip_row_set.add(j)\n except KeyError:\n pass\n if len(field_col_dict) <= 0:\n return rs\n # 2. 
遍历每个有效行,获取 record\n for row_index in range(1, row_length):\n if row_index in skip_row_set:\n continue\n record = DingZengRecord(None, None, None, None, None, self.RenGouFangShi)\n for (field_name, col_index) in field_col_dict.items():\n try:\n text = table_dict[row_index][col_index]\n if field_name == 'ZengFaDuiXiang':\n record.ZengFaDuiXiang = self.table_dict_field_pattern_dict.get(field_name).convert(text)\n elif field_name == 'ShuLiang':\n record.ShuLiang = self.table_dict_field_pattern_dict.get(field_name).convert(text)\n elif field_name == 'JinE':\n record.JinE = self.table_dict_field_pattern_dict.get(field_name).convert(text)\n else:\n pass\n except KeyError:\n pass\n rs.append(record)\n return rs\n\n def extract_from_paragraphs2(self, paragraphs):\n record_list = []\n return record_list\n\n \n def extract_from_paragraphs(self, paragraphs):\n record_list = []\n change_records = []\n for para in paragraphs:\n if para != \"\":\n change_records_para = self.extract_from_paragraph(para)\n change_records += change_records_para\n for record in change_records:\n record_list.append(record)\n return record_list\n\n def extract_from_paragraph(self, paragraph):\n #tag_res = self.ner_tagger.ner(paragraph, self.com_abbr_ner_dict)\n #tagged_str = tag_res.get_tagged_str()\n self.extract_RenGouFangShi(paragraph)\n return []\n\n def extract_RenGouFangShi(self, paragraph):\n if paragraph.find(\"现金\") != -1:\n self.RenGouFangShi = \"现金\"\n return \"\"\n\n def extract(self, html_file_path):\n rs = []\n paragraphs = self.html_parser.parse_content(html_file_path)\n rs_paragraphs = self.extract_from_paragraphs(paragraphs)\n for table_dict in self.html_parser.parse_table(html_file_path):\n rs_table = self.extract_from_table_dict(table_dict)\n if len(rs_table) > 0:\n if len(rs) > 0:\n #self.mergeRecord(rs, rs_table)\n break\n else:\n rs.extend(rs_table)\n # 2. 
如果没有 Table Dict 则解析文本部分\n if len(rs) <= 0:\n return rs_paragraphs\n return rs\n\nclass TableDictFieldPattern(object):\n def __init__(self, field_name, convert_method, pattern, col_skip_pattern, row_skip_pattern):\n self.field_name = field_name\n self.convert_method = convert_method\n self.pattern = None\n if pattern is not None and len(pattern) > 0:\n self.pattern = re.compile(pattern)\n self.col_skip_pattern = None\n if col_skip_pattern is not None and len(col_skip_pattern) > 0:\n self.col_skip_pattern = re.compile(col_skip_pattern)\n self.row_skip_pattern = None\n if row_skip_pattern is not None and len(row_skip_pattern) > 0:\n self.row_skip_pattern = re.compile(row_skip_pattern)\n\n def is_match_pattern(self, text):\n if self.pattern is None:\n return False\n match = self.pattern.search(text)\n return True if match else False\n\n def is_match_col_skip_pattern(self, text):\n if self.col_skip_pattern is None:\n return False\n match = self.col_skip_pattern.search(text)\n return True if match else False\n\n def is_match_row_skip_pattern(self, text):\n if self.row_skip_pattern is None:\n return False\n match = self.row_skip_pattern.search(text)\n return True if match else False\n\n def get_field_name(self):\n return self.field_name\n\n def convert(self, text):\n if self.convert_method is None:\n return self.default_convert(text)\n elif self.convert_method == 'getStringFromText':\n return self.getStringFromText(text)\n elif self.convert_method == 'getDateFromText':\n return self.getDateFromText(text)\n elif self.convert_method == 'getLongFromText':\n return self.getLongFromText(text)\n elif self.convert_method == 'getDecimalFromText':\n return self.getDecimalFromText(text)\n elif self.convert_method == 'getDecimalRangeFromTableText':\n return self.getDecimalRangeFromTableText(text)\n else:\n return self.default_convert(text)\n\n @staticmethod\n def default_convert(text):\n return text\n\n @staticmethod\n def getStringFromText(text):\n return text\n\n @staticmethod\n def getDateFromText(text):\n strList = text.split(\"至\")\n if len(strList) < 2 and (\"月\" in text or \"年\" in text or \"/\" in text or \".\" in text):\n strList = re.split(\"-|—|~\", text)\n return strList[-1]\n\n @staticmethod\n def getLongFromText(text):\n return TextUtils.remove_comma_in_number(text)\n\n @staticmethod\n def getDecimalFromText(text):\n return text\n\n @staticmethod\n def getDecimalRangeFromTableText(text):\n return text","repo_name":"ericsami/data-science-lessons","sub_path":"extraction/extraction/extract/DingZengExtractor.py","file_name":"DingZengExtractor.py","file_ext":"py","file_size_in_byte":10862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28706621498","text":"import sys, pygame\npygame.init()\n\n# Window Size\nsize = width, height = 800, 400\n\n# This actually indicates where the ball should move in one iteration\nspeed = [1, 1]\nbackground = 255, 255, 255\n# Set size\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Bouncing Ball\")\nball = pygame.image.load(\"ball.png\")\n# Turn the image into a rectangle in PyGame\nball_rect = ball.get_rect()\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n # Each time, the ball will move according to speed [x, y]\n ball_rect = ball_rect.move(speed)\n # Bounce Effect\n if ball_rect.left < 0 or ball_rect.right > width:\n speed[0] = -speed[0]\n if ball_rect.top < 0 or ball_rect.bottom > height:\n speed[1] = -speed[1]\n # Fill screen with white 
background\n screen.fill(background)\n screen.blit(ball, ball_rect)\n #Update display\n pygame.display.flip()\n","repo_name":"amithr/Intro-to-PyGame-Part-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"43947766202","text":"import itertools\r\nfrom pyspark.sql import SparkSession\r\nimport sys\r\nimport time\r\nfrom graphframes import GraphFrame\r\nfrom pyspark import SparkConf, SparkContext\r\n\r\nif __name__ == '__main__':\r\n start_time = time.time()\r\n # filter_threshold = sys.argv[1]\r\n # input_file_path = sys.argv[2]\r\n # output_file_path = sys.argv[3]\r\n filter_threshold = '7'\r\n input_file_path = 'data/ub_sample_data.csv'\r\n output_file_path = 'task1.txt'\r\n configuration = SparkConf().set(\"spark.driver.memory\", \"4g\").set(\"spark.executor.memory\", \"4g\")\r\n sc = SparkContext.getOrCreate(configuration)\r\n sc.setLogLevel(\"WARN\")\r\n ss = SparkSession.builder.config('spark.driver.memory', '4G').config('spark.executor.memory', '4G').getOrCreate()\r\n input_lines = sc.textFile(input_file_path).map(lambda x : x.split(',')).map(lambda x:(x[0], x[1])).filter(lambda x: x[0]!= \"user_id\").groupByKey().mapValues(lambda x: list(x))\r\n ub_dict = input_lines.collectAsMap()\r\n \r\n edges = []\r\n points = set()\r\n for x in list(itertools.combinations(ub_dict.keys(), 2)):\r\n if len(set(ub_dict[x[0]]).intersection(set(ub_dict[x[1]]))) >= int(filter_threshold):\r\n edges.append(x)\r\n edges.append((x[1],x[0]))\r\n points.add(x[0])\r\n points.add(x[1])\r\n points_df = sc.parallelize(list(points)).map(lambda x:(x,))\r\n points_df = ss.createDataFrame(points_df, ['id'])\r\n edges_df = sc.parallelize(edges)\r\n edges_df = ss.createDataFrame(edges_df, schema=['src', 'dst'])\r\n graph = GraphFrame(points_df, edges_df)\r\n lpa_graph = graph.labelPropagation(maxIter=5)\r\n communities = lpa_graph.rdd.map(lambda x: (x[1],x[0])).groupByKey().map(lambda x: sorted(list(x[1]))).sortBy(lambda x: (len(x), x))\r\n\r\n result = communities.collect()\r\n # output\r\n with open(output_file_path, 'w+') as output_file:\r\n for line in result:\r\n output_file.writelines(str(line)[1:-1] + \"\\n\")\r\n output_file.close()\r\n print('Duration:', (time.time()-start_time))\r\n","repo_name":"alvinzhou66/data_mining_assignments","sub_path":"4_graph_mining/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26548281231","text":"class Node:\n def __init__(self,info,forward_link = None,backward_link = None):\n self.FORWARD_LINK = forward_link\n self.info = info\n self.BACKWARD_LINK = backward_link\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.current_ptr= None\n self.size = 0\n self.temp_list = [] # Note required. 
Added here to verify if doubly linked list works as expected\n \n def __len__(self):\n return self.size\n \n def isEmpty(self):\n return self.size == 0\n \n def insert_item_at_head(self,item):\n if self.head == None:\n newNode = Node(item)\n self.head = newNode\n self.tail = newNode\n self.size += 1\n self.temp_list.append(item)\n else:\n newNode = Node(item)\n newNode.FORWARD_LINK = self.head\n self.head.BACKWARD_LINK = newNode\n self.head = newNode\n newNode.BACKWARD_LINK = None\n self.size += 1\n self.temp_list.append(item)\n return newNode\n\n def insert_item_at_tail(self,item): # similar approach can be followed for insertion at any given location LOC\n if self.tail == None:\n newNode = Node(item)\n self.head = newNode\n self.tail = newNode\n self.size += 1\n self.temp_list.append(item)\n else:\n newNode = Node(item)\n self.tail.FORWARD_LINK = newNode\n newNode.BACKWARD_LINK = self.tail\n newNode.FORWARD_LINK = None\n self.tail = newNode\n self.size += 1\n self.temp_list.append(item)\n return newNode\n\n def insert_item_between_nodes(self,item,LOCA,LOCB): # LOCA = predecessor, LOCB = Successor\n newNode = Node(item)\n newNode.FORWARD_LINK = LOCB\n newNode.BACKWARD_LINK = LOCA\n LOCA.FORWARD_LINK = newNode\n LOCB.BACKWARD_LINK = newNode\n return newNode\n \n def traverse_list(self):\n if self.head == None:\n print('Cant traverse. List is Empty !')\n temp_head = self.head\n while self.head != None:\n print(self.head.info)\n self.head = self.head.FORWARD_LINK\n self.head = temp_head\n\n#-Author - Anantvir Singh-----Reference = DS ALgo by Michael T. Goodrich et al\nclass PositionalList(DoublyLinkedList):\n\n #---------------Each node is represented by an abstraction called Position, which returns a user friendly position object encapsulating a node ------------------\n class Position: # Nested class\n def __init__(self,container,node):\n self._container = container\n self._node = node\n \n def info(self):\n return self._node.info\n \n def __eq__(self,other):\n return type(other) is type(self) and other.node is self._node\n\n #--------------------------Validate if position given by user is correct ---------------------------------\n def _validate(self,p):\n if not isinstance(p,self.Position):\n raise TypeError('p is not of type Position !')\n if p._container is not self:\n raise ValueError('p does not belong to this container')\n if p.node.FORWARD_LINK is None:\n raise ValueError('p is not valid. 
It is a sentinel !')\n return p._node\n \n #--------------Utility method to create a new Position object(Wrap node in this object) and return it-------------------------------\n def _make_position(self,node):\n if node is self.head or node is self.tail:\n return None\n else:\n return self.Position(self,node) # returns position of node given to this method\n\n #---------------Accessor methods------------------------------------------------------------------------\n def first(self): # returns position of 1st node i.e node after the header\n return self._make_position(self.head.FORWARD_LINK)\n \n def last(self):\n return self._make_position(self.tail.BACKWARD_LINK)\n \n def before(self,p):\n node = self._validate(p)\n return self._make_position(node)\n \n def after(self,p):\n node = self._validate(p)\n return self._make_position(node)\n \n def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.info()\n cursor = self.after(cursor)\n\n #-----------------------List modification methods----------------------------\n \n def insert_item_between_nodes(self,item,LOCA,LOCB):\n node = super().insert_item_between_nodes(item,LOCA,LOCB)\n return self._make_position(node)\n\n def insert_in_front(self,item):\n return self.insert_item_between_nodes(item,self.head,self.head.FORWARD_LINK)\n \n def insert_at_last(self,item):\n return self.insert_item_between_nodes(item,self.tail,self.tail.BACKWARD_LINK)\n\n def add_before(self,position,item): # insert 'item' before 'position'\n node_for_given_position = self._validate(position)\n return self.insert_item_between_nodes(item,node_for_given_position.BACKWARD_LINK,node_for_given_position)\n \n # Similarly more methods can be implemented like 'add_after' or other modification methods also called 'mutators'\n \n","repo_name":"anantvir/DataStructures_LinkedLists","sub_path":"positional_linked_list.py","file_name":"positional_linked_list.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"328603752","text":"#!/usr/bin/env python3\n\nimport os\nimport pathlib\nimport sys\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport torch\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--inputDir\", help=\"input folder\", type=str)\n parser.add_argument(\"-o\", \"--outputDir\", help=\"output folder\", type=str)\n parser.add_argument(\"-y\", \"--year\", help=\"year of the simulation\", type=int)\n parser.add_argument(\"--energyStart\", help=\"energy start\", type=float)\n parser.add_argument(\"--energyEnd\", help=\"energy end\", type=float)\n parser.add_argument(\"--batchSize\", help=\"batch size\", type=int)\n parser.add_argument(\"--numEpochs\", help=\"number of epochs\", type=int)\n return parser.parse_args()\n\n\ndef check_args(args):\n if not os.path.isdir(args.inputDir):\n print(f\"{args.inputDir} is not a directory\")\n sys.exit(1)\n if not os.path.isdir(args.outputDir):\n print(f\"{args.outputDir} is not a directory, creating it...\")\n pathlib.Path(args.outputDir).mkdir(parents=True, exist_ok=True)\n\n # Creating the subfolders for the plots and the model\n pathlib.Path(f\"{args.outputDir}/model\").mkdir(parents=True, exist_ok=True)\n pathlib.Path(f\"{args.outputDir}/plots\").mkdir(parents=True, exist_ok=True)\n return\n\n\ndef format_duration(seconds):\n \"\"\"Format a duration given in seconds to a string D-HH:MM:SS\"\"\"\n days = seconds // 86400\n 
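    # NB: `hours` below is taken from the raw total rather than `seconds % 86400`,
    # so for inputs of a day or more the hours field also counts the whole days
    # (e.g. 90000 s renders as "1-25:00:00", not "1-01:00:00").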
hours = seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60\n return f\"{days}-{hours:02d}:{minutes:02d}:{seconds:02d}\"\n\n\ndef load_data(args, is_train=True):\n if is_train:\n print(\"Loading training data...\")\n else:\n print(\"Loading testing data...\")\n\n E_range = np.around(\n np.arange(args.energyStart, args.energyEnd, 0.1),\n decimals=1,\n )\n # Initialize an empty dataframe for both gammas and protons\n df = pd.DataFrame()\n # Load the data\n for primary in [\"gamma\", \"proton\"]:\n # Initialize an empty dataframe for the current primary particle\n dataFrame = pd.DataFrame()\n print(f\"Loading {primary} data...\")\n for E in E_range:\n fileName = f\"{args.inputDir}/{primary}/{args.year}/hdf5/{primary}_2012_E{E}_{'train' if is_train else 'test'}.hdf5\"\n # Do a few checks\n if E >= 7.0:\n print(f\"Skipping E = {E}\")\n continue\n if not os.path.isfile(fileName):\n print(f\"{os.path.basename(fileName)} not found\")\n continue\n\n # Concatenate the dataframe with the next dataset\n dataFrame = pd.concat(\n [\n dataFrame,\n pd.read_hdf(\n fileName,\n key=\"data\",\n ),\n ],\n ignore_index=True,\n )\n\n # Assign output values based on the primary particle\n dataFrame[\"output\"] = (\n np.ones(len(dataFrame)) if primary == \"gamma\" else np.zeros(len(dataFrame))\n )\n\n # Append the data to the main dataframe\n df = pd.concat(\n [df, dataFrame],\n ignore_index=True,\n )\n\n print(\"Total events\", len(df))\n # Print how many gammas and proton events there are and the ratio\n gammaEvents = len(df[df[\"output\"] == 1])\n protonEvents = len(df[df[\"output\"] == 0])\n print(\"Gamma events\", gammaEvents)\n print(\"Proton events\", protonEvents)\n print(\"Ratio Gamma/Proton\", gammaEvents / protonEvents)\n\n # Check if there are any invalid values\n if np.sum(np.logical_not(np.isfinite(df[\"output\"].values))):\n print(df[\"output\"].values[np.logical_not(np.isfinite(df[\"output\"].values))])\n print(np.sum(np.logical_not(np.isfinite(df[\"output\"].values))))\n print(\"Invalid output values\")\n exit()\n\n # Shuffle the dataframe (if desired)\n df = df.sample(frac=1).reset_index(drop=True)\n return df\n\n\ndef make_input_tensors(df):\n # MapHLCq\n MapHLCq = np.array((df[\"MapHLCq\"].values).tolist()).astype(float)\n check_if_map_is_valid(MapHLCq)\n MapHLCq_tensor = torch.from_numpy(MapHLCq).view(-1, 1, 10, 10, 2).float()\n\n # MapSLCq\n MapSLCq = np.array((df[\"MapSLCq\"].values).tolist()).astype(float)\n check_if_map_is_valid(MapSLCq)\n SumMapSLCq = np.sum(MapSLCq, axis=(1, 2, 3))\n SumMapSLCq_tensor = torch.from_numpy(SumMapSLCq).view(-1, 1).float()\n\n # MapHLCt\n MapHLCt = np.array((df[\"MapHLCt\"].values).tolist()).astype(float) * 1e3\n check_if_map_is_valid(MapHLCt)\n\n # MapSLCt\n MapSLCt = np.array((df[\"MapSLCt\"].values).tolist()).astype(float) * 1e3\n check_if_map_is_valid(MapSLCt)\n\n # ArrayTime\n ArrayTime = np.zeros((len(df), 1))\n\n for i in range(MapHLCt.shape[0]):\n # Set the starting time of each time map to 1\n if np.sum(MapHLCt[i] != 0.0):\n MapHLCt[i][MapHLCt[i] != 0.0] -= (\n np.amin(MapHLCt[i][MapHLCt[i] != 0.0]) + 1.0\n )\n # # Get the minimum time and the maximum time of HLC and SLC time maps\n # mapTime = np.concatenate((MapHLCt[i], MapSLCt[i]), axis=0)\n # if np.sum(mapTime[mapTime != 0.0]):\n # maxTime = np.amax(mapTime[mapTime != 0.0])\n # minTime = np.amin(mapTime[mapTime != 0.0])\n # # Set the ArrayTime to the time interval\n # ArrayTime[i] = maxTime - minTime\n\n # ArrayTime_tensor = torch.from_numpy(ArrayTime).view(-1, 
1).float()\n\n MapHLCt_tensor = torch.from_numpy(MapHLCt).view(-1, 1, 10, 10, 2).float()\n\n log10_S125_tensor = (\n torch.tensor(df[\"Laputop3s3s_Log10_S125\"].values).view(-1, 1).float()\n )\n zenith_tensor = torch.tensor(df[\"Laputop3s3s_zenith\"].values).view(-1, 1).float()\n beta_tensor = torch.tensor(df[\"Laputop3s3s_beta\"].values).view(-1, 1).float()\n\n fccInput_tensor = torch.cat(\n (\n log10_S125_tensor,\n zenith_tensor,\n beta_tensor,\n SumMapSLCq_tensor,\n # ArrayTime_tensor,\n ),\n dim=1,\n )\n\n check_if_tensor_is_valid(fccInput_tensor)\n\n output_tensor = torch.tensor(df[\"output\"].values).view(-1, 1).float()\n weights = torch.tensor(df[\"weights\"].values).view(-1, 1).float()\n\n tensor_dict = {\n \"MapHLCq\": MapHLCq_tensor,\n \"MapHLCt\": MapHLCt_tensor,\n \"fccInput\": fccInput_tensor,\n \"output\": output_tensor,\n \"weights\": weights,\n }\n return tensor_dict\n\n\ndef check_if_map_is_valid(Map):\n if np.sum(np.logical_not(np.isfinite(Map))):\n for i, el in enumerate(Map):\n if np.sum(np.logical_not(np.isfinite(el))):\n print(el)\n print(el[np.logical_not(np.isfinite(el))])\n print(np.sum(np.logical_not(np.isfinite(el))))\n print(\"Invalid MAP values\")\n print(\"Index\", i)\n print(el.shape)\n exit()\n Map[np.logical_not(np.isfinite(Map))] = 0.0\n return\n\n\ndef check_if_tensor_is_valid(tensor):\n if torch.sum(torch.logical_not(torch.isfinite(tensor))):\n print(tensor[torch.logical_not(torch.isfinite(tensor))])\n print(torch.sum(torch.logical_not(torch.isfinite(tensor))))\n print(\"Invalid input tensor2\")\n sys.exit(1)\n return\n\n\ndef plot_results(\n training_results,\n outputDir,\n):\n train_losses = training_results[\"train_losses\"]\n val_losses = training_results[\"val_losses\"]\n test_losses = training_results[\"test_losses\"]\n #\n train_accuracies = training_results[\"train_accuracies\"]\n val_accuracies = training_results[\"val_accuracies\"]\n test_accuracies = training_results[\"test_accuracies\"]\n num_epochs = len(train_losses)\n\n plt.figure(figsize=(10, 4))\n\n plt.subplot(1, 2, 1)\n plt.plot(range(1, num_epochs + 1), train_losses, label=\"Train Loss\")\n plt.plot(range(1, num_epochs + 1), val_losses, label=\"Validation Loss\")\n plt.plot(range(1, num_epochs + 1), test_losses, label=\"Test Loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.title(\"Training and Testing Losses\")\n plt.legend()\n\n plt.subplot(1, 2, 2)\n plt.plot(range(1, num_epochs + 1), train_accuracies, label=\"Train Accuracy\")\n plt.plot(range(1, num_epochs + 1), val_accuracies, label=\"Validation Accuracy\")\n plt.plot(range(1, num_epochs + 1), test_accuracies, label=\"Test Accuracy\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.title(\"Training and Testing Accuracies\")\n plt.legend()\n\n plt.tight_layout()\n plt.savefig(f\"{outputDir}/plots/training_results.png\")\n plt.close()\n","repo_name":"fedbont94/CNN_gamma_hadron","sub_path":"utils/utils_functions.py","file_name":"utils_functions.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13465062735","text":"def solution(s):\n answer = []\n # 밖에있는 {{}}를 없애준다.\n s = s[2:-2]\n # 숫자만 빼서 리스트 형태로 저장한다.\n s = s.split(\"},{\")\n \n # 오름차순 정렬\n s.sort(key = len)\n \n for i in s:\n i_list = i.split(',')\n for j in i_list:\n # 기존에 추가된 숫자 거르기\n if int(j) not in answer:\n answer.append(int(j))\n return answer","repo_name":"Hyeok95/Algorithms-Study","sub_path":"유승아/Level 
2/3주차/튜플.py","file_name":"튜플.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"38866463664","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport random\nimport numpy as np\nimport time\nimport logging\nimport os\nimport copy\nimport pandas as pd\nfrom torch import Tensor\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom tqdm import tqdm\nimport zipfile\nimport json\nfrom io import BytesIO\n\n\n#######################################################################################################\ndef set_seed(seed):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed) # seed the CPU RNG so results are deterministic\n torch.cuda.manual_seed(seed) # seed the RNG of the current GPU\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n\ndef d2s(dt, time=False):\n if time is False:\n return dt.strftime(\"%Y_%m_%d\")\n else:\n return dt.strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n\ndef timer(func):\n def deco(*args, **kwargs):\n start_time = time.time()\n res = func(*args, **kwargs)\n end_time = time.time()\n logging.info(\"Function {} ran {:.2f}s.\".format(func.__name__, end_time - start_time))\n return res\n\n return deco\n\n\nclass FGM():\n def __init__(self, model):\n self.model = model\n self.backup = {}\n\n def attack(self, epsilon=0.6, emb_name='word_embeddings.'):\n # replace emb_name with the name of the embedding parameter in your model\n for name, param in self.model.named_parameters():\n if param.requires_grad and emb_name in name and \"video_embeddings.word_embeddings\" not in name:\n self.backup[name] = param.data.clone()\n norm = torch.norm(param.grad)\n if norm != 0 and not torch.isnan(norm):\n r_at = epsilon * param.grad / norm\n param.data.add_(r_at)\n\n def restore(self, emb_name='word_embeddings.'):\n # replace emb_name with the name of the embedding parameter in your model\n for name, param in self.model.named_parameters():\n if param.requires_grad and emb_name in name and \"video_embeddings.word_embeddings\" not in name:\n assert name in self.backup\n param.data = self.backup[name]\n self.backup = {}\n\n\nclass EMA():\n def __init__(self, model, decay):\n self.model = model\n self.decay = decay\n self.shadow = {}\n self.backup = {}\n\n def register(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n self.shadow[name] = param.data.clone()\n\n def update(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n new_average = (1.0 - self.decay) * param.data + self.decay * self.shadow[name]\n self.shadow[name] = new_average.clone()\n\n def apply_shadow(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n self.backup[name] = param.data\n param.data = self.shadow[name]\n\n def restore(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.backup\n param.data = self.backup[name]\n self.backup = {}\n\n\nclass AWP:\n def __init__(\n self,\n model,\n optimizer,\n adv_param=\"weight\",\n adv_lr=1,\n adv_eps=0.2,\n start_epoch=0,\n adv_step=1,\n device=None,\n scaler=None\n ):\n self.model = model\n self.optimizer = optimizer\n self.adv_param = adv_param\n self.adv_lr = adv_lr\n self.adv_eps = adv_eps\n self.start_epoch = start_epoch\n self.adv_step = adv_step\n self.device = device\n self.backup = {}\n self.backup_eps = {}\n\n def attack_backward(self, 
batch, epoch):\n if (self.adv_lr == 0) or (epoch < self.start_epoch):\n return None\n\n self._save() \n for i in range(self.adv_step):\n self._attack_step()\n input_ids = batch[\"input_ids\"].to(self.device)\n attention_mask = batch[\"attention_mask\"].to(self.device)\n ###\n video_input = batch[\"video_input\"].to(self.device)\n video_mask = batch[\"video_mask\"].to(self.device)\n ###\n labels = batch[\"labels\"].to(self.device)\n _, adv_loss = self.model(input_ids=input_ids, attention_mask=attention_mask, video_input=video_input, \\\n video_mask=video_mask, labels=labels)\n self.optimizer.zero_grad()\n adv_loss.backward()\n \n self._restore()\n\n def _attack_step(self):\n e = 1e-6\n for name, param in self.model.named_parameters():\n if param.requires_grad and param.grad is not None and self.adv_param in name:\n norm1 = torch.norm(param.grad)\n norm2 = torch.norm(param.data.detach())\n if norm1 != 0 and not torch.isnan(norm1):\n r_at = self.adv_lr * param.grad / (norm1 + e) * (norm2 + e)\n param.data.add_(r_at)\n param.data = torch.min(\n torch.max(param.data, self.backup_eps[name][0]), self.backup_eps[name][1]\n )\n # param.data.clamp_(*self.backup_eps[name])\n\n def _save(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad and param.grad is not None and self.adv_param in name:\n if name not in self.backup:\n self.backup[name] = param.data.clone()\n grad_eps = self.adv_eps * param.abs().detach()\n self.backup_eps[name] = (\n self.backup[name] - grad_eps,\n self.backup[name] + grad_eps,\n )\n\n def _restore(self,):\n for name, param in self.model.named_parameters():\n if name in self.backup:\n param.data = self.backup[name]\n self.backup = {}\n self.backup_eps = {}\n\n\nclass PGD():\n def __init__(self, model, emb_name=\"word_embeddings.\", epsilon=1.0, alpha=0.3):\n # emb_name这个参数要换成你模型中embedding的参数名\n self.model = model\n self.emb_name = emb_name\n self.epsilon = epsilon\n self.alpha = alpha\n self.emb_backup = {}\n self.grad_backup = {}\n\n def attack(self, is_first_attack=False):\n for name, param in self.model.named_parameters():\n if param.requires_grad and self.emb_name in name:\n if is_first_attack:\n self.emb_backup[name] = param.data.clone()\n norm = torch.norm(param.grad)\n if norm != 0:\n r_at = self.alpha * param.grad / norm\n param.data.add_(r_at)\n param.data = self.project(name, param.data, self.epsilon)\n\n def restore(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad and self.emb_name in name:\n assert name in self.emb_backup\n param.data = self.emb_backup[name]\n self.emb_backup = {}\n\n def project(self, param_name, param_data, epsilon):\n r = param_data - self.emb_backup[param_name]\n if torch.norm(r) > epsilon:\n r = epsilon * r / torch.norm(r)\n return self.emb_backup[param_name] + r\n\n def backup_grad(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad and param.grad is not None:\n self.grad_backup[name] = param.grad.clone()\n\n def restore_grad(self):\n for name, param in self.model.named_parameters():\n if param.requires_grad and param.grad is not None:\n param.grad = self.grad_backup[name]\n\n\nclass F1_Loss(nn.Module):\n '''Calculate F1 score. Can work with gpu tensors\n \n The original implmentation is written by Michal Haltuf on Kaggle.\n \n Returns\n -------\n torch.Tensor\n `ndim` == 1. 
epsilon <= val <= 1\n \n Reference\n ---------\n - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric\n - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score\n - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6\n - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/\n '''\n def __init__(self, num_labels=2, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon\n self.num_labels = num_labels\n \n def forward(self, y_pred, y_true):\n assert y_pred.ndim == 2\n assert y_true.ndim == 1\n y_true = F.one_hot(y_true, self.num_labels).to(torch.float32)\n y_pred = F.softmax(y_pred, dim=1)\n \n tp = (y_true * y_pred).sum(dim=0).to(torch.float32)\n tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)\n fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)\n fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)\n\n precision = tp / (tp + fp + self.epsilon)\n recall = tp / (tp + fn + self.epsilon)\n\n f1 = 2* (precision*recall) / (precision + recall + self.epsilon)\n f1 = f1.clamp(min=self.epsilon, max=1-self.epsilon)\n return 1 - f1.mean()\n\n# f1_loss = F1_Loss().cuda()\n\n#######################################################################################################\nfrom category_id_map import category_id_to_lv2id, lv2id_to_lv1id\n\n\n@timer\ndef prepare_training_data(data_path, tokenizer, fix_length=256):\n with open(data_path, 'r', encoding='utf8') as f:\n anns = json.load(f)\n # anns = anns[:1000]\n training_samples = []\n for item in anns:\n vid = item[\"id\"]\n title = item[\"title\"]\n asr = item[\"asr\"]\n ocr = \"\"\n for o in item[\"ocr\"]:\n ocr += o[\"text\"]\n input_t = tokenizer.encode_plus(\n title,\n add_special_tokens=False,\n ).input_ids\n input_a = tokenizer.encode_plus(\n asr,\n add_special_tokens=False,\n ).input_ids\n input_o = tokenizer.encode_plus(\n ocr,\n add_special_tokens=False,\n ).input_ids\n while len(input_t) + len(input_a) + len(input_o) + 5 >= fix_length:\n if len(input_t) >= len(input_a) and len(input_t) >= len(input_o):\n input_t.pop()\n elif len(input_a) >= len(input_t) and len(input_a) >= len(input_o):\n input_a.pop()\n # del(input_a[0])\n else:\n input_o.pop()\n input_ids = [tokenizer.cls_token_id] + [tokenizer.convert_tokens_to_ids(\"[T]\")] + input_t + [tokenizer.convert_tokens_to_ids(\"[A]\")] \\\n + input_a + [tokenizer.convert_tokens_to_ids(\"[O]\")] + input_o + [tokenizer.sep_token_id]\n # logging.info(len(input_ids))\n attention_mask = [1] * len(input_ids)\n labels = category_id_to_lv2id(item[\"category_id\"])\n training_samples.append({\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"vid\": vid,\n \"labels\": labels,\n \"fold\": int(item[\"fold\"]),\n })\n # assert len(input_ids) == fix_length\n return training_samples\n\n\ndef fetch_score(predictions, labels):\n # prediction and labels are all level-2 class ids\n\n lv1_preds = [lv2id_to_lv1id(lv2id) for lv2id in predictions]\n lv1_labels = [lv2id_to_lv1id(lv2id) for lv2id in labels]\n\n lv2_f1_micro = f1_score(labels, predictions, average='micro')\n lv2_f1_macro = f1_score(labels, predictions, average='macro')\n lv1_f1_micro = f1_score(lv1_labels, lv1_preds, average='micro')\n lv1_f1_macro = f1_score(lv1_labels, lv1_preds, average='macro')\n mean_f1 = (lv2_f1_macro + lv1_f1_macro + lv1_f1_micro + lv2_f1_micro) / 4.0\n\n eval_results = {'lv1_acc': accuracy_score(lv1_labels, 
lv1_preds),\n 'lv2_acc': accuracy_score(labels, predictions),\n 'lv1_f1_micro': lv1_f1_micro,\n 'lv1_f1_macro': lv1_f1_macro,\n 'lv2_f1_micro': lv2_f1_micro,\n 'lv2_f1_macro': lv2_f1_macro,\n 'mean_f1': mean_f1}\n return eval_results\n\n\n@timer\ndef prepare_testing_data(data_path, tokenizer, fix_length=256):\n with open(data_path, 'r', encoding='utf8') as f:\n anns = json.load(f)\n training_samples = []\n for item in anns:\n vid = item[\"id\"]\n title = item[\"title\"]\n asr = item[\"asr\"]\n ocr = \"\"\n for o in item[\"ocr\"]:\n ocr += o[\"text\"]\n input_t = tokenizer.encode_plus(\n title,\n add_special_tokens=False,\n ).input_ids\n input_a = tokenizer.encode_plus(\n asr,\n add_special_tokens=False,\n ).input_ids\n input_o = tokenizer.encode_plus(\n ocr,\n add_special_tokens=False,\n ).input_ids\n while len(input_t) + len(input_a) + len(input_o) + 5 >= fix_length:\n if len(input_t) >= len(input_a) and len(input_t) >= len(input_o):\n input_t.pop()\n elif len(input_a) >= len(input_t) and len(input_a) >= len(input_o):\n input_a.pop()\n # del(input_a[0])\n else:\n input_o.pop()\n input_ids = [tokenizer.cls_token_id] + [tokenizer.convert_tokens_to_ids(\"[T]\")] + input_t + [tokenizer.convert_tokens_to_ids(\"[A]\")] \\\n + input_a + [tokenizer.convert_tokens_to_ids(\"[O]\")] + input_o + [tokenizer.sep_token_id]\n # logging.info(len(input_ids))\n attention_mask = [1] * len(input_ids)\n training_samples.append({\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"vid\": vid,\n \"fold\": int(item[\"fold\"]),\n })\n # assert len(input_ids) == fix_length\n return training_samples\n\n\n@timer\ndef prepare_predict_data(data_path, tokenizer, fix_length=256):\n with open(data_path, 'r', encoding='utf8') as f:\n anns = json.load(f)\n training_samples = []\n for item in anns:\n vid = item[\"id\"]\n title = item[\"title\"]\n asr = item[\"asr\"]\n ocr = \"\"\n for o in item[\"ocr\"]:\n ocr += o[\"text\"]\n input_t = tokenizer.encode_plus(\n title,\n add_special_tokens=False,\n ).input_ids\n input_a = tokenizer.encode_plus(\n asr,\n add_special_tokens=False,\n ).input_ids\n input_o = tokenizer.encode_plus(\n ocr,\n add_special_tokens=False,\n ).input_ids\n while len(input_t) + len(input_a) + len(input_o) + 5 >= fix_length:\n if len(input_t) >= len(input_a) and len(input_t) >= len(input_o):\n input_t.pop()\n elif len(input_a) >= len(input_t) and len(input_a) >= len(input_o):\n input_a.pop()\n else:\n input_o.pop()\n input_ids = [tokenizer.cls_token_id] + [tokenizer.convert_tokens_to_ids(\"[T]\")] + input_t + [tokenizer.convert_tokens_to_ids(\"[A]\")] \\\n + input_a + [tokenizer.convert_tokens_to_ids(\"[O]\")] + input_o + [tokenizer.sep_token_id]\n # logging.info(len(input_ids))\n attention_mask = [1] * len(input_ids)\n training_samples.append({\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"vid\": vid,\n })\n # assert len(input_ids) == fix_length\n return training_samples","repo_name":"TelmaZzzz/WXData","sub_path":"src/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":15885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"30750262986","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def hasCycle(self, head: Optional[ListNode]) -> bool:\n if not head or not head.next:\n return False\n\n fast = head\n slow = head\n\n while True:\n if not fast.next or not fast.next.next:\n return False\n\n 
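            # Floyd's tortoise-and-hare step: `fast` advances two nodes per
            # iteration while `slow` advances one; if a cycle exists, the gap
            # between them shrinks by one node each pass, so they must meet.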
fast = fast.next.next\n slow = slow.next\n\n if fast == slow:\n return True\n \n\n\"\"\"\nidea:\nfast pointer jumps by 2, slow jumps by one.\nif fast and slow ever meet there is a cycle\nif fast.next or fast.next.next is ever None there is no cycle\n\"\"\"","repo_name":"darakcheev00/Leetcode-Blind75","sub_path":"easy/141_linked_list_cycle.py","file_name":"141_linked_list_cycle.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43399135318","text":"#!/usr/bin/python3\n\"\"\" This is a say my name module \"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n \"\"\"\n Print the given first and last name.\n\n Args:\n first_name (str): The first name.\n last_name (str, optional): The last name. Defaults to an empty string.\n\n Raises:\n TypeError: If `first_name` is not a string.\n TypeError: If `last_name` is not a string.\n\n Prints:\n The message \"My name is \". If `last_name` is not provided,\n the message will only contain the first name.\n\n \"\"\"\n if not isinstance(first_name, str):\n raise TypeError('first_name must be a string')\n if not isinstance(last_name, str):\n raise TypeError('last_name must be a string')\n print('My name is', first_name, last_name)\n\n","repo_name":"William9701/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26466104426","text":"from django.db import transaction\nfrom .models import *\nfrom django.db.models import F\n\ndef trasactionfunc(totrasaction):\n try:\n with transaction.atomic():\n Audio.objects.filter(id__in=totrasaction['audio']).update(counter=F('counter') + 1)\n Video.objects.filter(id__in=totrasaction['video']).update(counter=F('counter') + 1)\n Text.objects.filter(id__in=totrasaction['text']).update(counter=F('counter') + 1)\n except:\n transaction.rollback()\n else:\n return True\n return False","repo_name":"lightarhont/project11","sub_path":"api/trasactionfunc.py","file_name":"trasactionfunc.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18246322528","text":"from os import path\n\nimport pandas as pd\nfrom ray import air\nfrom ray import tune\nfrom scvi.model import SCVI\n\nfrom utils_SCVI import get_data\n\n\ndef grid_search(params, device):\n '''\n Function to start grid search\n on GPU\n '''\n \n params['device'] = device\n \n reporter = tune.CLIReporter(max_report_frequency=60)\n \n # Starts grid search using RayTune\n tuner = tune.Tuner(tune.with_resources(trainable,\n {\"cpu\":2, \"gpu\":1}), \n param_space = params, \n tune_config = tune.tune_config.TuneConfig(reuse_actors = False),\n run_config=air.RunConfig(name=params['gene_likelihood'], verbose=1, progress_reporter=reporter))\n \n results = tuner.fit()\n \n\ndef trainable(config_dict):\n '''\n Function to load data\n and train SCVI model\n reporting train and validation loss\n '''\n \n # Load data\n train_data, val_data = get_data(config_dict['data_path'], config_dict['multi_batch'])\n \n train_data = train_data.copy()\n \n # Set up data\n SCVI.setup_anndata(train_data, layer=\"counts\", batch_key=\"batch\")\n \n model = SCVI(train_data, n_hidden=config_dict['n_hidden'], n_latent=config_dict['n_latent'], \n n_layers=config_dict['n_layers'], 
 +{"seq_id":"14901805182","text":"from os import path\n\nimport pandas as pd\nfrom ray import air\nfrom ray import tune\nfrom scvi.model import SCVI\n\nfrom utils_SCVI import get_data\n\n\ndef grid_search(params, device):\n    '''\n    Start a grid search on the GPU.\n    '''\n\n    params['device'] = device\n\n    reporter = tune.CLIReporter(max_report_frequency=60)\n\n    # Start the grid search using Ray Tune\n    tuner = tune.Tuner(tune.with_resources(trainable,\n                                           {\"cpu\": 2, \"gpu\": 1}),\n                       param_space=params,\n                       tune_config=tune.tune_config.TuneConfig(reuse_actors=False),\n                       run_config=air.RunConfig(name=params['gene_likelihood'], verbose=1, progress_reporter=reporter))\n\n    results = tuner.fit()\n\n\ndef trainable(config_dict):\n    '''\n    Load the data and train an SCVI model,\n    reporting the train and validation loss.\n    '''\n\n    # Load data\n    train_data, val_data = get_data(config_dict['data_path'], config_dict['multi_batch'])\n\n    train_data = train_data.copy()\n\n    # Set up data\n    SCVI.setup_anndata(train_data, layer=\"counts\", batch_key=\"batch\")\n\n    model = SCVI(train_data, n_hidden=config_dict['n_hidden'], n_latent=config_dict['n_latent'],\n                 n_layers=config_dict['n_layers'], dropout_rate=config_dict['dropout_rate'],\n                 gene_likelihood=config_dict['gene_likelihood'], latent_distribution=config_dict['latent_distribution'])\n\n    # Train model\n    model.train(max_epochs=config_dict['max_epochs'], use_gpu=config_dict['use_gpu'],\n                train_size=config_dict['train_size'], batch_size=config_dict['batch_size'], early_stopping=config_dict['early_stopping'])\n\n    # Save train and validation loss\n    config_dict['train_loss'] = model.history['reconstruction_loss_train'].iloc[-1].reconstruction_loss_train\n\n    config_dict['val_loss'] = model.get_reconstruction_error(val_data)['reconstruction_loss']\n\n    config_dict.pop('device')\n    file_path = config_dict['file_path']\n    config_dict.pop('file_path')\n    config_dict.pop('data_path')\n    config_dict.pop('multi_batch')\n\n    print(model.history['reconstruction_loss_train'])\n\n    # Save the results of this config to a dataframe (one row, so every value\n    # is wrapped in a single-element list)\n    for key, value in config_dict.items():\n        config_dict[key] = [config_dict[key]]\n\n    df = pd.DataFrame(config_dict)\n\n    # Store the results (append if the file already exists, otherwise create the .csv file)\n    df.to_csv(file_path, mode='a', sep='#', index=False,\n              header=False if path.exists(file_path) else True)\n","repo_name":"Jek9884/CHL-CellEncoder","sub_path":"SCVI/grid_SCVI.py","file_name":"grid_SCVI.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18246322528","text":"from django.shortcuts import render, render_to_response\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom mongo_models import *\nimport pymongo\n\n\n# Create your views here.\ndef index(request):\n\n    # query all authors in the database\n    # primary sort key should go last!\n    author_list = connection.Author.find().sort('firstname').sort('lastname')\n    return render(request, 'index.html', {\"author_list\": author_list})\n\n\ndef add_author(request):\n\n    a = connection.Author()\n    a.firstname = request.GET.get(\"firstname\")\n    a.lastname = request.GET.get(\"lastname\")\n\n    # make a list of genres\n    genres = request.GET.get(\"genres\").split(',')\n    a.genres = genres\n\n    # collect the phone numbers into a list\n    number_1 = request.GET.get('number_1')\n    type_1 = request.GET.get('type_1')\n    number_2 = request.GET.get('number_2')\n    type_2 = request.GET.get('type_2')\n    phone = [{type_1: number_1}, {type_2: number_2}]\n\n    a.phone = phone\n    a.save()\n\n    return HttpResponseRedirect(reverse('mongo:index'))\n\n\ndef show_author(request, author_id):\n    # get the author object\n    author = connection.Author.one({\"_id\": to_mongo_id(author_id)})\n    # now get the related publications (note: this should really be an embedded field, but this is for demo purposes)\n    pubs = connection.Publication.find({'author': author_id})\n    pubs = to_django_context(pubs)\n\n    request.session['author_id'] = author_id\n    return render(request, 'author.html', {\"author\": author, \"publications\": pubs})\n\n\ndef edit_author(request, author_id):\n    # get the author object\n    a = connection.Author.one({\"_id\": to_mongo_id(author_id)})\n    request.session['author_id'] = author_id\n\n    if request.method == 'POST':\n        # a.pop('_id')\n        a.firstname = request.POST.get(\"firstname\")\n        a.lastname = request.POST.get(\"lastname\")\n\n        # make a list of genres\n        genres = request.POST.get(\"genres\").split(',')\n        a.genres = genres\n\n        # collect the phone numbers into a list\n        number_1 = request.POST.get('number_1')\n        type_1 = request.POST.get('type_1')\n        number_2 = 
request.POST.get('number_2')\n        type_2 = request.POST.get('type_2')\n        phone = [{type_1: number_1}, {type_2: number_2}]\n\n        a.phone = phone\n        a.save()\n        return HttpResponseRedirect(reverse('mongo:index'))\n\n    return render(request, 'edit_author.html', {\"author\": a})\n\n\ndef delete_author(request, author_id):\n    a = connection.Author.one({\"_id\": to_mongo_id(author_id)})\n    # first delete the related publications\n    for pub in connection.Publication.find({\"author\": author_id}):\n        pub.delete()\n    # now delete the author\n    a.delete()\n\n    return HttpResponseRedirect(reverse('mongo:index'))\n\n\ndef add_address(request):\n    a = connection.Author.one({\"_id\": to_mongo_id(request.session['author_id'])})\n    x = {'number': request.GET['house_number'], 'street': request.GET['street'], 'town': request.GET['town']}\n    a.address.append(x)\n    a.save()\n    return HttpResponseRedirect(reverse('mongo:show_author', kwargs={'author_id': request.session['author_id']}))\n\n\ndef add_publication(request):\n    author_id = request.GET.get('author_id')\n    pub = connection.Publication()\n    pub.author = author_id\n    pub.title = request.GET.get('title')\n    pub.save()\n\n    return HttpResponseRedirect(reverse('mongo:show_author', kwargs={'author_id': author_id}))\n\n\ndef delete_publication(request, publication_id):\n    connection.Publication.one({\"_id\": to_mongo_id(publication_id)}).delete()\n\n    return HttpResponseRedirect(reverse('mongo:show_author', kwargs={'author_id': request.session['author_id']}))","repo_name":"shaw2thefloor/mongo_db_testing","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39145274235","text":"health = int(input())\ncommand = input()\n\ninitial_health = health\nV = []\nremaining_health = 0\n\nwhile command != \"end\":\n    virus = command\n    v_power = sum(map(ord, virus)) // 3\n    defeat_time = v_power * len(virus)  # seconds\n    # a virus the system has fought before takes a third of the time\n    if virus in V:\n        defeat_time //= 3\n    V.append(virus)\n    health -= defeat_time\n    print(f'Virus {virus}: {v_power} => {defeat_time} seconds')\n\n    if health > 0:\n        print(f\"{virus} defeated in {defeat_time // 60}m {defeat_time % 60}s.\")\n        print(f'Remaining health: {int(health)}')\n        health = int(health * 1.2)\n    else:\n        print(\"Immune System Defeated.\")\n        break\n\n    if health > initial_health:\n        health = initial_health\n    # track the health after every round, not only when it was capped,\n    # so the final report is never stale\n    remaining_health = health\n\n    command = input()\nelse:\n    print(f\"Final Health: {remaining_health:.0f}\")\n","repo_name":"simonen/PythonFundamentals","sub_path":"Dictionaries/Dictionaries and Lists - More Exercises/03. Immune System.py","file_name":"03. Immune System.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
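# Worked example for the formulas above (added note, not part of the original file): for the virus 'rat', sum(map(ord, 'rat')) = 114 + 97 + 116 = 327, so v_power = 327 // 3 = 109 and defeat_time = 109 * 3 = 327 seconds, reported as 5m 27s (327 // 60 = 5, 327 % 60 = 27).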
 +{"seq_id":"3567936662","text":"#git@github.com:ALLVIS-II/Project-week-7.git\n\nitems = []\nitem = input(\"Enter an item('STOP' to quit):\")\nwhile item.upper() != 'STOP':\n    while item.capitalize() in items:\n        print('Error:', item, 'already entered.')\n        item = input('Enter another item:')\n    if item.upper() != 'STOP':\n        items.append(item.capitalize())\n        if len(items) == 1:\n            print('You have 1 item')\n        else:\n            print('You have', len(items), 'items')\n    # print the numbered list with its own loop variable: the original code\n    # reused `item`, clobbering the value that the while-condition checks\n    num = 1\n    for entry in items:\n        print(str(num) + '.' + entry)\n        num += 1\n    item = input(\"Enter an item('STOP' to quit):\")\n","repo_name":"ALLVIS-II/Project-week-7","sub_path":"week 7 group project.py","file_name":"week 7 group project.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32448763891","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.db import IntegrityError\n# from django.contrib.auth.views import auth_login\nfrom cart.models import Cart\nfrom profile.models import Profile\nfrom profile.forms import ProfileEditForm, UserEmailNameForm,\\\n    ProfileCreateForm, UserCreateForm, UserAuthenticationLoginForm\n# import re\n\n\"\"\"\ndef profile_login(request):\n    if request.method == 'POST':\n        user_auth_form = UserAuthenticationLoginForm(request.POST)\n        if user_auth_form.is_valid():\n            user_auth = user_auth_form.cleaned_data\n            user = authenticate(request, username=user_auth['username'], password=user_auth['password'])\n            if user is not None:\n                login(request, user)\n                return redirect('shop:index')\n            messages.add_message(request, messages.ERROR, 'username or password is wrong!')\n    else:\n        user_auth_form = UserAuthenticationLoginForm()\n\n    return render(request, 'profile_login.html', {'user_auth_form': user_auth_form})\n\"\"\"\n\n\ndef user_login_signup(request):\n    if request.method == 'POST':\n        user_create_form = UserCreateForm()\n        user_auth_form = UserAuthenticationLoginForm()\n\n    else:\n        print('before form')\n        user_create_form = UserCreateForm()\n        user_auth_form = UserAuthenticationLoginForm()\n\n    return render(request, 'profile/templates/profile_login.html', context={\n        'user_create_form': user_create_form,\n        'user_auth_form': user_auth_form,\n    })\n\n\ndef user_signup(request):\n    if request.method == 'POST':\n        user_create_form = UserCreateForm(data=request.POST)\n\n        if user_create_form.is_valid():\n            try:\n                # create_user() hashes the password; User.objects.create()\n                # would store it in plain text\n                new_user = User.objects.create_user(\n                    username=user_create_form.cleaned_data['username'],\n                    password=user_create_form.cleaned_data['password1'],\n                    email=user_create_form.cleaned_data['email']\n                )\n            except IntegrityError:\n                messages.add_message(request, messages.ERROR, 'This username is already taken')\n                return redirect('profile:login_signup')\n\n            new_profile = Profile.objects.create(user=new_user)\n            try:\n                Cart.objects.create(profile=new_profile)\n            except IntegrityError:\n                # Cart.objects.get(profile=new_profile)\n                pass\n\n            login(request, new_user, backend='django.contrib.auth.backends.ModelBackend')\n            return redirect('profile:profile_view', username=new_user.username)\n\n        else:\n            messages.add_message(request, messages.ERROR, 'Please enter the information correctly')\n            return redirect('profile:login_signup')
\n\n\ndef user_login(request):\n    if request.method == 'POST':\n        user_auth_form = UserAuthenticationLoginForm(data=request.POST)\n\n        if user_auth_form.is_valid():\n            user_auth = user_auth_form.cleaned_data\n            # authenticate() checks the hashed password; the old code queried\n            # the User table with password=<raw input>, which compares the raw\n            # input against the stored hash and can never match\n            user = authenticate(request,\n                                username=user_auth['username_or_email_login'],\n                                password=user_auth['password'])\n\n            if user is None:\n                # the login field may hold an e-mail address instead of a username\n                try:\n                    matched = User.objects.get(email=user_auth['username_or_email_login'])\n                    user = authenticate(request,\n                                        username=matched.username,\n                                        password=user_auth['password'])\n                except User.DoesNotExist:\n                    user = None\n\n            if user is not None:\n                login(request, user)\n                return redirect('shop:index')\n\n            messages.add_message(request, messages.ERROR, 'The username/e-mail or password is wrong!')\n    return redirect('profile:login_signup')\n\n\ndef profile_login(request):\n    pass\n\n\n@login_required\ndef profile_view(request, username=None):\n    return render(request, 'profile/templates/profile_view.html')\n\n\n@login_required\ndef profile_edit(request, username):\n    # email_regex = '^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$'\n\n    if request.method == 'POST':\n        profile_edit_form = ProfileEditForm(data=request.POST, files=request.FILES)\n        user_email_name_form = UserEmailNameForm(data=request.POST)\n        print('error: ', user_email_name_form.errors, ' ', profile_edit_form.errors)\n        if profile_edit_form.is_valid() and user_email_name_form.is_valid():\n            current_user = User.objects.get(username=username)\n            current_user.first_name = user_email_name_form.cleaned_data['first_name']\n            current_user.last_name = user_email_name_form.cleaned_data['last_name']\n            current_user.email = user_email_name_form.cleaned_data['email']\n            try:\n                current_profile = current_user.profile\n                current_profile.phone = profile_edit_form.cleaned_data['phone']\n                current_profile.address = profile_edit_form.cleaned_data['address']\n                current_profile.picture = profile_edit_form.cleaned_data['picture']\n            except Profile.DoesNotExist:\n                current_profile = Profile.objects.create(user=current_user,\n                                                         phone=profile_edit_form.cleaned_data['phone'],\n                                                         address=profile_edit_form.cleaned_data['address'],\n                                                         picture=profile_edit_form.cleaned_data['picture'])\n            current_profile.save()\n            current_user.save()\n            print('all saved')\n            try:\n                Cart.objects.get(profile=current_profile)\n            except Cart.DoesNotExist:\n                Cart.objects.create(profile=current_profile)\n\n            return redirect('profile:profile_view', username=current_user.username)\n\n    else:\n        try:\n            # pre-fill the form from the user's saved profile; the old code\n            # passed Profile *class* attributes instead of instance values\n            current_profile = request.user.profile\n            profile_edit_form = ProfileEditForm(initial={\n                'phone': current_profile.phone,\n                'address': current_profile.address,\n                'picture': current_profile.picture,\n            })\n        except Profile.DoesNotExist:\n            profile_edit_form = ProfileEditForm()\n        # email_form = EmailUserForm({'email': username}) if re.search(email_regex, username) else EmailUserForm()\n        user = User.objects.get(username=username)\n\n        user_email_name_form = UserEmailNameForm(initial={\n            'email': user.email if user.email else '',\n            'first_name': user.first_name if 
user.first_name else '',\n            'last_name': user.last_name if user.last_name else ''\n        })\n        \"\"\"\n        else:\n            user_email_name_form = UserEmailNameForm()\n        \"\"\"\n\n    return render(request, 'profile/templates/profile_edit.html', context={'profile_edit_form': profile_edit_form,\n                                                                           'email_form': user_email_name_form})\n\n\n@login_required\ndef user_logout(request, username=None):\n    logout(request)\n    return redirect('shop:index')\n","repo_name":"ebikdeli/electroshop","sub_path":"profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"36533079721","text":"import random  # this and List are needed to run outside the LeetCode harness\nfrom typing import List\n\n\nclass Solution:\n    def findKthLargest(self, nums: List[int], k: int) -> int:  # without sorting\n        def quickselect(l, r, k):\n            if l == r:\n                return nums[l]\n            pivot = random.randint(l, r)\n            pivot = partition(l, r, pivot)\n            if k == pivot:\n                return nums[k]\n            elif k < pivot:\n                return quickselect(l, pivot - 1, k)\n            else:\n                return quickselect(pivot + 1, r, k)\n\n        def partition(l, r, pivot):\n            pivot_val = nums[pivot]\n            nums[pivot], nums[r] = nums[r], nums[pivot]\n            store_index = l\n            for i in range(l, r):\n                if nums[i] < pivot_val:\n                    nums[store_index], nums[i] = nums[i], nums[store_index]\n                    store_index += 1\n            nums[store_index], nums[r] = nums[r], nums[store_index]\n            return store_index\n\n        return quickselect(0, len(nums) - 1, len(nums) - k)\n","repo_name":"Alex4210987/leetcode-solutions","sub_path":"215.py","file_name":"215.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
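# Usage sketch for the quickselect solution above (added illustration, not part of the original file): the k-th largest element sits at index len(nums) - k in ascending order, so the 2nd largest of [3, 2, 1, 5, 6, 4] is 5.\nassert Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2) == 5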
 +{"seq_id":"72656562417","text":"import matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport constants\n\n'''\nValidate the card number based on:\n1. the Luhn algorithm for Mastercards\n2. the special formatting rules for PLCC cards\n'''\ndef validate_card_num(card_num):\n    # Remove all spaces to avoid inconsistency\n    card_num = card_num.replace(\" \", \"\")\n\n    # Check whether the card number satisfies the conditions for each credit card type\n    found_mastercard = len(card_num) == 16 and is_mastercard_num(card_num)\n    found_plcc = len(card_num) == 11 and is_plcc_num(card_num)\n\n    if found_mastercard:\n        return card_num, True, constants.MASTERCARD\n    elif found_plcc:\n        return card_num, True, constants.PLCC\n    else:\n        return card_num, False, constants.INVALID\n\n'''\nRuns the Luhn algorithm on the card number\nto see if the credit card is a Mastercard.\n'''\ndef is_mastercard_num(card_num):\n    try:\n        digits = list(map(int, card_num))\n    except ValueError:\n        # a non-digit character means the number cannot be valid\n        return False\n\n    evens = sum(digit for digit in digits[-1::-2])\n    odds = sum(constants.LUHN_ODD_LOOKUP[digit] for digit in digits[-2::-2])\n\n    return (evens + odds) % 10 == 0\n\n'''\nChecks the special formatting rules for the PLCC\nto see if the credit card is a PLCC.\n\n(For now, this just checks that every character in the\ninput string is a digit)\n'''\ndef is_plcc_num(card_num):\n    try:\n        list(map(int, card_num))\n    except ValueError:\n        return False\n\n    return True\n\n'''\nApply OpenCV image operations to prepare the credit card image\nto be fed into Google Tesseract.\n'''\ndef prepare_img_for_tesseract(img):\n    # Apply Otsu binary thresholding to create a black-white img.\n    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)\n\n    # Now the card text is white on a black background.\n    # Invert the image so that the text is black on a white background.\n    img = 255 - img\n\n    # Apply mean (box) filtering to remove image noise\n    # (the kernel is all ones divided by its area, not a Gaussian).\n    kernel_size = 3\n    kernel = np.ones((kernel_size, kernel_size), np.float32) / \\\n        kernel_size/kernel_size\n    img = cv2.filter2D(img, -1, kernel)\n\n    return img\n\n'''\nLoads an image, based on the given image filename,\nas a grayscale image.\n'''\ndef load_img_as_grayscale(img_file):\n    return cv2.imread(img_file, 0)\n\n'''\nVerifies that the credit card is oriented correctly (i.e., not rotated sideways).\nIf not, rotate the image so that the credit card number\nis upright. (see images in the images folder for examples of rotated card images)\n'''\ndef verify_card_orientation(img):\n    rows, cols = img.shape[0:2]\n    if rows > cols:\n        return np.rot90(img)\n    else:\n        return img\n\n'''\nDraws the image so that, as the credit card image is processed,\nyou can see how it looks at each step.\n'''\ndef draw(img):\n    try:\n        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n    except cv2.error:\n        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n    plt.imshow(img)\n    plt.show()","repo_name":"brian-yang/RecognizeJCPCards","sub_path":"python/short-term/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70123480820","text":"# Source: https://youtu.be/3Q_oYDQ2whs\n# Company: Google\n# Question: two people's calendars are given, along with their working hours.\n# Search through both calendars and find the hours at which the two of them could meet.\n\n# Details: a meeting may start exactly when a busy block ends, e.g. after [10:00, 11:30] a meeting can start at 11:30. Times are in 24-hour format. Calendars are sorted.
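\n# How the decimal-time encoding below works (added note, not part of the original file): '10:15' is parsed as the float 10.15, and because minutes 00-59 map monotonically onto the fractional part .00-.59, plain float comparison orders clock times correctly:\nassert float('10:15'.replace(':', '.')) < float('10:30'.replace(':', '.'))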
\n\ninputPerson1 = [[[\"10:00\",\"11:30\"],[\"12:30\",\"13:00\"],[\"14:00\",\"15:30\"],[\"15:30\",\"16:45\"]],[\"10:00\",\"18:00\"]]\ninputPerson2 = [[[\"9:15\",\"10:15\"],[\"12:15\",\"13:30\"],[\"14:20\",\"16:30\"],[\"16:45\",\"17:15\"]],[\"9:00\",\"17:30\"]]\n\n#Answer:\n\ndef ReturnAvailableMeeting(inputPerson1,inputPerson2):\n    calendarPerson1 = inputPerson1[0]\n    calendarPerson2 = inputPerson2[0]\n\n    shiftHourPerson1 = inputPerson1[-1]\n    shiftHourPerson2 = inputPerson2[-1]\n\n    def FindEmptySpace(calendar,shift):\n        emptySpaces=[]\n\n        if calendar[0][0] != shift[0]:\n            emptySpaces.append([float(shift[0].replace(\":\",\".\")),float(calendar[0][0].replace(\":\",\".\"))])\n\n        for index,time in enumerate(calendar):\n            if index == len(calendar)-1:\n                emptySpaces.append([float(calendar[index][-1].replace(\":\",\".\")),float(shift[-1].replace(\":\",\".\"))])\n            else:\n                emptySpaces.append([float(calendar[index][-1].replace(\":\",\".\")),float(calendar[index+1][0].replace(\":\",\".\"))])\n\n            if calendar[index][-1] == shift[-1]:\n                emptySpaces.pop()\n\n        return emptySpaces\n\n    person1Empty = FindEmptySpace(calendarPerson1,shiftHourPerson1)\n    person2Empty = FindEmptySpace(calendarPerson2,shiftHourPerson2)\n\n    def FormatTime(value):\n        # rebuild H:MM from the integer and fractional parts; the original\n        # string padding turned e.g. 9.15 ('9:15', length 4) into '9:150'\n        hours = int(value)\n        minutes = int(round((value - hours) * 100))\n        return f\"{hours}:{minutes:02d}\"\n\n    def ArrangeMeeting(p1, p2):\n        available = []\n\n        for x in p1:\n            start1, end1 = x\n            for y in p2:\n                start2, end2 = y\n                if (start1 <= start2 and end1 > start2) or (start1 < end2 and end1 >= end2) or (start1 >= start2 and end1 <= end2):\n                    overlap_start = max(start1, start2)\n                    overlap_end = min(end1, end2)\n                    available.append([overlap_start, overlap_end])\n\n        return [[FormatTime(t[0]), FormatTime(t[-1])] for t in available]\n\n    return ArrangeMeeting(person1Empty,person2Empty)\n\nresult = ReturnAvailableMeeting(inputPerson1,inputPerson2)\nprint(result)","repo_name":"PlatinMavi/InterviewQuestions","sub_path":"AvaibleMeetingTime/meetingArranger.py","file_name":"meetingArranger.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38402328444","text":"\"\"\"Create Pandas dataframes from features. This must be imported by absolute\nmodule (:mod:`zensols.nlp.dataframe`).\n\n\"\"\"\n__author__ = 'Paul Landes'\n\n\nfrom typing import Set, List, Tuple\nfrom dataclasses import dataclass, field\nimport pandas as pd\nfrom zensols.nlp import FeatureToken, FeatureDocument\n\n\n@dataclass\nclass FeatureDataFrameFactory(object):\n    \"\"\"Creates a Pandas dataframe of features from a document's annotations. Each\n    feature ID is given a column in the output :class:`pandas.DataFrame`.\n\n    \"\"\"\n    token_feature_ids: Set[str] = field(default=FeatureToken.FEATURE_IDS)\n    \"\"\"The feature IDs to add to the :class:`pandas.DataFrame`.\"\"\"\n\n    priority_feature_ids: Tuple[str, ...] 
= field(\n default=FeatureToken.WRITABLE_FEATURE_IDS)\n \"\"\"Feature IDs that are used first in the column order in the output\n :class:`pandas.DataFrame`.\n\n \"\"\"\n def __call__(self, doc: FeatureDocument) -> pd.DataFrame:\n fids = self.token_feature_ids\n cols: List[str] = list(filter(lambda n: n in fids,\n self.priority_feature_ids))\n cols.extend(sorted(fids - set(cols)))\n rows = []\n for six, sent in enumerate(doc.sents):\n for tok in sent:\n feats = tok.asdict()\n rows.append(tuple(map(lambda f: feats.get(f), cols)))\n return pd.DataFrame(rows, columns=cols)\n","repo_name":"plandes/nlparse","sub_path":"src/python/zensols/nlp/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"22855583800","text":"from PySide6.QtWidgets import *\nfrom PySide6.QtCore import *\nfrom PySide6.QtGui import *\n\nclass Revenue(QObject):\n chargersChanged = Signal()\n retailToFacilityChanged = Signal()\n totalChanged = Signal()\n\n def __init__(self):\n super().__init__()\n self.chargers_: float = 0\n self.retail_to_facility: float = 0\n self.total_:float = self.chargers_ + self.retail_to_facility\n\n\n def emitUpdateSignals(self): \n self.chargersChanged.emit()\n self.retailToFacilityChanged.emit()\n self.totalChanged.emit()\n\n @Property(float, notify=chargersChanged) #getter\n def chargers(self) -> float:\n return self.chargers_\n\n @chargers.setter\n def chargers(self, chargers:float) -> None:\n if self.chargers_ != chargers:\n self.chargers_ = chargers\n self.chargersChanged.emit()\n\n @Property(float, notify=retailToFacilityChanged) #getter\n def retailToFacility(self) -> float:\n return self.retail_to_facility\n\n @retailToFacility.setter\n def retailToFacility(self, retail_to_facility:float) -> None:\n if self.retail_to_facility != retail_to_facility:\n self.retail_to_facility = retail_to_facility\n self.retailToFacilityChanged.emit()\n\n @Property(float, notify=totalChanged) #getter\n def total(self) -> float:\n return self.total_\n\n @total.setter\n def total(self, total_:float) -> None:\n if self.total_ != total_:\n self.total_ = total_\n self.totalChanged.emit() ","repo_name":"Jevan-National-University-of-Singapore/MicrogridEconomyDashboard","sub_path":"src/Scenario/Year/Financial/Summary/EbitdaSection/Revenue/Revenue.py","file_name":"Revenue.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"28948689280","text":"import settings\nimport redis\n\n\nredis = redis.Redis(host='redis', port='6379')\n\n\nclass Cart:\n PREFIX = settings.CART_PREFIX\n EXPIRE = settings.CART_EXPIRE\n\n @classmethod\n def add_to_cart(cls, **kwargs):\n user_id = kwargs.get('user_id')\n product_id = kwargs.get('product_id')\n cart_name = f'{cls.PREFIX}_{user_id}_{product_id}'\n\n if redis.exists(cart_name):\n redis.hincrby(cart_name, 'quantity', kwargs['quantity'])\n\n else:\n [redis.hset(cart_name, field, value)\n for field, value in kwargs.items()]\n redis.expire(cart_name, cls.EXPIRE)\n\n return 'cart added/changed'\n\n @classmethod\n def get_cart(cls, user_id):\n cart = []\n for user_carts in redis.scan_iter(f'{cls.PREFIX}_{user_id}_*'):\n data = {key.decode('utf-8'): value.decode('utf-8')\n for key, value in redis.hgetall(user_carts).items()}\n cart.append(data)\n \n return cart\n \n @classmethod\n def delete_product(cls, user_id, product_id):\n cart_name = 
f'{cls.PREFIX}_{user_id}_{product_id}'\n        return redis.delete(cart_name)\n\n    @classmethod\n    def delete_cart(cls, user_id):\n        for user_carts in redis.scan_iter(f'{cls.PREFIX}_{user_id}_*'):\n            redis.delete(user_carts)\n","repo_name":"MirYounes/fastapi-shop","sub_path":"cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"14012098514","text":"from django.http import HttpResponse\nfrom django.template import loader\n\n\ndef exchange_view_render(\n    template_name: str, response, request, **kwargs\n) -> HttpResponse:\n    \"\"\"\n    Helper for extra views in account\n    :param template_name: Template name to render\n    :type template_name: str\n    :param response: View parameter\n    :param request: View parameter\n    :param kwargs: View parameter\n    :return: HTTP render view\n    :rtype: HttpResponse\n    \"\"\"\n    template = loader.get_template(template_name)\n    context = {\"con\": response, **kwargs}\n    return HttpResponse(template.render(context, request))\n","repo_name":"guanana/vanir","sub_path":"vanir/core/account/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"23176836921","text":"\"\"\" Test functions for stats module\n\n    WRITTEN BY LOUIS LUANGKESORN FOR THE STATS MODULE\n    BASED ON WILKINSON'S STATISTICS QUIZ\n    https://www.stanford.edu/~clint/bench/wilk.txt\n\n    Additional tests by a host of SciPy developers.\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\nfrom collections import namedtuple\n\nfrom numpy.testing import (assert_, assert_equal,\n                           assert_almost_equal, assert_array_almost_equal,\n                           assert_array_equal, assert_approx_equal,\n                           assert_allclose)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy._lib._numpy_compat import suppress_warnings\nimport numpy.ma.testutils as mat\nfrom numpy import array, arange, float32, float64, power\nimport numpy as np\n\nimport scipy.stats as stats\nimport scipy.stats.mstats as mstats\nimport scipy.stats.mstats_basic as mstats_basic\nfrom scipy._lib._version import NumpyVersion\nfrom scipy._lib.six import xrange\nfrom .common_tests import check_named_results\nfrom scipy.special import kv\nfrom scipy.sparse.sputils import matrix\nfrom scipy.integrate import quad\n\n\"\"\" Numbers in docstrings beginning with 'W' refer to the section numbers\n    and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are\n    considered to be essential functionality. True testing and\n    evaluation of a statistics package requires use of the\n    NIST Statistical test data. See McCullough (1999) Assessing The Reliability\n    of Statistical Software for a test methodology and its\n    implementation in testing SAS, SPSS, and S-Plus.\n\"\"\"\n\n# Datasets\n# These data sets are from the nasty.dat sets used by Wilkinson\n# For completeness, I should write the relevant tests and count them as failures\n# Somewhat acceptable, since this is still beta software. 
It would count as a\n# good target for 1.0 status\nX = array([1,2,3,4,5,6,7,8,9], float)\nZERO = array([0,0,0,0,0,0,0,0,0], float)\nBIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,\n 99999998,99999999], float)\nLITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,\n 0.99999997,0.99999998,0.99999999], float)\nHUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)\nTINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)\nROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)\n\nclass TestTrimmedStats(object):\n # TODO: write these tests to handle missing values properly\n dprec = np.finfo(np.float64).precision\n\n def test_tmean(self):\n y = stats.tmean(X, (2, 8), (True, True))\n assert_approx_equal(y, 5.0, significant=self.dprec)\n\n y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))\n y2 = stats.tmean(X, limits=None)\n assert_approx_equal(y1, y2, significant=self.dprec)\n\n def test_tvar(self):\n y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))\n assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)\n\n y = stats.tvar(X, limits=None)\n assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)\n\n def test_tstd(self):\n y = stats.tstd(X, (2, 8), (True, True))\n assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)\n\n y = stats.tstd(X, limits=None)\n assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)\n\n def test_tmin(self):\n assert_equal(stats.tmin(4), 4)\n\n x = np.arange(10)\n assert_equal(stats.tmin(x), 0)\n assert_equal(stats.tmin(x, lowerlimit=0), 0)\n assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)\n\n x = x.reshape((5, 2))\n assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])\n assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])\n assert_equal(stats.tmin(x, axis=None), 0)\n\n x = np.arange(10.)\n x[9] = np.nan\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \"invalid value*\")\n assert_equal(stats.tmin(x), np.nan)\n assert_equal(stats.tmin(x, nan_policy='omit'), 0.)\n assert_raises(ValueError, stats.tmin, x, nan_policy='raise')\n assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')\n msg = \"'propagate', 'raise', 'omit'\"\n with assert_raises(ValueError, match=msg):\n stats.tmin(x, nan_policy='foo')\n\n def test_tmax(self):\n assert_equal(stats.tmax(4), 4)\n\n x = np.arange(10)\n assert_equal(stats.tmax(x), 9)\n assert_equal(stats.tmax(x, upperlimit=9), 9)\n assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)\n\n x = x.reshape((5, 2))\n assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])\n assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])\n assert_equal(stats.tmax(x, axis=None), 9)\n\n x = np.arange(10.)\n x[6] = np.nan\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \"invalid value*\")\n assert_equal(stats.tmax(x), np.nan)\n assert_equal(stats.tmax(x, nan_policy='omit'), 9.)\n assert_raises(ValueError, stats.tmax, x, nan_policy='raise')\n assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')\n\n def test_tsem(self):\n y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))\n y_ref = np.array([4, 5, 6, 7, 8])\n assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),\n significant=self.dprec)\n\n assert_approx_equal(stats.tsem(X, limits=[-1, 10]),\n stats.tsem(X, limits=None),\n significant=self.dprec)\n\n\nclass TestCorrPearsonr(object):\n \"\"\" W.II.D. 
Compute a correlation matrix on all the variables.\n\n All the correlations, except for ZERO and MISS, should be exactly 1.\n ZERO and MISS should have undefined or missing correlations with the\n other variables. The same should go for SPEARMAN correlations, if\n your program has them.\n \"\"\"\n def test_pXX(self):\n y = stats.pearsonr(X,X)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pXBIG(self):\n y = stats.pearsonr(X,BIG)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pXLITTLE(self):\n y = stats.pearsonr(X,LITTLE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pXHUGE(self):\n y = stats.pearsonr(X,HUGE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pXTINY(self):\n y = stats.pearsonr(X,TINY)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pXROUND(self):\n y = stats.pearsonr(X,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pBIGBIG(self):\n y = stats.pearsonr(BIG,BIG)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pBIGLITTLE(self):\n y = stats.pearsonr(BIG,LITTLE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pBIGHUGE(self):\n y = stats.pearsonr(BIG,HUGE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pBIGTINY(self):\n y = stats.pearsonr(BIG,TINY)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pBIGROUND(self):\n y = stats.pearsonr(BIG,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pLITTLELITTLE(self):\n y = stats.pearsonr(LITTLE,LITTLE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pLITTLEHUGE(self):\n y = stats.pearsonr(LITTLE,HUGE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pLITTLETINY(self):\n y = stats.pearsonr(LITTLE,TINY)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pLITTLEROUND(self):\n y = stats.pearsonr(LITTLE,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pHUGEHUGE(self):\n y = stats.pearsonr(HUGE,HUGE)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pHUGETINY(self):\n y = stats.pearsonr(HUGE,TINY)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pHUGEROUND(self):\n y = stats.pearsonr(HUGE,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pTINYTINY(self):\n y = stats.pearsonr(TINY,TINY)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pTINYROUND(self):\n y = stats.pearsonr(TINY,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_pROUNDROUND(self):\n y = stats.pearsonr(ROUND,ROUND)\n r = y[0]\n assert_approx_equal(r,1.0)\n\n def test_r_exactly_pos1(self):\n a = arange(3.0)\n b = a\n r, prob = stats.pearsonr(a,b)\n assert_equal(r, 1.0)\n assert_equal(prob, 0.0)\n\n def test_r_exactly_neg1(self):\n a = arange(3.0)\n b = -a\n r, prob = stats.pearsonr(a,b)\n assert_equal(r, -1.0)\n assert_equal(prob, 0.0)\n\n def test_basic(self):\n # A basic test, with a correlation coefficient\n # that is not 1 or -1.\n a = array([-1, 0, 1])\n b = array([0, 0, 3])\n r, prob = stats.pearsonr(a, b)\n assert_approx_equal(r, np.sqrt(3)/2)\n assert_approx_equal(prob, 1.0/3)\n\n\nclass TestFisherExact(object):\n \"\"\"Some tests to show that fisher_exact() works correctly.\n\n Note that in SciPy 0.9.0 this was not working well for large numbers due to\n inaccuracy of the hypergeom distribution (see #1218). 
Fixed now.\n\n Also note that R and Scipy have different argument formats for their\n hypergeometric distribution functions.\n\n R:\n > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)\n [1] 1.701815e-09\n \"\"\"\n def test_basic(self):\n fisher_exact = stats.fisher_exact\n\n res = fisher_exact([[14500, 20000], [30000, 40000]])[1]\n assert_approx_equal(res, 0.01106, significant=4)\n res = fisher_exact([[100, 2], [1000, 5]])[1]\n assert_approx_equal(res, 0.1301, significant=4)\n res = fisher_exact([[2, 7], [8, 2]])[1]\n assert_approx_equal(res, 0.0230141, significant=6)\n res = fisher_exact([[5, 1], [10, 10]])[1]\n assert_approx_equal(res, 0.1973244, significant=6)\n res = fisher_exact([[5, 15], [20, 20]])[1]\n assert_approx_equal(res, 0.0958044, significant=6)\n res = fisher_exact([[5, 16], [20, 25]])[1]\n assert_approx_equal(res, 0.1725862, significant=6)\n res = fisher_exact([[10, 5], [10, 1]])[1]\n assert_approx_equal(res, 0.1973244, significant=6)\n res = fisher_exact([[5, 0], [1, 4]])[1]\n assert_approx_equal(res, 0.04761904, significant=6)\n res = fisher_exact([[0, 1], [3, 2]])[1]\n assert_approx_equal(res, 1.0)\n res = fisher_exact([[0, 2], [6, 4]])[1]\n assert_approx_equal(res, 0.4545454545)\n res = fisher_exact([[2, 7], [8, 2]])\n assert_approx_equal(res[1], 0.0230141, significant=6)\n assert_approx_equal(res[0], 4.0 / 56)\n\n def test_precise(self):\n # results from R\n #\n # R defines oddsratio differently (see Notes section of fisher_exact\n # docstring), so those will not match. We leave them in anyway, in\n # case they will be useful later on. We test only the p-value.\n tablist = [\n ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),\n ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),\n ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),\n ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),\n ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),\n ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),\n ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),\n ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),\n ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),\n ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),\n ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))\n ]\n for table, res_r in tablist:\n res = stats.fisher_exact(np.asarray(table))\n np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,\n verbose=True)\n\n @pytest.mark.slow\n def test_large_numbers(self):\n # Test with some large numbers. 
Regression test for #1401\n        pvals = [5.56e-11, 2.666e-11, 1.363e-11]  # from R\n        for pval, num in zip(pvals, [75, 76, 77]):\n            res = stats.fisher_exact([[17704, 496], [1065, num]])[1]\n            assert_approx_equal(res, pval, significant=4)\n\n        res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]\n        assert_approx_equal(res, 0.2751, significant=4)\n\n    def test_raises(self):\n        # test we raise an error for wrong shape of input.\n        assert_raises(ValueError, stats.fisher_exact,\n                      np.arange(6).reshape(2, 3))\n\n    def test_row_or_col_zero(self):\n        tables = ([[0, 0], [5, 10]],\n                  [[5, 10], [0, 0]],\n                  [[0, 5], [0, 10]],\n                  [[5, 0], [10, 0]])\n        for table in tables:\n            oddsratio, pval = stats.fisher_exact(table)\n            assert_equal(pval, 1.0)\n            assert_equal(oddsratio, np.nan)\n\n    def test_less_greater(self):\n        tables = (\n            # Some tables to compare with R:\n            [[2, 7], [8, 2]],\n            [[200, 7], [8, 300]],\n            [[28, 21], [6, 1957]],\n            [[190, 800], [200, 900]],\n            # Some tables with simple exact values\n            # (includes regression test for ticket #1568):\n            [[0, 2], [3, 0]],\n            [[1, 1], [2, 1]],\n            [[2, 0], [1, 2]],\n            [[0, 1], [2, 3]],\n            [[1, 0], [1, 4]],\n            )\n        pvals = (\n            # from R:\n            [0.018521725952066501, 0.9990149169715733],\n            [1.0, 2.0056578803889148e-122],\n            [1.0, 5.7284374608319831e-44],\n            [0.7416227, 0.2959826],\n            # Exact:\n            [0.1, 1.0],\n            [0.7, 0.9],\n            [1.0, 0.3],\n            [2./3, 1.0],\n            [1.0, 1./3],\n            )\n        for table, pval in zip(tables, pvals):\n            res = []\n            res.append(stats.fisher_exact(table, alternative=\"less\")[1])\n            res.append(stats.fisher_exact(table, alternative=\"greater\")[1])\n            assert_allclose(res, pval, atol=0, rtol=1e-7)\n\n    def test_gh3014(self):\n        # check if issue #3014 has been fixed.\n        # before, this would have raised a ValueError\n        odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])\n\n\nclass TestCorrSpearmanr(object):\n    \"\"\" W.II.D. Compute a correlation matrix on all the variables.\n\n    All the correlations, except for ZERO and MISS, should be exactly 1.\n    ZERO and MISS should have undefined or missing correlations with the\n    other variables. 
The same should go for SPEARMAN correlations, if\n    your program has them.\n    \"\"\"\n    def test_scalar(self):\n        y = stats.spearmanr(4., 2.)\n        assert_(np.isnan(y).all())\n\n    def test_uneven_lengths(self):\n        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])\n        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)\n\n    def test_uneven_2d_shapes(self):\n        # Different number of columns should work - those just get concatenated.\n        np.random.seed(232324)\n        x = np.random.randn(4, 3)\n        y = np.random.randn(4, 2)\n        assert stats.spearmanr(x, y).correlation.shape == (5, 5)\n        assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)\n\n        assert_raises(ValueError, stats.spearmanr, x, y, axis=1)\n        assert_raises(ValueError, stats.spearmanr, x.T, y.T)\n\n    def test_ndim_too_high(self):\n        np.random.seed(232324)\n        x = np.random.randn(4, 3, 2)\n        assert_raises(ValueError, stats.spearmanr, x)\n        assert_raises(ValueError, stats.spearmanr, x, x)\n        assert_raises(ValueError, stats.spearmanr, x, None, None)\n        # But should work with axis=None (raveling axes) for two input arrays\n        assert_allclose(stats.spearmanr(x, x, axis=None),\n                        stats.spearmanr(x.flatten(), x.flatten(), axis=0))\n\n    def test_nan_policy(self):\n        x = np.arange(10.)\n        x[9] = np.nan\n        assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))\n        assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),\n                           (1.0, 0.0))\n        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')\n        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')\n\n    def test_sXX(self):\n        y = stats.spearmanr(X,X)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sXBIG(self):\n        y = stats.spearmanr(X,BIG)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sXLITTLE(self):\n        y = stats.spearmanr(X,LITTLE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sXHUGE(self):\n        y = stats.spearmanr(X,HUGE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sXTINY(self):\n        y = stats.spearmanr(X,TINY)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sXROUND(self):\n        y = stats.spearmanr(X,ROUND)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sBIGBIG(self):\n        y = stats.spearmanr(BIG,BIG)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sBIGLITTLE(self):\n        y = stats.spearmanr(BIG,LITTLE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sBIGHUGE(self):\n        y = stats.spearmanr(BIG,HUGE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sBIGTINY(self):\n        y = stats.spearmanr(BIG,TINY)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sBIGROUND(self):\n        y = stats.spearmanr(BIG,ROUND)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sLITTLELITTLE(self):\n        y = stats.spearmanr(LITTLE,LITTLE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sLITTLEHUGE(self):\n        y = stats.spearmanr(LITTLE,HUGE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sLITTLETINY(self):\n        y = stats.spearmanr(LITTLE,TINY)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sLITTLEROUND(self):\n        y = stats.spearmanr(LITTLE,ROUND)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sHUGEHUGE(self):\n        y = stats.spearmanr(HUGE,HUGE)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sHUGETINY(self):\n        y = stats.spearmanr(HUGE,TINY)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sHUGEROUND(self):\n        y = stats.spearmanr(HUGE,ROUND)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sTINYTINY(self):\n        y = stats.spearmanr(TINY,TINY)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_sTINYROUND(self):\n        y = stats.spearmanr(TINY,ROUND)\n        r = y[0]\n        
assert_approx_equal(r,1.0)\n\n    def test_sROUNDROUND(self):\n        y = stats.spearmanr(ROUND,ROUND)\n        r = y[0]\n        assert_approx_equal(r,1.0)\n\n    def test_spearmanr_result_attributes(self):\n        res = stats.spearmanr(X, X)\n        attributes = ('correlation', 'pvalue')\n        check_named_results(res, attributes)\n\n    def test_1d_vs_2d(self):\n        x1 = [1, 2, 3, 4, 5, 6]\n        x2 = [1, 2, 3, 4, 6, 5]\n        res1 = stats.spearmanr(x1, x2)\n        res2 = stats.spearmanr(np.asarray([x1, x2]).T)\n        assert_allclose(res1, res2)\n\n    def test_1d_vs_2d_nans(self):\n        # Now the same with NaNs present. Regression test for gh-9103.\n        for nan_policy in ['propagate', 'omit']:\n            x1 = [1, np.nan, 3, 4, 5, 6]\n            x2 = [1, 2, 3, 4, 6, np.nan]\n            res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)\n            res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)\n            assert_allclose(res1, res2)\n\n    def test_3cols(self):\n        x1 = np.arange(6)\n        x2 = -x1\n        x3 = np.array([0, 1, 2, 3, 5, 4])\n        x = np.asarray([x1, x2, x3]).T\n        actual = stats.spearmanr(x)\n        expected_corr = np.array([[1, -1, 0.94285714],\n                                  [-1, 1, -0.94285714],\n                                  [0.94285714, -0.94285714, 1]])\n        expected_pvalue = np.zeros((3, 3), dtype=float)\n        expected_pvalue[2, 0:2] = 0.00480466472\n        expected_pvalue[0:2, 2] = 0.00480466472\n\n        assert_allclose(actual.correlation, expected_corr)\n        assert_allclose(actual.pvalue, expected_pvalue)\n\n    def test_gh_9103(self):\n        # Regression test for gh-9103.\n        x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],\n                      [5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],\n                      [0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T\n        corr = np.array([[np.nan, np.nan, np.nan],\n                         [np.nan, np.nan, np.nan],\n                         [np.nan, np.nan, 1.]])\n        assert_allclose(stats.spearmanr(x, nan_policy='propagate').correlation,\n                        corr)\n\n        res = stats.spearmanr(x, nan_policy='omit').correlation\n        assert_allclose((res[0][1], res[0][2], res[1][2]),\n                        (0.2051957, 0.4857143, -0.4707919), rtol=1e-6)\n\n    def test_gh_8111(self):\n        # Regression test for gh-8111 (different result for float/int/bool).\n        n = 100\n        np.random.seed(234568)\n        x = np.random.rand(n)\n        m = np.random.rand(n) > 0.7\n\n        # bool against float, no nans\n        a = (x > .5)\n        b = np.array(x)\n        res1 = stats.spearmanr(a, b, nan_policy='omit').correlation\n\n        # bool against float with NaNs\n        b[m] = np.nan\n        res2 = stats.spearmanr(a, b, nan_policy='omit').correlation\n\n        # int against float with NaNs\n        a = a.astype(np.int32)\n        res3 = stats.spearmanr(a, b, nan_policy='omit').correlation\n\n        expected = [0.865895477, 0.866100381, 0.866100381]\n        assert_allclose([res1, res2, res3], expected)\n\n\ndef test_spearmanr():\n    # Cross-check with R (the method name is \"spearman\"):\n    # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method=\"spearman\")\n    x1 = [1, 2, 3, 4, 5]\n    x2 = [5, 6, 7, 8, 7]\n    expected = (0.82078268166812329, 0.088587005313543798)\n    res = stats.spearmanr(x1, x2)\n    assert_approx_equal(res[0], expected[0])\n    assert_approx_equal(res[1], expected[1])\n\n    attributes = ('correlation', 'pvalue')\n    res = stats.spearmanr(x1, x2)\n    check_named_results(res, attributes)\n\n    # with only ties in one or both inputs\n    with np.errstate(invalid=\"ignore\"):\n        assert_equal(stats.spearmanr([2,2,2], [2,2,2]), (np.nan, np.nan))\n        assert_equal(stats.spearmanr([2,0,2], [2,2,2]), (np.nan, np.nan))\n        assert_equal(stats.spearmanr([2,2,2], [2,0,2]), (np.nan, np.nan))\n\n    # empty arrays provided as input\n    assert_equal(stats.spearmanr([], []), (np.nan, np.nan))\n\n    np.random.seed(7546)\n    x = np.array([np.random.normal(loc=1, scale=1, size=500),\n                  np.random.normal(loc=1, scale=1, size=500)])\n    corr = [[1.0, 0.3],\n            [0.3, 1.0]]\n    x = 
np.dot(np.linalg.cholesky(corr), x)\n expected = (0.28659685838743354, 6.579862219051161e-11)\n res = stats.spearmanr(x[0], x[1])\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n assert_approx_equal(stats.spearmanr([1,1,2], [1,1,2])[0], 1.0)\n\n # test nan_policy\n x = np.arange(10.)\n x[9] = np.nan\n assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))\n assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),\n (1.0, 0))\n assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')\n assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')\n\n # test unequal length inputs\n x = np.arange(10.)\n y = np.arange(20.)\n assert_raises(ValueError, stats.spearmanr, x, y)\n\n #test paired value\n x1 = [1, 2, 3, 4]\n x2 = [8, 7, 6, np.nan]\n res1 = stats.spearmanr(x1, x2, nan_policy='omit')\n res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')\n assert_equal(res1, res2)\n\n # Regression test for GitHub issue #6061 - Overflow on Windows\n x = list(range(2000))\n y = list(range(2000))\n y[0], y[9] = y[9], y[0]\n y[10], y[434] = y[434], y[10]\n y[435], y[1509] = y[1509], y[435]\n # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))\n # = 1 - (1 / 500)\n # = 0.998\n x.append(np.nan)\n y.append(3.0)\n assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)\n\nclass TestCorrSpearmanrTies(object):\n \"\"\"Some tests of tie-handling by the spearmanr function.\"\"\"\n\n def test_tie1(self):\n # Data\n x = [1.0, 2.0, 3.0, 4.0]\n y = [1.0, 2.0, 2.0, 3.0]\n # Ranks of the data, with tie-handling.\n xr = [1.0, 2.0, 3.0, 4.0]\n yr = [1.0, 2.5, 2.5, 4.0]\n # Result of spearmanr should be the same as applying\n # pearsonr to the ranks.\n sr = stats.spearmanr(x, y)\n pr = stats.pearsonr(xr, yr)\n assert_almost_equal(sr, pr)\n\n def test_tie2(self):\n # Test tie-handling if inputs contain nan's\n # Data without nan's\n x1 = [1, 2, 2.5, 2]\n y1 = [1, 3, 2.5, 4]\n # Same data with nan's\n x2 = [1, 2, 2.5, 2, np.nan]\n y2 = [1, 3, 2.5, 4, np.nan]\n\n # Results for two data sets should be the same if nan's are ignored\n sr1 = stats.spearmanr(x1, y1)\n sr2 = stats.spearmanr(x2, y2, nan_policy='omit')\n assert_almost_equal(sr1, sr2)\n\n\n# W.II.E. Tabulate X against X, using BIG as a case weight. The values\n# should appear on the diagonal and the total should be 899999955.\n# If the table cannot hold these values, forget about working with\n# census data. You can also tabulate HUGE against TINY. 
There is no\n# reason a tabulation program should not be able to distinguish\n# different values regardless of their magnitude.\n\n# I need to figure out how to do this one.\n\n\ndef test_kendalltau():\n # simple case without ties\n x = np.arange(10)\n y = np.arange(10)\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (1.0, 5.511463844797e-07)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # swap a couple of values\n b = y[1]\n y[1] = y[2]\n y[2] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (0.9555555555555556, 5.511463844797e-06)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # swap a couple more\n b = y[5]\n y[5] = y[6]\n y[6] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (0.9111111111111111, 2.976190476190e-05)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # same in opposite direction\n x = np.arange(10)\n y = np.arange(10)[::-1]\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (-1.0, 5.511463844797e-07)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # swap a couple of values\n b = y[1]\n y[1] = y[2]\n y[2] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (-0.9555555555555556, 5.511463844797e-06)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # swap a couple more\n b = y[5]\n y[5] = y[6]\n y[6] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = (-0.9111111111111111, 2.976190476190e-05)\n res = stats.kendalltau(x, y)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # check exception in case of ties\n y[2] = y[1]\n assert_raises(ValueError, stats.kendalltau, x, y, method='exact')\n\n # check exception in case of invalid method keyword\n assert_raises(ValueError, stats.kendalltau, x, y, method='banana')\n\n # with some ties\n # Cross-check with R:\n # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method=\"kendall\",exact=FALSE)\n x1 = [12, 2, 1, 12, 2]\n x2 = [1, 4, 7, 1, 0]\n expected = (-0.47140452079103173, 0.28274545993277478)\n res = stats.kendalltau(x1, x2)\n assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # test for namedtuple attribute results\n attributes = ('correlation', 'pvalue')\n res = stats.kendalltau(x1, x2)\n check_named_results(res, attributes)\n\n # with only ties in one or both inputs\n assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan))\n assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan))\n assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan))\n\n # empty arrays provided as input\n assert_equal(stats.kendalltau([], []), (np.nan, np.nan))\n\n # check with larger arrays\n np.random.seed(7546)\n x = np.array([np.random.normal(loc=1, scale=1, size=500),\n np.random.normal(loc=1, scale=1, size=500)])\n corr = [[1.0, 0.3],\n [0.3, 1.0]]\n x = np.dot(np.linalg.cholesky(corr), x)\n expected = (0.19291382765531062, 1.1337095377742629e-10)\n res = stats.kendalltau(x[0], x[1])\n 
assert_approx_equal(res[0], expected[0])\n assert_approx_equal(res[1], expected[1])\n\n # and do we get a tau of 1 for identical inputs?\n assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)\n\n # test nan_policy\n x = np.arange(10.)\n x[9] = np.nan\n assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))\n assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),\n (1.0, 5.5114638e-6), rtol=1e-06)\n assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'),\n (1.0, 0.00017455009626808976), rtol=1e-06)\n assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')\n assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')\n\n # test unequal length inputs\n x = np.arange(10.)\n y = np.arange(20.)\n assert_raises(ValueError, stats.kendalltau, x, y)\n\n # test all ties\n tau, p_value = stats.kendalltau([], [])\n assert_equal(np.nan, tau)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.kendalltau([0], [0])\n assert_equal(np.nan, tau)\n assert_equal(np.nan, p_value)\n\n # Regression test for GitHub issue #6061 - Overflow on Windows\n x = np.arange(2000, dtype=float)\n x = np.ma.masked_greater(x, 1995)\n y = np.arange(2000, dtype=float)\n y = np.concatenate((y[1000:], y[:1000]))\n assert_(np.isfinite(stats.kendalltau(x,y)[1]))\n\ndef test_kendalltau_vs_mstats_basic():\n np.random.seed(42)\n for s in range(2,10):\n a = []\n # Generate rankings with ties\n for i in range(s):\n a += [i]*i\n b = list(a)\n np.random.shuffle(a)\n np.random.shuffle(b)\n expected = mstats_basic.kendalltau(a, b)\n actual = stats.kendalltau(a, b)\n assert_approx_equal(actual[0], expected[0])\n assert_approx_equal(actual[1], expected[1])\n\n\ndef test_kendalltau_nan_2nd_arg():\n # regression test for gh-6134: nans in the second arg were not handled\n x = [1., 2., 3., 4.]\n y = [np.nan, 2.4, 3.4, 3.4]\n\n r1 = stats.kendalltau(x, y, nan_policy='omit')\n r2 = stats.kendalltau(x[1:], y[1:])\n assert_allclose(r1.correlation, r2.correlation, atol=1e-15)\n\n\ndef test_weightedtau():\n x = [12, 2, 1, 12, 2]\n y = [1, 4, 7, 1, 0]\n tau, p_value = stats.weightedtau(x, y)\n assert_approx_equal(tau, -0.56694968153682723)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(x, y, additive=False)\n assert_approx_equal(tau, -0.62205716951801038)\n assert_equal(np.nan, p_value)\n # This must be exactly Kendall's tau\n tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)\n assert_approx_equal(tau, -0.47140452079103173)\n assert_equal(np.nan, p_value)\n\n # Asymmetric, ranked version\n tau, p_value = stats.weightedtau(x, y, rank=None)\n assert_approx_equal(tau, -0.4157652301037516)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(y, x, rank=None)\n assert_approx_equal(tau, -0.7181341329699029)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)\n assert_approx_equal(tau, -0.40644850966246893)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)\n assert_approx_equal(tau, -0.83766582937355172)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(x, y, rank=False)\n assert_approx_equal(tau, -0.51604397940261848)\n assert_equal(np.nan, p_value)\n # This must be exactly Kendall's tau\n tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)\n assert_approx_equal(tau, -0.47140452079103173)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)\n 
assert_approx_equal(tau, -0.47140452079103173)\n assert_equal(np.nan, p_value)\n # Test argument conversion\n tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)\n assert_approx_equal(tau, -0.56694968153682723)\n tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)\n assert_approx_equal(tau, -0.56694968153682723)\n tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64))\n assert_approx_equal(tau, -0.56694968153682723)\n # All ties\n tau, p_value = stats.weightedtau([], [])\n assert_equal(np.nan, tau)\n assert_equal(np.nan, p_value)\n tau, p_value = stats.weightedtau([0], [0])\n assert_equal(np.nan, tau)\n assert_equal(np.nan, p_value)\n # Size mismatches\n assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])\n assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])\n # NaNs\n x = [12, 2, 1, 12, 2]\n y = [1, 4, 7, 1, np.nan]\n tau, p_value = stats.weightedtau(x, y)\n assert_approx_equal(tau, -0.56694968153682723)\n x = [12, 2, np.nan, 12, 2]\n tau, p_value = stats.weightedtau(x, y)\n assert_approx_equal(tau, -0.56694968153682723)\n\n\ndef test_kendall_tau_large():\n n = 172.\n x = np.arange(n)\n y = np.arange(n)\n _, pval = stats.kendalltau(x, y, method='exact')\n assert_equal(pval, 0.0)\n y[-1], y[-2] = y[-2], y[-1]\n _, pval = stats.kendalltau(x, y, method='exact')\n assert_equal(pval, 0.0)\n y[-3], y[-4] = y[-4], y[-3]\n _, pval = stats.kendalltau(x, y, method='exact')\n assert_equal(pval, 0.0)\n\n\ndef test_weightedtau_vs_quadratic():\n # Trivial quadratic implementation, all parameters mandatory\n def wkq(x, y, rank, weigher, add):\n tot = conc = disc = u = v = 0\n for i in range(len(x)):\n for j in range(len(x)):\n w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j])\n tot += w\n if x[i] == x[j]:\n u += w\n if y[i] == y[j]:\n v += w\n if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]:\n conc += w\n elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]:\n disc += w\n return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)\n\n np.random.seed(42)\n for s in range(3,10):\n a = []\n # Generate rankings with ties\n for i in range(s):\n a += [i]*i\n b = list(a)\n np.random.shuffle(a)\n np.random.shuffle(b)\n # First pass: use element indices as ranks\n rank = np.arange(len(a), dtype=np.intp)\n for _ in range(2):\n for add in [True, False]:\n expected = wkq(a, b, rank, lambda x: 1./(x+1), add)\n actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation\n assert_approx_equal(expected, actual)\n # Second pass: use a random rank\n np.random.shuffle(rank)\n\n\nclass TestFindRepeats(object):\n\n def test_basic(self):\n a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]\n res, nums = stats.find_repeats(a)\n assert_array_equal(res, [1, 2, 3, 4])\n assert_array_equal(nums, [3, 3, 2, 2])\n\n def test_empty_result(self):\n # Check that empty arrays are returned when there are no repeats.\n for a in [[10, 20, 50, 30, 40], []]:\n repeated, counts = stats.find_repeats(a)\n assert_array_equal(repeated, [])\n assert_array_equal(counts, [])\n\n\nclass TestRegression(object):\n def test_linregressBIGX(self):\n # W.II.F. Regress BIG on X.\n # The constant should be 99999990 and the regression coefficient should be 1.\n y = stats.linregress(X,BIG)\n intercept = y[1]\n r = y[2]\n assert_almost_equal(intercept,99999990)\n assert_almost_equal(r,1.0)\n\n def test_regressXX(self):\n # W.IV.B. 
Regress X on X.\n        # The constant should be exactly 0 and the regression coefficient should be 1.\n        # This is a perfectly valid regression. The program should not complain.\n        y = stats.linregress(X,X)\n        intercept = y[1]\n        r = y[2]\n        assert_almost_equal(intercept,0.0)\n        assert_almost_equal(r,1.0)\n# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program\n# should tell you that this model is \"singular\" because BIG and\n# LITTLE are linear combinations of each other. Cryptic error\n# messages are unacceptable here. Singularity is the most\n# fundamental regression error.\n# Need to figure out how to handle multiple linear regression. Not obvious\n\n    def test_regressZEROX(self):\n        # W.IV.D. Regress ZERO on X.\n        # The program should inform you that ZERO has no variance or it should\n        # go ahead and compute the regression and report a correlation and\n        # total sum of squares of exactly 0.\n        y = stats.linregress(X,ZERO)\n        intercept = y[1]\n        r = y[2]\n        assert_almost_equal(intercept,0.0)\n        assert_almost_equal(r,0.0)\n\n    def test_regress_simple(self):\n        # Regress a line with sinusoidal noise.\n        x = np.linspace(0, 100, 100)\n        y = 0.2 * np.linspace(0, 100, 100) + 10\n        y += np.sin(np.linspace(0, 20, 100))\n\n        res = stats.linregress(x, y)\n        assert_almost_equal(res[4], 2.3957814497838803e-3)\n\n    def test_regress_simple_onearg_rows(self):\n        # Regress a line with sinusoidal noise, with a single input of shape (2, N).\n        x = np.linspace(0, 100, 100)\n        y = 0.2 * np.linspace(0, 100, 100) + 10\n        y += np.sin(np.linspace(0, 20, 100))\n        rows = np.vstack((x, y))\n\n        res = stats.linregress(rows)\n        assert_almost_equal(res[4], 2.3957814497838803e-3)\n\n    def test_regress_simple_onearg_cols(self):\n        x = np.linspace(0, 100, 100)\n        y = 0.2 * np.linspace(0, 100, 100) + 10\n        y += np.sin(np.linspace(0, 20, 100))\n        cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))\n\n        res = stats.linregress(cols)\n        assert_almost_equal(res[4], 2.3957814497838803e-3)\n\n    def test_regress_shape_error(self):\n        # Check that a single input argument to linregress with wrong shape\n        # results in a ValueError.\n        assert_raises(ValueError, stats.linregress, np.ones((3, 3)))\n\n    def test_linregress(self):\n        # compared with multivariate ols with pinv\n        x = np.arange(11)\n        y = np.arange(5,16)\n        y[[1, -2]] -= 1\n        y[[0, -1]] += 1\n\n        res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)\n        assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)\n\n    def test_regress_simple_negative_cor(self):\n        # If the slope of the regression is negative, the correlation factor R\n        # tends to -1, not 1. Rounding errors can sometimes push it below -1,\n        # which makes stderr NaN.\n        a, n = 1e-71, 100000\n        x = np.linspace(a, 2 * a, n)\n        y = np.linspace(2 * a, a, n)\n        res = stats.linregress(x, y)\n        assert_(res[2] >= -1)  # propagated numerical errors were not corrected\n        assert_almost_equal(res[2], -1)  # perfect negative correlation case\n        assert_(not np.isnan(res[4]))  # stderr should stay finite\n\n    def test_linregress_result_attributes(self):\n        # Regress a line with sinusoidal noise.\n        x = np.linspace(0, 100, 100)\n        y = 0.2 * np.linspace(0, 100, 100) + 10\n        y += np.sin(np.linspace(0, 20, 100))\n\n        res = stats.linregress(x, y)\n        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')\n        check_named_results(res, attributes)\n\n    def test_regress_two_inputs(self):\n        # Regress a simple line formed by two points.\n        x = np.arange(2)\n        y = np.arange(3, 5)\n\n        res = stats.linregress(x, y)\n        assert_almost_equal(res[3], 0.0)  
# non-horizontal line\n assert_almost_equal(res[4], 0.0) # zero stderr\n\n def test_regress_two_inputs_horizontal_line(self):\n # Regress a horizontal line formed by two points.\n x = np.arange(2)\n y = np.ones(2)\n\n res = stats.linregress(x, y)\n assert_almost_equal(res[3], 1.0) # horizontal line\n assert_almost_equal(res[4], 0.0) # zero stderr\n\n def test_nist_norris(self):\n x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,\n 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,\n 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,\n 11.1, 118.3, 229.2, 669.1, 448.9, 0.5]\n\n y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,\n 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,\n 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,\n 10.2, 117.6, 228.9, 668.4, 449.2, 0.2]\n\n # Expected values\n exp_slope = 1.00211681802045\n exp_intercept = -0.262323073774029\n exp_rsquared = 0.999993745883712\n\n actual = stats.linregress(x, y)\n\n assert_almost_equal(actual.slope, exp_slope)\n assert_almost_equal(actual.intercept, exp_intercept)\n assert_almost_equal(actual.rvalue**2, exp_rsquared)\n\n def test_empty_input(self):\n assert_raises(ValueError, stats.linregress, [], [])\n\n def test_nan_input(self):\n x = np.arange(10.)\n x[9] = np.nan\n\n with np.errstate(invalid=\"ignore\"):\n assert_array_equal(stats.linregress(x, x),\n (np.nan, np.nan, np.nan, np.nan, np.nan))\n\n\ndef test_theilslopes():\n # Basic slope test.\n slope, intercept, lower, upper = stats.theilslopes([0,1,1])\n assert_almost_equal(slope, 0.5)\n assert_almost_equal(intercept, 0.5)\n\n # Test of confidence intervals.\n x = [1, 2, 3, 4, 10, 12, 18]\n y = [9, 15, 19, 20, 45, 55, 78]\n slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07)\n assert_almost_equal(slope, 4)\n assert_almost_equal(upper, 4.38, decimal=2)\n assert_almost_equal(lower, 3.71, decimal=2)\n\n\ndef test_cumfreq():\n x = [1, 4, 2, 1, 3, 1]\n cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)\n assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))\n cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,\n defaultreallimits=(1.5, 5))\n assert_(extrapoints == 3)\n\n # test for namedtuple attribute results\n attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')\n res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))\n check_named_results(res, attributes)\n\n\ndef test_relfreq():\n a = np.array([1, 4, 2, 1, 3, 1])\n relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)\n assert_array_almost_equal(relfreqs,\n array([0.5, 0.16666667, 0.16666667, 0.16666667]))\n\n # test for namedtuple attribute results\n attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')\n res = stats.relfreq(a, numbins=4)\n check_named_results(res, attributes)\n\n # check array_like input is accepted\n relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],\n numbins=4)\n assert_array_almost_equal(relfreqs, relfreqs2)\n\n\nclass TestGMean(object):\n\n def test_1D_list(self):\n a = (1,2,3,4)\n actual = stats.gmean(a)\n desired = power(1*2*3*4,1./4.)\n assert_almost_equal(actual, desired,decimal=14)\n\n desired1 = stats.gmean(a,axis=-1)\n assert_almost_equal(actual, desired1, decimal=14)\n\n def test_1D_array(self):\n a = array((1,2,3,4), float32)\n actual = stats.gmean(a)\n desired = power(1*2*3*4,1./4.)\n assert_almost_equal(actual, desired, decimal=7)\n\n desired1 = 
stats.gmean(a,axis=-1)\n assert_almost_equal(actual, desired1, decimal=7)\n\n def test_2D_array_default(self):\n a = array(((1,2,3,4),\n (1,2,3,4),\n (1,2,3,4)))\n actual = stats.gmean(a)\n desired = array((1,2,3,4))\n assert_array_almost_equal(actual, desired, decimal=14)\n\n desired1 = stats.gmean(a,axis=0)\n assert_array_almost_equal(actual, desired1, decimal=14)\n\n def test_2D_array_dim1(self):\n a = array(((1,2,3,4),\n (1,2,3,4),\n (1,2,3,4)))\n actual = stats.gmean(a, axis=1)\n v = power(1*2*3*4,1./4.)\n desired = array((v,v,v))\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_large_values(self):\n a = array([1e100, 1e200, 1e300])\n actual = stats.gmean(a)\n assert_approx_equal(actual, 1e200, significant=13)\n\n\nclass TestHMean(object):\n def test_1D_list(self):\n a = (1,2,3,4)\n actual = stats.hmean(a)\n desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)\n assert_almost_equal(actual, desired, decimal=14)\n\n desired1 = stats.hmean(array(a),axis=-1)\n assert_almost_equal(actual, desired1, decimal=14)\n\n def test_1D_array(self):\n a = array((1,2,3,4), float64)\n actual = stats.hmean(a)\n desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)\n assert_almost_equal(actual, desired, decimal=14)\n\n desired1 = stats.hmean(a,axis=-1)\n assert_almost_equal(actual, desired1, decimal=14)\n\n def test_2D_array_default(self):\n a = array(((1,2,3,4),\n (1,2,3,4),\n (1,2,3,4)))\n actual = stats.hmean(a)\n desired = array((1.,2.,3.,4.))\n assert_array_almost_equal(actual, desired, decimal=14)\n\n actual1 = stats.hmean(a,axis=0)\n assert_array_almost_equal(actual1, desired, decimal=14)\n\n def test_2D_array_dim1(self):\n a = array(((1,2,3,4),\n (1,2,3,4),\n (1,2,3,4)))\n\n v = 4. / (1./1 + 1./2 + 1./3 + 1./4)\n desired1 = array((v,v,v))\n actual1 = stats.hmean(a, axis=1)\n assert_array_almost_equal(actual1, desired1, decimal=14)\n\n\nclass TestScoreatpercentile(object):\n def setup_method(self):\n self.a1 = [3, 4, 5, 10, -3, -5, 6]\n self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]\n self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]\n\n def test_basic(self):\n x = arange(8) * 0.5\n assert_equal(stats.scoreatpercentile(x, 0), 0.)\n assert_equal(stats.scoreatpercentile(x, 100), 3.5)\n assert_equal(stats.scoreatpercentile(x, 50), 1.75)\n\n def test_fraction(self):\n scoreatperc = stats.scoreatpercentile\n\n # Test defaults\n assert_equal(scoreatperc(list(range(10)), 50), 4.5)\n assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)\n assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)\n assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)\n assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)\n\n # explicitly specify interpolation_method 'fraction' (the default)\n assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),\n 4.5)\n assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),\n interpolation_method='fraction'),\n 4.5)\n assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),\n interpolation_method='fraction'),\n 4.5)\n assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),\n interpolation_method='fraction'),\n 55)\n assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),\n interpolation_method='fraction'),\n 5.5)\n\n def test_lower_higher(self):\n scoreatperc = stats.scoreatpercentile\n\n # interpolation_method 'lower'/'higher'\n assert_equal(scoreatperc(list(range(10)), 50,\n interpolation_method='lower'), 4)\n assert_equal(scoreatperc(list(range(10)), 50,\n interpolation_method='higher'), 5)\n 
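# For range(10), the 50th percentile falls between the order statistics\n        # 4 and 5, so 'lower' truncates down to 4 while 'higher' rounds up to\n        # 5; the limit= variants below apply the same rule after restricting\n        # the data.\n        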
assert_equal(scoreatperc(list(range(10)), 50, (2,7),\n interpolation_method='lower'), 4)\n assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),\n interpolation_method='higher'), 5)\n assert_equal(scoreatperc(list(range(100)), 50, (1,8),\n interpolation_method='lower'), 4)\n assert_equal(scoreatperc(list(range(100)), 50, (1,8),\n interpolation_method='higher'), 5)\n assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),\n interpolation_method='lower'), 10)\n assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),\n interpolation_method='higher'), 100)\n assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),\n interpolation_method='lower'), 1)\n assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),\n interpolation_method='higher'), 10)\n\n def test_sequence_per(self):\n x = arange(8) * 0.5\n expected = np.array([0, 3.5, 1.75])\n res = stats.scoreatpercentile(x, [0, 100, 50])\n assert_allclose(res, expected)\n assert_(isinstance(res, np.ndarray))\n # Test with ndarray. Regression test for gh-2861\n assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),\n expected)\n # Also test combination of 2-D array, axis not None and array-like per\n res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),\n np.array([0, 1, 100, 100]), axis=1)\n expected2 = array([[0, 4, 8],\n [0.03, 4.03, 8.03],\n [3, 7, 11],\n [3, 7, 11]])\n assert_allclose(res2, expected2)\n\n def test_axis(self):\n scoreatperc = stats.scoreatpercentile\n x = arange(12).reshape(3, 4)\n\n assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])\n\n r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]\n assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)\n\n r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]\n assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)\n\n x = array([[1, 1, 1],\n [1, 1, 1],\n [4, 4, 3],\n [1, 1, 1],\n [1, 1, 1]])\n score = stats.scoreatpercentile(x, 50)\n assert_equal(score.shape, ())\n assert_equal(score, 1.0)\n score = stats.scoreatpercentile(x, 50, axis=0)\n assert_equal(score.shape, (3,))\n assert_equal(score, [1, 1, 1])\n\n def test_exception(self):\n assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,\n interpolation_method='foobar')\n assert_raises(ValueError, stats.scoreatpercentile, [1], 101)\n assert_raises(ValueError, stats.scoreatpercentile, [1], -1)\n\n def test_empty(self):\n assert_equal(stats.scoreatpercentile([], 50), np.nan)\n assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)\n assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])\n\n\nclass TestItemfreq(object):\n a = [5, 7, 1, 2, 1, 5, 7] * 10\n b = [1, 2, 5, 7]\n\n def test_numeric_types(self):\n # Check itemfreq works for all dtypes (adapted from np.unique tests)\n def _check_itemfreq(dt):\n a = np.array(self.a, dt)\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n v = stats.itemfreq(a)\n assert_array_equal(v[:, 0], [1, 2, 5, 7])\n assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt))\n\n dtypes = [np.int32, np.int64, np.float32, np.float64,\n np.complex64, np.complex128]\n for dt in dtypes:\n _check_itemfreq(dt)\n\n def test_object_arrays(self):\n a, b = self.a, self.b\n dt = 'O'\n aa = np.empty(len(a), dt)\n aa[:] = a\n bb = np.empty(len(b), dt)\n bb[:] = b\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n v = stats.itemfreq(aa)\n assert_array_equal(v[:, 0], bb)\n\n def test_structured_arrays(self):\n a, b = self.a, self.b\n dt = [('', 'i'), ('', 'i')]\n aa = 
np.array(list(zip(a, a)), dt)\n bb = np.array(list(zip(b, b)), dt)\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n v = stats.itemfreq(aa)\n # Arrays don't compare equal because v[:,0] is object array\n assert_equal(tuple(v[2, 0]), tuple(bb[2]))\n\n\nclass TestMode(object):\n def test_empty(self):\n vals, counts = stats.mode([])\n assert_equal(vals, np.array([]))\n assert_equal(counts, np.array([]))\n\n def test_scalar(self):\n vals, counts = stats.mode(4.)\n assert_equal(vals, np.array([4.]))\n assert_equal(counts, np.array([1]))\n\n def test_basic(self):\n data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]\n vals = stats.mode(data1)\n assert_equal(vals[0][0], 6)\n assert_equal(vals[1][0], 3)\n\n def test_axes(self):\n data1 = [10, 10, 30, 40]\n data2 = [10, 10, 10, 10]\n data3 = [20, 10, 20, 20]\n data4 = [30, 30, 30, 30]\n data5 = [40, 30, 30, 30]\n arr = np.array([data1, data2, data3, data4, data5])\n\n vals = stats.mode(arr, axis=None)\n assert_equal(vals[0], np.array([30]))\n assert_equal(vals[1], np.array([8]))\n\n vals = stats.mode(arr, axis=0)\n assert_equal(vals[0], np.array([[10, 10, 30, 30]]))\n assert_equal(vals[1], np.array([[2, 3, 3, 2]]))\n\n vals = stats.mode(arr, axis=1)\n assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))\n assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))\n\n def test_strings(self):\n data1 = ['rain', 'showers', 'showers']\n\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \".*checked for nan values\")\n vals = stats.mode(data1)\n assert_equal(len(r), 1)\n\n assert_equal(vals[0][0], 'showers')\n assert_equal(vals[1][0], 2)\n\n def test_mixed_objects(self):\n objects = [10, True, np.nan, 'hello', 10]\n arr = np.empty((5,), dtype=object)\n arr[:] = objects\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \".*checked for nan values\")\n vals = stats.mode(arr)\n assert_equal(len(r), 1)\n assert_equal(vals[0][0], 10)\n assert_equal(vals[1][0], 2)\n\n def test_objects(self):\n # Python objects must be sortable (le + eq) and have ne defined\n # for np.unique to work. 
hash is for set.\n class Point(object):\n def __init__(self, x):\n self.x = x\n\n def __eq__(self, other):\n return self.x == other.x\n\n def __ne__(self, other):\n return self.x != other.x\n\n def __lt__(self, other):\n return self.x < other.x\n\n def __hash__(self):\n return hash(self.x)\n\n points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]\n arr = np.empty((8,), dtype=object)\n arr[:] = points\n assert_(len(set(points)) == 4)\n assert_equal(np.unique(arr).shape, (4,))\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \".*checked for nan values\")\n vals = stats.mode(arr)\n assert_equal(len(r), 1)\n\n assert_equal(vals[0][0], Point(2))\n assert_equal(vals[1][0], 4)\n\n def test_mode_result_attributes(self):\n data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]\n data2 = []\n actual = stats.mode(data1)\n attributes = ('mode', 'count')\n check_named_results(actual, attributes)\n actual2 = stats.mode(data2)\n check_named_results(actual2, attributes)\n\n def test_mode_nan(self):\n data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]\n actual = stats.mode(data1)\n assert_equal(actual, (6, 3))\n\n actual = stats.mode(data1, nan_policy='omit')\n assert_equal(actual, (6, 3))\n assert_raises(ValueError, stats.mode, data1, nan_policy='raise')\n assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')\n\n @pytest.mark.parametrize(\"data\", [\n [3, 5, 1, 1, 3],\n [3, np.nan, 5, 1, 1, 3],\n [3, 5, 1],\n [3, np.nan, 5, 1],\n ])\n def test_smallest_equal(self, data):\n result = stats.mode(data, nan_policy='omit')\n assert_equal(result[0][0], 1)\n\n\nclass TestVariability(object):\n\n testcase = [1,2,3,4]\n scalar_testcase = 4.\n\n def test_sem(self):\n # This is not in R, so used:\n # sqrt(var(testcase)*3/4)/sqrt(3)\n\n # y = stats.sem(self.shoes[0])\n # assert_approx_equal(y,0.775177399)\n with suppress_warnings() as sup, np.errstate(invalid=\"ignore\"):\n sup.filter(RuntimeWarning, \"Degrees of freedom <= 0 for slice\")\n y = stats.sem(self.scalar_testcase)\n assert_(np.isnan(y))\n\n y = stats.sem(self.testcase)\n assert_approx_equal(y, 0.6454972244)\n n = len(self.testcase)\n assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),\n stats.sem(self.testcase, ddof=2))\n\n x = np.arange(10.)\n x[9] = np.nan\n assert_equal(stats.sem(x), np.nan)\n assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769)\n assert_raises(ValueError, stats.sem, x, nan_policy='raise')\n assert_raises(ValueError, stats.sem, x, nan_policy='foobar')\n\n def test_zmap(self):\n # not in R, so tested by using:\n # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)\n y = stats.zmap(self.testcase,self.testcase)\n desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])\n assert_array_almost_equal(desired,y,decimal=12)\n\n def test_zmap_axis(self):\n # Test use of 'axis' keyword in zmap.\n x = np.array([[0.0, 0.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 2.0],\n [2.0, 0.0, 2.0, 0.0]])\n\n t1 = 1.0/np.sqrt(2.0/3)\n t2 = np.sqrt(3.)/3\n t3 = np.sqrt(2.)\n\n z0 = stats.zmap(x, x, axis=0)\n z1 = stats.zmap(x, x, axis=1)\n\n z0_expected = [[-t1, -t3/2, -t3/2, 0.0],\n [0.0, t3, -t3/2, t1],\n [t1, -t3/2, t3, -t1]]\n z1_expected = [[-1.0, -1.0, 1.0, 1.0],\n [-t2, -t2, -t2, np.sqrt(3.)],\n [1.0, -1.0, 1.0, -1.0]]\n\n assert_array_almost_equal(z0, z0_expected)\n assert_array_almost_equal(z1, z1_expected)\n\n def test_zmap_ddof(self):\n # Test use of 'ddof' keyword in zmap.\n x = np.array([[0.0, 0.0, 1.0, 1.0],\n [0.0, 1.0, 2.0, 3.0]])\n\n z = stats.zmap(x, x, 
axis=1, ddof=1)\n\n        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))\n        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))\n        assert_array_almost_equal(z[0], z0_expected)\n        assert_array_almost_equal(z[1], z1_expected)\n\n    def test_zscore(self):\n        # not in R, so tested by using:\n        # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)\n        y = stats.zscore(self.testcase)\n        desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])\n        assert_array_almost_equal(desired,y,decimal=12)\n\n    def test_zscore_axis(self):\n        # Test use of 'axis' keyword in zscore.\n        x = np.array([[0.0, 0.0, 1.0, 1.0],\n                      [1.0, 1.0, 1.0, 2.0],\n                      [2.0, 0.0, 2.0, 0.0]])\n\n        t1 = 1.0/np.sqrt(2.0/3)\n        t2 = np.sqrt(3.)/3\n        t3 = np.sqrt(2.)\n\n        z0 = stats.zscore(x, axis=0)\n        z1 = stats.zscore(x, axis=1)\n\n        z0_expected = [[-t1, -t3/2, -t3/2, 0.0],\n                       [0.0, t3, -t3/2, t1],\n                       [t1, -t3/2, t3, -t1]]\n        z1_expected = [[-1.0, -1.0, 1.0, 1.0],\n                       [-t2, -t2, -t2, np.sqrt(3.)],\n                       [1.0, -1.0, 1.0, -1.0]]\n\n        assert_array_almost_equal(z0, z0_expected)\n        assert_array_almost_equal(z1, z1_expected)\n\n    def test_zscore_ddof(self):\n        # Test use of 'ddof' keyword in zscore.\n        x = np.array([[0.0, 0.0, 1.0, 1.0],\n                      [0.0, 1.0, 2.0, 3.0]])\n\n        z = stats.zscore(x, axis=1, ddof=1)\n\n        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))\n        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))\n        assert_array_almost_equal(z[0], z0_expected)\n        assert_array_almost_equal(z[1], z1_expected)\n\n\nclass _numpy_version_warn_context_mgr(object):\n    \"\"\"\n    A simple context manager class to avoid retyping the same code for\n    different versions of numpy when the only difference is that older\n    versions raise warnings.\n\n    This manager does not apply for cases where the old code returns\n    different values.\n    \"\"\"\n    def __init__(self, min_numpy_version, warning_type, num_warnings):\n        if NumpyVersion(np.__version__) < min_numpy_version:\n            self.numpy_is_old = True\n            self.warning_type = warning_type\n            self.num_warnings = num_warnings\n            self.delegate = warnings.catch_warnings(record=True)\n        else:\n            self.numpy_is_old = False\n\n    def __enter__(self):\n        if self.numpy_is_old:\n            self.warn_list = self.delegate.__enter__()\n            warnings.simplefilter(\"always\")\n        return None\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        if self.numpy_is_old:\n            self.delegate.__exit__(exc_type, exc_value, traceback)\n            _check_warnings(self.warn_list, self.warning_type, self.num_warnings)\n\n\ndef _check_warnings(warn_list, expected_type, expected_len):\n    \"\"\"\n    Checks that all of the warnings from a list returned by\n    `warnings.catch_warnings(record=True)` are of the required type and that\n    the list contains the expected number of warnings.\n    \"\"\"\n    assert_equal(len(warn_list), expected_len, \"number of warnings\")\n    for warn_ in warn_list:\n        assert_(warn_.category is expected_type)\n\n\nclass TestIQR(object):\n\n    def test_basic(self):\n        x = np.arange(8) * 0.5\n        np.random.shuffle(x)\n        assert_equal(stats.iqr(x), 1.75)\n\n    def test_api(self):\n        d = np.ones((5, 5))\n        stats.iqr(d)\n        stats.iqr(d, None)\n        stats.iqr(d, 1)\n        stats.iqr(d, (0, 1))\n        stats.iqr(d, None, (10, 90))\n        stats.iqr(d, None, (30, 20), 'raw')\n        stats.iqr(d, None, (25, 75), 1.5, 'propagate')\n        if NumpyVersion(np.__version__) >= '1.9.0a':\n            stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')\n            stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)\n\n    def test_empty(self):\n        assert_equal(stats.iqr([]), np.nan)\n        
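# An empty input has no quartiles to take a difference of, so the IQR\n        # is NaN whether it arrives as a list (above) or an array (below).\n        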
assert_equal(stats.iqr(np.arange(0)), np.nan)\n\n def test_constant(self):\n # Constant array always gives 0\n x = np.ones((7, 4))\n assert_equal(stats.iqr(x), 0.0)\n assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))\n assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))\n # Even for older versions, 'linear' does not raise a warning\n with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 4):\n assert_equal(stats.iqr(x, interpolation='linear'), 0.0)\n assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)\n assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)\n assert_equal(stats.iqr(x, interpolation='lower'), 0.0)\n assert_equal(stats.iqr(x, interpolation='higher'), 0.0)\n\n # 0 only along constant dimensions\n # This also tests much of `axis`\n y = np.ones((4, 5, 6)) * np.arange(6)\n assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))\n assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))\n assert_array_equal(stats.iqr(y, axis=2), 2.5 * np.ones((4, 5)))\n assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))\n assert_array_equal(stats.iqr(y, axis=(0, 2)), 3. * np.ones(5))\n assert_array_equal(stats.iqr(y, axis=(1, 2)), 3. * np.ones(4))\n\n def test_scalarlike(self):\n x = np.arange(1) + 7.0\n assert_equal(stats.iqr(x[0]), 0.0)\n assert_equal(stats.iqr(x), 0.0)\n if NumpyVersion(np.__version__) >= '1.9.0a':\n assert_array_equal(stats.iqr(x, keepdims=True), [0.0])\n else:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert_array_equal(stats.iqr(x, keepdims=True), 0.0)\n _check_warnings(w, RuntimeWarning, 1)\n\n def test_2D(self):\n x = np.arange(15).reshape((3, 5))\n assert_equal(stats.iqr(x), 7.0)\n assert_array_equal(stats.iqr(x, axis=0), 5. * np.ones(5))\n assert_array_equal(stats.iqr(x, axis=1), 2. 
* np.ones(3))\n        assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)\n        assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)\n\n    def test_axis(self):\n        # The `axis` keyword is also put through its paces in `test_keepdims`.\n        o = np.random.normal(size=(71, 23))\n        x = np.dstack([o] * 10)  # x.shape = (71, 23, 10)\n        q = stats.iqr(o)\n\n        assert_equal(stats.iqr(x, axis=(0, 1)), q)\n        x = np.rollaxis(x, -1, 0)  # x.shape = (10, 71, 23)\n        assert_equal(stats.iqr(x, axis=(2, 1)), q)\n        x = x.swapaxes(0, 1)  # x.shape = (71, 10, 23)\n        assert_equal(stats.iqr(x, axis=(0, 2)), q)\n        x = x.swapaxes(0, 1)  # x.shape = (10, 71, 23)\n\n        assert_equal(stats.iqr(x, axis=(0, 1, 2)),\n                     stats.iqr(x, axis=None))\n        assert_equal(stats.iqr(x, axis=(0,)),\n                     stats.iqr(x, axis=0))\n\n        d = np.arange(3 * 5 * 7 * 11)\n        # np.random.shuffle only shuffles along axis=0 in older numpy; d is\n        # still 1-D here, so the whole array is shuffled before the reshape.\n        np.random.shuffle(d)\n        d = d.reshape((3, 5, 7, 11))\n        assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],\n                     stats.iqr(d[:,:,:, 0].ravel()))\n        assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],\n                     stats.iqr(d[:,:, 1,:].ravel()))\n        assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],\n                     stats.iqr(d[:,:, 2,:].ravel()))\n        assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],\n                     stats.iqr(d[2,:,:,:].ravel()))\n        assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],\n                     stats.iqr(d[2, 1,:,:].ravel()))\n        assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],\n                     stats.iqr(d[2, :, :, 1].ravel()))\n        assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],\n                     stats.iqr(d[2, :, 2,:].ravel()))\n\n        if NumpyVersion(np.__version__) >= '1.9.0a':\n            assert_raises(IndexError, stats.iqr, d, axis=4)\n        else:\n            assert_raises(ValueError, stats.iqr, d, axis=4)\n        assert_raises(ValueError, stats.iqr, d, axis=(0, 0))\n\n    def test_rng(self):\n        x = np.arange(5)\n        assert_equal(stats.iqr(x), 2)\n        assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)\n        assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)\n        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2.0 - 0.4\n\n        assert_raises(ValueError, stats.iqr, x, rng=(0, 101))\n        assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))\n        assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))\n\n    def test_interpolation(self):\n        x = np.arange(5)\n        y = np.arange(4)\n        # Default\n        assert_equal(stats.iqr(x), 2)\n        assert_equal(stats.iqr(y), 1.5)\n        if NumpyVersion(np.__version__) >= '1.9.0a':\n            # Linear\n            assert_equal(stats.iqr(x, interpolation='linear'), 2)\n            assert_equal(stats.iqr(y, interpolation='linear'), 1.5)\n            # Higher\n            assert_equal(stats.iqr(x, interpolation='higher'), 2)\n            assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)\n            assert_equal(stats.iqr(y, interpolation='higher'), 2)\n            # Lower (will generally, but not always, be the same as higher)\n            assert_equal(stats.iqr(x, interpolation='lower'), 2)\n            assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)\n            assert_equal(stats.iqr(y, interpolation='lower'), 2)\n            # Nearest\n            assert_equal(stats.iqr(x, interpolation='nearest'), 2)\n            assert_equal(stats.iqr(y, interpolation='nearest'), 1)\n            # Midpoint\n            if NumpyVersion(np.__version__) >= '1.11.0a':\n                assert_equal(stats.iqr(x, interpolation='midpoint'), 2)\n                assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)\n                assert_equal(stats.iqr(y, interpolation='midpoint'), 2)\n            else:\n                # midpoint did not work correctly before numpy 1.11.0\n                assert_equal(stats.iqr(x, interpolation='midpoint'), 2)\n                assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2)\n                assert_equal(stats.iqr(y, interpolation='midpoint'), 2)\n        else:\n            with 
warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # Linear\n assert_equal(stats.iqr(x, interpolation='linear'), 2)\n assert_equal(stats.iqr(y, interpolation='linear'), 1.5)\n # Higher\n assert_equal(stats.iqr(x, interpolation='higher'), 2)\n assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 2.2)\n assert_equal(stats.iqr(y, interpolation='higher'), 1.5)\n # Lower\n assert_equal(stats.iqr(x, interpolation='lower'), 2)\n assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2.2)\n assert_equal(stats.iqr(y, interpolation='lower'), 1.5)\n # Nearest\n assert_equal(stats.iqr(x, interpolation='nearest'), 2)\n assert_equal(stats.iqr(y, interpolation='nearest'), 1.5)\n # Midpoint\n assert_equal(stats.iqr(x, interpolation='midpoint'), 2)\n assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.2)\n assert_equal(stats.iqr(y, interpolation='midpoint'), 1.5)\n _check_warnings(w, RuntimeWarning, 11)\n\n if NumpyVersion(np.__version__) >= '1.9.0a':\n assert_raises(ValueError, stats.iqr, x, interpolation='foobar')\n else:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert_equal(stats.iqr(x, interpolation='foobar'), 2)\n _check_warnings(w, RuntimeWarning, 1)\n\n def test_keepdims(self):\n numpy_version = NumpyVersion(np.__version__)\n\n # Also tests most of `axis`\n x = np.ones((3, 5, 7, 11))\n assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())\n assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))\n assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))\n assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))\n assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))\n assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())\n assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))\n\n if numpy_version >= '1.9.0a':\n assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))\n assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))\n assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))\n assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))\n assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))\n assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))\n assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))\n else:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, ())\n assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 11))\n assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (7, 11))\n assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (5, 7))\n assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 7, 11))\n assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, ())\n assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (7,))\n _check_warnings(w, RuntimeWarning, 7)\n\n def test_nanpolicy(self):\n numpy_version = NumpyVersion(np.__version__)\n x = np.arange(15.0).reshape((3, 5))\n\n # No NaNs\n assert_equal(stats.iqr(x, nan_policy='propagate'), 7)\n assert_equal(stats.iqr(x, nan_policy='omit'), 7)\n assert_equal(stats.iqr(x, nan_policy='raise'), 7)\n\n # Yes NaNs\n x[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n if numpy_version < 
'1.10.0a':\n # Fails over to mishmash of omit/propagate, but mostly omit\n # The first case showcases the \"incorrect\" behavior of np.percentile\n assert_equal(stats.iqr(x, nan_policy='propagate'), 8)\n assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])\n if numpy_version < '1.9.0a':\n assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])\n else:\n # some fixes to percentile nan handling in 1.9\n assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])\n _check_warnings(w, RuntimeWarning, 3)\n else:\n assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)\n assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])\n assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n if numpy_version < '1.9.0a':\n # Fails over to mishmash of omit/propagate, but mostly omit\n assert_equal(stats.iqr(x, nan_policy='omit'), 8)\n assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), [5, 5, np.nan, 5, 5])\n assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 3, 2])\n _check_warnings(w, RuntimeWarning, 3)\n else:\n assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)\n assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), 5 * np.ones(5))\n assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])\n\n assert_raises(ValueError, stats.iqr, x, nan_policy='raise')\n assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')\n assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')\n\n # Bad policy\n assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')\n\n def test_scale(self):\n numpy_version = NumpyVersion(np.__version__)\n x = np.arange(15.0).reshape((3, 5))\n\n # No NaNs\n assert_equal(stats.iqr(x, scale='raw'), 7)\n assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)\n assert_equal(stats.iqr(x, scale=2.0), 3.5)\n\n # Yes NaNs\n x[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n if numpy_version < '1.10.0a':\n # Fails over to mishmash of omit/propagate, but mostly omit\n assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), 8)\n assert_almost_equal(stats.iqr(x, scale='normal',\n nan_policy='propagate'),\n 8 / 1.3489795)\n assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), 4)\n # axis=1 chosen to show behavior with both nans and without\n if numpy_version < '1.9.0a':\n assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])\n assert_almost_equal(stats.iqr(x, axis=1, scale='normal',\n nan_policy='propagate'),\n np.array([2, 3, 2]) / 1.3489795)\n assert_equal(stats.iqr(x, axis=1, scale=2.0,\n nan_policy='propagate'), [1, 1.5, 1])\n else:\n # some fixes to percentile nan handling in 1.9\n assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])\n assert_almost_equal(stats.iqr(x, axis=1, scale='normal',\n nan_policy='propagate'),\n np.array([2, np.nan, 2]) / 1.3489795)\n assert_equal(stats.iqr(x, axis=1, scale=2.0,\n nan_policy='propagate'), [1, np.nan, 1])\n _check_warnings(w, RuntimeWarning, 6)\n else:\n assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan)\n assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)\n assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)\n # axis=1 chosen to show behavior with both nans and without\n assert_equal(stats.iqr(x, axis=1, scale='raw',\n nan_policy='propagate'), 
[2, np.nan, 2])\n                assert_almost_equal(stats.iqr(x, axis=1, scale='normal',\n                                              nan_policy='propagate'),\n                                    np.array([2, np.nan, 2]) / 1.3489795)\n                assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),\n                             [1, np.nan, 1])\n                if numpy_version <= '1.16.6':\n                    _check_warnings(w, RuntimeWarning, 6)\n                else:\n                    _check_warnings(w, RuntimeWarning, 0)\n\n        if numpy_version < '1.9.0a':\n            with warnings.catch_warnings(record=True) as w:\n                warnings.simplefilter(\"always\")\n                # Fails over to a mishmash of omit/propagate, but mostly omit\n                assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 8)\n                assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),\n                                    8 / 1.3489795)\n                assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 4)\n                _check_warnings(w, RuntimeWarning, 3)\n        else:\n            assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 7.5)\n            assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),\n                                7.5 / 1.3489795)\n            assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)\n\n        # Bad scale\n        assert_raises(ValueError, stats.iqr, x, scale='foobar')\n\n\nclass TestMoments(object):\n    \"\"\"\n    Comparison numbers are found using R v.1.5.1.\n    Note that length(testcase) = 4.\n    testmathworks comes from the documentation for the MATLAB Statistics\n    Toolbox and can be found at both\n    https://www.mathworks.com/help/stats/kurtosis.html\n    https://www.mathworks.com/help/stats/skewness.html\n    Both test cases come from there.\n    \"\"\"\n    testcase = [1,2,3,4]\n    scalar_testcase = 4.\n    np.random.seed(1234)\n    testcase_moment_accuracy = np.random.rand(42)\n    testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]\n\n    def test_moment(self):\n        # mean((testcase - mean(testcase))**power, axis=0)\n        y = stats.moment(self.scalar_testcase)\n        assert_approx_equal(y, 0.0)\n        y = stats.moment(self.testcase, 0)\n        assert_approx_equal(y, 1.0)\n        y = stats.moment(self.testcase, 1)\n        assert_approx_equal(y, 0.0, 10)\n        y = stats.moment(self.testcase, 2)\n        assert_approx_equal(y, 1.25)\n        y = stats.moment(self.testcase, 3)\n        assert_approx_equal(y, 0.0)\n        y = stats.moment(self.testcase, 4)\n        assert_approx_equal(y, 2.5625)\n\n        # check array_like input for moment\n        y = stats.moment(self.testcase, [1, 2, 3, 4])\n        assert_allclose(y, [0, 1.25, 0, 2.5625])\n\n        # check that the moment order must be integer-valued\n        # (integral floats are accepted, non-integral ones raise)\n        y = stats.moment(self.testcase, 0.0)\n        assert_approx_equal(y, 1.0)\n        assert_raises(ValueError, stats.moment, self.testcase, 1.2)\n        y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])\n        assert_allclose(y, [0, 1.25, 0, 2.5625])\n\n        # test empty input\n        y = stats.moment([])\n        assert_equal(y, np.nan)\n\n        x = np.arange(10.)\n        x[9] = np.nan\n        assert_equal(stats.moment(x, 2), np.nan)\n        assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)\n        assert_raises(ValueError, stats.moment, x, nan_policy='raise')\n        assert_raises(ValueError, stats.moment, x, nan_policy='foobar')\n\n    def test_moment_propagate_nan(self):\n        # Check that the shape of the result is the same for inputs\n        # with and without nans, cf gh-5817\n        a = np.arange(8).reshape(2, -1).astype(float)\n        a[1, 0] = np.nan\n        mm = stats.moment(a, 2, axis=1, nan_policy=\"propagate\")\n        np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)\n\n    def test_variation(self):\n        # variation = samplestd / mean\n        y = stats.variation(self.scalar_testcase)\n        assert_approx_equal(y, 0.0)\n        y = stats.variation(self.testcase)\n        assert_approx_equal(y, 0.44721359549996, 10)\n\n        x = np.arange(10.)\n        x[9] = np.nan\n        assert_equal(stats.variation(x), np.nan)\n        
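# With nan_policy='omit' the NaN is dropped, leaving arange(9.), whose\n        # population std is sqrt(60/9) ~= 2.582 and whose mean is 4, so the\n        # coefficient of variation is ~0.6455, as checked below.\n        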
assert_almost_equal(stats.variation(x, nan_policy='omit'),\n                            0.6454972243679028)\n        assert_raises(ValueError, stats.variation, x, nan_policy='raise')\n        assert_raises(ValueError, stats.variation, x, nan_policy='foobar')\n\n    def test_variation_propagate_nan(self):\n        # Check that the shape of the result is the same for inputs\n        # with and without nans, cf gh-5817\n        a = np.arange(8).reshape(2, -1).astype(float)\n        a[1, 0] = np.nan\n        vv = stats.variation(a, axis=1, nan_policy=\"propagate\")\n        np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15)\n\n    def test_skewness(self):\n        # Scalar test case\n        y = stats.skew(self.scalar_testcase)\n        assert_approx_equal(y, 0.0)\n        # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /\n        #     ((sqrt(var(testmathworks)*4/5))**3)/5\n        y = stats.skew(self.testmathworks)\n        assert_approx_equal(y, -0.29322304336607, 10)\n        y = stats.skew(self.testmathworks, bias=0)\n        assert_approx_equal(y, -0.437111105023940, 10)\n        y = stats.skew(self.testcase)\n        assert_approx_equal(y, 0.0, 10)\n\n        x = np.arange(10.)\n        x[9] = np.nan\n        with np.errstate(invalid='ignore'):\n            assert_equal(stats.skew(x), np.nan)\n        assert_equal(stats.skew(x, nan_policy='omit'), 0.)\n        assert_raises(ValueError, stats.skew, x, nan_policy='raise')\n        assert_raises(ValueError, stats.skew, x, nan_policy='foobar')\n\n    def test_skewness_scalar(self):\n        # `skew` must return a scalar for 1-dim input\n        assert_equal(stats.skew(arange(10)), 0.0)\n\n    def test_skew_propagate_nan(self):\n        # Check that the shape of the result is the same for inputs\n        # with and without nans, cf gh-5817\n        a = np.arange(8).reshape(2, -1).astype(float)\n        a[1, 0] = np.nan\n        with np.errstate(invalid='ignore'):\n            s = stats.skew(a, axis=1, nan_policy=\"propagate\")\n        np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)\n\n    def test_kurtosis(self):\n        # Scalar test case\n        y = stats.kurtosis(self.scalar_testcase)\n        assert_approx_equal(y, -3.0)\n        # sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4\n        # sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5\n        # Set flags for axis = 0 and\n        # fisher=0 (Pearson's definition of kurtosis for compatibility with Matlab)\n        y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)\n        assert_approx_equal(y, 2.1658856802973, 10)\n\n        # Note that MATLAB has confusing docs for the following case:\n        # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis\n        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson-3)\n        # The MATLAB docs imply that both should give Fisher's\n        y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)\n        assert_approx_equal(y, 3.663542721189047, 10)\n        y = stats.kurtosis(self.testcase, 0, 0)\n        assert_approx_equal(y, 1.64)\n\n        x = np.arange(10.)\n        x[9] = np.nan\n        assert_equal(stats.kurtosis(x), np.nan)\n        assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)\n        assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')\n        assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')\n\n    def test_kurtosis_array_scalar(self):\n        assert_equal(type(stats.kurtosis([1,2,3])), float)\n\n    def test_kurtosis_propagate_nan(self):\n        # Check that the shape of the result is the same for inputs\n        # with and without nans, cf gh-5817\n        a = np.arange(8).reshape(2, -1).astype(float)\n        a[1, 0] = np.nan\n        k = stats.kurtosis(a, axis=1, nan_policy=\"propagate\")\n        np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)\n\n    def test_moment_accuracy(self):\n        # 'moment' must have a small enough 
error compared to the slower\n # but very accurate numpy.power() implementation.\n tc_no_mean = self.testcase_moment_accuracy - \\\n np.mean(self.testcase_moment_accuracy)\n assert_allclose(np.power(tc_no_mean, 42).mean(),\n stats.moment(self.testcase_moment_accuracy, 42))\n\n\nclass TestStudentTest(object):\n X1 = np.array([-1, 0, 1])\n X2 = np.array([0, 1, 2])\n T1_0 = 0\n P1_0 = 1\n T1_1 = -1.732051\n P1_1 = 0.2254033\n T1_2 = -3.464102\n P1_2 = 0.0741799\n T2_0 = 1.732051\n P2_0 = 0.2254033\n\n def test_onesample(self):\n with suppress_warnings() as sup, np.errstate(invalid=\"ignore\"):\n sup.filter(RuntimeWarning, \"Degrees of freedom <= 0 for slice\")\n t, p = stats.ttest_1samp(4., 3.)\n assert_(np.isnan(t))\n assert_(np.isnan(p))\n\n t, p = stats.ttest_1samp(self.X1, 0)\n\n assert_array_almost_equal(t, self.T1_0)\n assert_array_almost_equal(p, self.P1_0)\n\n res = stats.ttest_1samp(self.X1, 0)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n t, p = stats.ttest_1samp(self.X2, 0)\n\n assert_array_almost_equal(t, self.T2_0)\n assert_array_almost_equal(p, self.P2_0)\n\n t, p = stats.ttest_1samp(self.X1, 1)\n\n assert_array_almost_equal(t, self.T1_1)\n assert_array_almost_equal(p, self.P1_1)\n\n t, p = stats.ttest_1samp(self.X1, 2)\n\n assert_array_almost_equal(t, self.T1_2)\n assert_array_almost_equal(p, self.P1_2)\n\n # check nan policy\n np.random.seed(7654567)\n x = stats.norm.rvs(loc=5, scale=10, size=51)\n x[50] = np.nan\n with np.errstate(invalid=\"ignore\"):\n assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))\n\n assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),\n (-1.6412624074367159, 0.107147027334048005))\n assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')\n assert_raises(ValueError, stats.ttest_1samp, x, 5.0,\n nan_policy='foobar')\n\n\ndef test_percentileofscore():\n pcos = stats.percentileofscore\n\n assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)\n\n for (kind, result) in [('mean', 35.0),\n ('strict', 30.0),\n ('weak', 40.0)]:\n assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result)\n\n # multiple - 2\n for (kind, result) in [('rank', 45.0),\n ('strict', 30.0),\n ('weak', 50.0),\n ('mean', 40.0)]:\n assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result)\n\n # multiple - 3\n assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)\n for (kind, result) in [('rank', 50.0),\n ('mean', 45.0),\n ('strict', 30.0),\n ('weak', 60.0)]:\n\n assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result)\n\n # missing\n for kind in ('rank', 'mean', 'strict', 'weak'):\n assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30)\n\n # larger numbers\n for (kind, result) in [('mean', 35.0),\n ('strict', 30.0),\n ('weak', 40.0)]:\n assert_equal(\n pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,\n kind=kind), result)\n\n for (kind, result) in [('mean', 45.0),\n ('strict', 30.0),\n ('weak', 60.0)]:\n assert_equal(\n pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],\n 40, kind=kind), result)\n\n for kind in ('rank', 'mean', 'strict', 'weak'):\n assert_equal(\n pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],\n 40, kind=kind), 30.0)\n\n # boundaries\n for (kind, result) in [('rank', 10.0),\n ('mean', 5.0),\n ('strict', 0.0),\n ('weak', 10.0)]:\n assert_equal(\n pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],\n 10, kind=kind), result)\n\n for (kind, result) in [('rank', 100.0),\n ('mean', 95.0),\n ('strict', 90.0),\n ('weak', 100.0)]:\n assert_equal(\n pcos([10, 20, 30, 50, 60, 70, 80, 
90, 100, 110],\n 110, kind=kind), result)\n\n # out of bounds\n for (kind, score, result) in [('rank', 200, 100.0),\n ('mean', 200, 100.0),\n ('mean', 0, 0.0)]:\n assert_equal(\n pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],\n score, kind=kind), result)\n\n assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized')\n\n\nPowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis',\n 'chi2', # Pearson's\n 'log', # G-test (log-likelihood)\n 'mod_log', # Modified log-likelihood\n 'cr', # Cressie-Read (lambda=2/3)\n ])\n\n# The details of the first two elements in power_div_1d_cases are used\n# in a test in TestPowerDivergence. Check that code before making\n# any changes here.\npower_div_1d_cases = [\n # Use the default f_exp.\n PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,\n chi2=4,\n log=2*(4*np.log(4/8) + 12*np.log(12/8)),\n mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),\n cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),\n # Give a non-uniform f_exp.\n PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,\n chi2=24,\n log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),\n mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),\n cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +\n 8*((8/2)**(2/3) - 1))/(5/9)),\n # f_exp is a scalar.\n PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,\n chi2=4,\n log=2*(4*np.log(4/8) + 12*np.log(12/8)),\n mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),\n cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),\n # f_exp equal to f_obs.\n PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,\n chi2=0, log=0, mod_log=0, cr=0),\n]\n\n\npower_div_empty_cases = [\n # Shape is (0,)--a data set with length 0. The computed\n # test statistic should be 0.\n PowerDivCase(f_obs=[],\n f_exp=None, ddof=0, axis=0,\n chi2=0, log=0, mod_log=0, cr=0),\n # Shape is (0, 3). This is 3 data sets, but each data set has\n # length 0, so the computed test statistic should be [0, 0, 0].\n PowerDivCase(f_obs=np.array([[],[],[]]).T,\n f_exp=None, ddof=0, axis=0,\n chi2=[0, 0, 0],\n log=[0, 0, 0],\n mod_log=[0, 0, 0],\n cr=[0, 0, 0]),\n # Shape is (3, 0). This represents an empty collection of\n # data sets in which each data set has length 3. 
The test\n # statistic should be an empty array.\n PowerDivCase(f_obs=np.array([[],[],[]]),\n f_exp=None, ddof=0, axis=0,\n chi2=[],\n log=[],\n mod_log=[],\n cr=[]),\n]\n\n\nclass TestPowerDivergence(object):\n\n def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,\n expected_stat):\n f_obs = np.asarray(f_obs)\n if axis is None:\n num_obs = f_obs.size\n else:\n b = np.broadcast(f_obs, f_exp)\n num_obs = b.shape[axis]\n\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"Mean of empty slice\")\n stat, p = stats.power_divergence(\n f_obs=f_obs, f_exp=f_exp, ddof=ddof,\n axis=axis, lambda_=lambda_)\n assert_allclose(stat, expected_stat)\n\n if lambda_ == 1 or lambda_ == \"pearson\":\n # Also test stats.chisquare.\n stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,\n axis=axis)\n assert_allclose(stat, expected_stat)\n\n ddof = np.asarray(ddof)\n expected_p = stats.distributions.chi2.sf(expected_stat,\n num_obs - 1 - ddof)\n assert_allclose(p, expected_p)\n\n def test_basic(self):\n for case in power_div_1d_cases:\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n None, case.chi2)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"pearson\", case.chi2)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n 1, case.chi2)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"log-likelihood\", case.log)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"mod-log-likelihood\", case.mod_log)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"cressie-read\", case.cr)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n 2/3, case.cr)\n\n def test_basic_masked(self):\n for case in power_div_1d_cases:\n mobs = np.ma.array(case.f_obs)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n None, case.chi2)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n \"pearson\", case.chi2)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n 1, case.chi2)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n \"log-likelihood\", case.log)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n \"mod-log-likelihood\", case.mod_log)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n \"cressie-read\", case.cr)\n self.check_power_divergence(\n mobs, case.f_exp, case.ddof, case.axis,\n 2/3, case.cr)\n\n def test_axis(self):\n case0 = power_div_1d_cases[0]\n case1 = power_div_1d_cases[1]\n f_obs = np.vstack((case0.f_obs, case1.f_obs))\n f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),\n case1.f_exp))\n # Check the four computational code paths in power_divergence\n # using a 2D array with axis=1.\n self.check_power_divergence(\n f_obs, f_exp, 0, 1,\n \"pearson\", [case0.chi2, case1.chi2])\n self.check_power_divergence(\n f_obs, f_exp, 0, 1,\n \"log-likelihood\", [case0.log, case1.log])\n self.check_power_divergence(\n f_obs, f_exp, 0, 1,\n \"mod-log-likelihood\", [case0.mod_log, case1.mod_log])\n self.check_power_divergence(\n f_obs, f_exp, 0, 1,\n \"cressie-read\", [case0.cr, case1.cr])\n # Reshape case0.f_obs to shape (2,2), and use axis=None.\n # The result should be the same.\n self.check_power_divergence(\n np.array(case0.f_obs).reshape(2, 2), None, 0, None,\n \"pearson\", case0.chi2)\n\n def test_ddof_broadcasting(self):\n # 
Test that ddof broadcasts correctly.\n # ddof does not affect the test statistic. It is broadcast\n # with the computed test statistic for the computation of\n # the p value.\n\n case0 = power_div_1d_cases[0]\n case1 = power_div_1d_cases[1]\n # Create 4x2 arrays of observed and expected frequencies.\n f_obs = np.vstack((case0.f_obs, case1.f_obs)).T\n f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),\n case1.f_exp)).T\n\n expected_chi2 = [case0.chi2, case1.chi2]\n\n # ddof has shape (2, 1). This is broadcast with the computed\n # statistic, so p will have shape (2,2).\n ddof = np.array([[0], [1]])\n\n stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)\n assert_allclose(stat, expected_chi2)\n\n # Compute the p values separately, passing in scalars for ddof.\n stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])\n stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])\n\n assert_array_equal(p, np.vstack((p0, p1)))\n\n def test_empty_cases(self):\n with warnings.catch_warnings():\n for case in power_div_empty_cases:\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"pearson\", case.chi2)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"log-likelihood\", case.log)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"mod-log-likelihood\", case.mod_log)\n self.check_power_divergence(\n case.f_obs, case.f_exp, case.ddof, case.axis,\n \"cressie-read\", case.cr)\n\n def test_power_divergence_result_attributes(self):\n f_obs = power_div_1d_cases[0].f_obs\n f_exp = power_div_1d_cases[0].f_exp\n ddof = power_div_1d_cases[0].ddof\n axis = power_div_1d_cases[0].axis\n\n res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,\n axis=axis, lambda_=\"pearson\")\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n\ndef test_chisquare_masked_arrays():\n # Test masked arrays.\n obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T\n mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T\n mobs = np.ma.masked_array(obs, mask)\n expected_chisq = np.array([24.0, 0.5])\n expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),\n 2*(3*np.log(0.75) + 5*np.log(1.25))])\n\n chi2 = stats.distributions.chi2\n\n chisq, p = stats.chisquare(mobs)\n mat.assert_array_equal(chisq, expected_chisq)\n mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,\n mobs.count(axis=0) - 1))\n\n g, p = stats.power_divergence(mobs, lambda_='log-likelihood')\n mat.assert_array_almost_equal(g, expected_g, decimal=15)\n mat.assert_array_almost_equal(p, chi2.sf(expected_g,\n mobs.count(axis=0) - 1))\n\n chisq, p = stats.chisquare(mobs.T, axis=1)\n mat.assert_array_equal(chisq, expected_chisq)\n mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,\n mobs.T.count(axis=1) - 1))\n g, p = stats.power_divergence(mobs.T, axis=1, lambda_=\"log-likelihood\")\n mat.assert_array_almost_equal(g, expected_g, decimal=15)\n mat.assert_array_almost_equal(p, chi2.sf(expected_g,\n mobs.count(axis=0) - 1))\n\n obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])\n exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])\n chi2, p = stats.chisquare(obs1, f_exp=exp1)\n # Because of the mask at index 3 of obs1 and at index 4 of exp1,\n # only the first three elements are included in the calculation\n # of the statistic.\n mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)\n\n # When axis=None, the two values should have type np.float64.\n chisq, p = 
stats.chisquare(np.ma.array([1,2,3]), axis=None)\n    assert_(isinstance(chisq, np.float64))\n    assert_(isinstance(p, np.float64))\n    assert_equal(chisq, 1.0)\n    assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))\n\n    # Empty arrays:\n    # A data set with length 0 returns a masked scalar.\n    with np.errstate(invalid='ignore'):\n        with suppress_warnings() as sup:\n            sup.filter(RuntimeWarning, \"Mean of empty slice\")\n            chisq, p = stats.chisquare(np.ma.array([]))\n    assert_(isinstance(chisq, np.ma.MaskedArray))\n    assert_equal(chisq.shape, ())\n    assert_(chisq.mask)\n\n    empty3 = np.ma.array([[],[],[]])\n\n    # empty3 is a collection of 0 data sets (whose lengths would be 3, if\n    # there were any), so the return value is an array with length 0.\n    chisq, p = stats.chisquare(empty3)\n    assert_(isinstance(chisq, np.ma.MaskedArray))\n    mat.assert_array_equal(chisq, [])\n\n    # empty3.T is an array containing 3 data sets, each with length 0,\n    # so an array of size (3,) is returned, with all values masked.\n    with np.errstate(invalid='ignore'):\n        with suppress_warnings() as sup:\n            sup.filter(RuntimeWarning, \"Mean of empty slice\")\n            chisq, p = stats.chisquare(empty3.T)\n\n    assert_(isinstance(chisq, np.ma.MaskedArray))\n    assert_equal(chisq.shape, (3,))\n    assert_(np.all(chisq.mask))\n\n\ndef test_power_divergence_against_cressie_read_data():\n    # Test stats.power_divergence against tables 4 and 5 from\n    # Cressie and Read, \"Multinomial Goodness-of-Fit Tests\",\n    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.\n    # This tests the calculation for several values of lambda.\n\n    # `table4` holds just the second and third columns from Table 4.\n    table4 = np.array([\n        # observed, expected,\n        15, 15.171,\n        11, 13.952,\n        14, 12.831,\n        17, 11.800,\n        5, 10.852,\n        11, 9.9796,\n        10, 9.1777,\n        4, 8.4402,\n        8, 7.7620,\n        10, 7.1383,\n        7, 6.5647,\n        9, 6.0371,\n        11, 5.5520,\n        3, 5.1059,\n        6, 4.6956,\n        1, 4.3183,\n        1, 3.9713,\n        4, 3.6522,\n        ]).reshape(-1, 2)\n    table5 = np.array([\n        # lambda, statistic\n        -10.0, 72.2e3,\n        -5.0, 28.9e1,\n        -3.0, 65.6,\n        -2.0, 40.6,\n        -1.5, 34.0,\n        -1.0, 29.5,\n        -0.5, 26.5,\n        0.0, 24.6,\n        0.5, 23.4,\n        0.67, 23.1,\n        1.0, 22.7,\n        1.5, 22.6,\n        2.0, 22.9,\n        3.0, 24.8,\n        5.0, 35.5,\n        10.0, 21.4e1,\n        ]).reshape(-1, 2)\n\n    for lambda_, expected_stat in table5:\n        stat, p = stats.power_divergence(table4[:,0], table4[:,1],\n                                         lambda_=lambda_)\n        assert_allclose(stat, expected_stat, rtol=5e-3)\n\n\ndef test_friedmanchisquare():\n    # see ticket:113\n    # verified with MATLAB and R\n    # From Demsar \"Statistical Comparisons of Classifiers over Multiple Data Sets\"\n    # 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)\n    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,\n                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),\n          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,\n                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),\n          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,\n                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),\n          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,\n                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]\n\n    # From \"Bioestadistica para las ciencias de la salud\" Xf=18.95 p<0.001:\n    x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),\n          array([2,2,1,2,3,1,2,3,2,1,1,3]),\n          array([2,4,3,3,4,3,3,4,4,1,2,1]),\n          array([3,5,4,3,4,4,3,3,3,4,4,4])]\n\n    # From Jerrold H. 
Zar, \"Biostatistical Analysis\"(example 12.6), Xf=10.68, 0.005 < p < 0.01:\n # Probability from this example is inexact using Chisquare approximation of Friedman Chisquare.\n x3 = [array([7.0,9.9,8.5,5.1,10.3]),\n array([5.3,5.7,4.7,3.5,7.7]),\n array([4.9,7.6,5.5,2.8,8.4]),\n array([8.8,8.9,8.1,3.3,9.1])]\n\n assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),\n (10.2283464566929, 0.0167215803284414))\n assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),\n (18.9428571428571, 0.000280938375189499))\n assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),\n (10.68, 0.0135882729582176))\n assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])\n\n # test for namedtuple attribute results\n attributes = ('statistic', 'pvalue')\n res = stats.friedmanchisquare(*x1)\n check_named_results(res, attributes)\n\n # test using mstats\n assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],\n x1[2], x1[3]),\n (10.2283464566929, 0.0167215803284414))\n # the following fails\n # assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),\n # (18.9428571428571, 0.000280938375189499))\n assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],\n x3[2], x3[3]),\n (10.68, 0.0135882729582176))\n assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])\n\n\ndef test_kstest():\n # from numpy.testing import assert_almost_equal\n\n # comparing with values from R\n x = np.linspace(-1,1,9)\n D,p = stats.kstest(x,'norm')\n assert_almost_equal(D, 0.15865525393145705, 12)\n assert_almost_equal(p, 0.95164069201518386, 1)\n\n x = np.linspace(-15,15,9)\n D,p = stats.kstest(x,'norm')\n assert_almost_equal(D, 0.44435602715924361, 15)\n assert_almost_equal(p, 0.038850140086788665, 8)\n\n # test for namedtuple attribute results\n attributes = ('statistic', 'pvalue')\n res = stats.kstest(x, 'norm')\n check_named_results(res, attributes)\n\n # the following tests rely on deterministicaly replicated rvs\n np.random.seed(987654321)\n x = stats.norm.rvs(loc=0.2, size=100)\n D,p = stats.kstest(x, 'norm', mode='asymp')\n assert_almost_equal(D, 0.12464329735846891, 15)\n assert_almost_equal(p, 0.089444888711820769, 15)\n assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),\n np.array((0.12464329735846891, 0.089444888711820769)), 15)\n assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),\n np.array((0.12464329735846891, 0.040989164077641749)), 15)\n # this 'greater' test fails with precision of decimal=14\n assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),\n np.array((0.0072115233216310994, 0.98531158590396228)), 12)\n\n # missing: no test that uses *args\n\n\ndef test_ks_2samp():\n # exact small sample solution\n data1 = np.array([1.0,2.0])\n data2 = np.array([1.0,2.0,3.0])\n assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),\n np.array((0.33333333333333337, 0.99062316386915694)))\n assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),\n np.array((0.66666666666666674, 0.42490954988801982)))\n # these can also be verified graphically\n assert_almost_equal(\n np.array(stats.ks_2samp(np.linspace(1,100,100),\n np.linspace(1,100,100)+2+0.1)),\n np.array((0.030000000000000027, 0.99999999996005062)))\n assert_almost_equal(\n np.array(stats.ks_2samp(np.linspace(1,100,100),\n np.linspace(1,100,100)+2-0.1)),\n np.array((0.020000000000000018, 0.99999999999999933)))\n # these are just regression tests\n assert_almost_equal(\n 
def test_ks_2samp():\n    # exact small sample solution\n    data1 = np.array([1.0,2.0])\n    data2 = np.array([1.0,2.0,3.0])\n    assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),\n                        np.array((0.33333333333333337, 0.99062316386915694)))\n    assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),\n                        np.array((0.66666666666666674, 0.42490954988801982)))\n    # these can also be verified graphically\n    assert_almost_equal(\n        np.array(stats.ks_2samp(np.linspace(1,100,100),\n                                np.linspace(1,100,100)+2+0.1)),\n        np.array((0.030000000000000027, 0.99999999996005062)))\n    assert_almost_equal(\n        np.array(stats.ks_2samp(np.linspace(1,100,100),\n                                np.linspace(1,100,100)+2-0.1)),\n        np.array((0.020000000000000018, 0.99999999999999933)))\n    # these are just regression tests\n    assert_almost_equal(\n        np.array(stats.ks_2samp(np.linspace(1,100,100),\n                                np.linspace(1,100,110)+20.1)),\n        np.array((0.21090909090909091, 0.015880386730710221)))\n    assert_almost_equal(\n        np.array(stats.ks_2samp(np.linspace(1,100,100),\n                                np.linspace(1,100,110)+20-0.1)),\n        np.array((0.20818181818181825, 0.017981441789762638)))\n\n    # test for namedtuple attribute results\n    attributes = ('statistic', 'pvalue')\n    res = stats.ks_2samp(data1 - 0.01, data2)\n    check_named_results(res, attributes)\n\n\ndef test_ttest_rel():\n    # regression test\n    tr,pr = 0.81248591389165692, 0.41846234511362157\n    tpr = ([tr,-tr],[pr,pr])\n\n    rvs1 = np.linspace(1,100,100)\n    rvs2 = np.linspace(1.01,99.989,100)\n    rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])\n    rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])\n\n    t,p = stats.ttest_rel(rvs1, rvs2, axis=0)\n    assert_array_almost_equal([t,p],(tr,pr))\n    t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)\n    assert_array_almost_equal([t,p],tpr)\n    t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)\n    assert_array_almost_equal([t,p],tpr)\n\n    # test scalars\n    with suppress_warnings() as sup, np.errstate(invalid=\"ignore\"):\n        sup.filter(RuntimeWarning, \"Degrees of freedom <= 0 for slice\")\n        t, p = stats.ttest_rel(4., 3.)\n    assert_(np.isnan(t))\n    assert_(np.isnan(p))\n\n    # test for namedtuple attribute results\n    attributes = ('statistic', 'pvalue')\n    res = stats.ttest_rel(rvs1, rvs2, axis=0)\n    check_named_results(res, attributes)\n\n    # test on 3 dimensions\n    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])\n    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])\n    t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)\n    assert_array_almost_equal(np.abs(t), tr)\n    assert_array_almost_equal(np.abs(p), pr)\n    assert_equal(t.shape, (2, 3))\n\n    t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)\n    assert_array_almost_equal(np.abs(t), tr)\n    assert_array_almost_equal(np.abs(p), pr)\n    assert_equal(t.shape, (3, 2))\n\n    # check nan policy\n    np.random.seed(12345678)\n    x = stats.norm.rvs(loc=5, scale=10, size=501)\n    x[500] = np.nan\n    y = (stats.norm.rvs(loc=5, scale=10, size=501) +\n         stats.norm.rvs(scale=0.2, size=501))\n    y[500] = np.nan\n\n    with np.errstate(invalid=\"ignore\"):\n        assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))\n\n    assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),\n                              (0.25299925303978066, 0.8003729814201519))\n    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')\n    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')\n\n    # test zero division problem\n    t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])\n    assert_equal((np.abs(t), p), (np.inf, 0))\n    with np.errstate(invalid=\"ignore\"):\n        assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))\n\n    # check that nans in the input arrays result in nan output\n    anan = np.array([[1, np.nan], [-1, 1]])\n    assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),\n                 ([0, np.nan], [1, np.nan]))\n\n    # test that incorrect input shapes raise an error\n    x = np.arange(24)\n    assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),\n                  x.reshape((2, 3, 4)))\n\n\ndef test_ttest_rel_nan_2nd_arg():\n    # regression test for gh-6134: nans in the second arg were not handled\n    x = [np.nan, 2.0, 3.0, 4.0]\n    y = [1.0, 2.0, 1.0, 2.0]\n\n    r1 = stats.ttest_rel(x, y, nan_policy='omit')\n    r2 = stats.ttest_rel(y, x, nan_policy='omit')\n    assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)\n    assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)\n\n    # NB: arguments are 
paired when NaNs are dropped\n r3 = stats.ttest_rel(y[1:], x[1:])\n assert_allclose(r2, r3, atol=1e-15)\n\n # .. and this is consistent with R. R code:\n # x = c(NA, 2.0, 3.0, 4.0)\n # y = c(1.0, 2.0, 1.0, 2.0)\n # t.test(x, y, paired=TRUE)\n assert_allclose(r2, (-2, 0.1835), atol=1e-4)\n\n\ndef _desc_stats(x1, x2, axis=0):\n def _stats(x, axis=0):\n x = np.asarray(x)\n mu = np.mean(x, axis=axis)\n std = np.std(x, axis=axis, ddof=1)\n nobs = x.shape[axis]\n return mu, std, nobs\n return _stats(x1, axis) + _stats(x2, axis)\n\n\ndef test_ttest_ind():\n # regression test\n tr = 1.0912746897927283\n pr = 0.27647818616351882\n tpr = ([tr,-tr],[pr,pr])\n\n rvs2 = np.linspace(1,100,100)\n rvs1 = np.linspace(5,105,100)\n rvs1_2D = np.array([rvs1, rvs2])\n rvs2_2D = np.array([rvs2, rvs1])\n\n t,p = stats.ttest_ind(rvs1, rvs2, axis=0)\n assert_array_almost_equal([t,p],(tr,pr))\n # test from_stats API\n assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,\n rvs2)),\n [t, p])\n t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)\n assert_array_almost_equal([t,p],tpr)\n args = _desc_stats(rvs1_2D.T, rvs2_2D.T)\n assert_array_almost_equal(stats.ttest_ind_from_stats(*args),\n [t, p])\n t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)\n assert_array_almost_equal([t,p],tpr)\n args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)\n assert_array_almost_equal(stats.ttest_ind_from_stats(*args),\n [t, p])\n\n # test scalars\n with suppress_warnings() as sup, np.errstate(invalid=\"ignore\"):\n sup.filter(RuntimeWarning, \"Degrees of freedom <= 0 for slice\")\n t, p = stats.ttest_ind(4., 3.)\n assert_(np.isnan(t))\n assert_(np.isnan(p))\n\n # test on 3 dimensions\n rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])\n rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])\n t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)\n assert_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (2, 3))\n\n t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)\n assert_array_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (3, 2))\n\n # check nan policy\n np.random.seed(12345678)\n x = stats.norm.rvs(loc=5, scale=10, size=501)\n x[500] = np.nan\n y = stats.norm.rvs(loc=5, scale=10, size=500)\n\n with np.errstate(invalid=\"ignore\"):\n assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))\n\n assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),\n (0.24779670949091914, 0.80434267337517906))\n assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')\n assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')\n\n # test zero division problem\n t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])\n assert_equal((np.abs(t), p), (np.inf, 0))\n\n with np.errstate(invalid=\"ignore\"):\n assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))\n\n # check that nan in input array result in nan output\n anan = np.array([[1, np.nan], [-1, 1]])\n assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),\n ([0, np.nan], [1, np.nan]))\n\n\ndef test_ttest_ind_with_uneq_var():\n # check vs. 
R\n a = (1, 2, 3)\n b = (1.1, 2.9, 4.2)\n pr = 0.53619490753126731\n tr = -0.68649512735572582\n t, p = stats.ttest_ind(a, b, equal_var=False)\n assert_array_almost_equal([t,p], [tr, pr])\n # test from desc stats API\n assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),\n equal_var=False),\n [t, p])\n\n a = (1, 2, 3, 4)\n pr = 0.84354139131608286\n tr = -0.2108663315950719\n t, p = stats.ttest_ind(a, b, equal_var=False)\n assert_array_almost_equal([t,p], [tr, pr])\n assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),\n equal_var=False),\n [t, p])\n\n # regression test\n tr = 1.0912746897927283\n tr_uneq_n = 0.66745638708050492\n pr = 0.27647831993021388\n pr_uneq_n = 0.50873585065616544\n tpr = ([tr,-tr],[pr,pr])\n\n rvs3 = np.linspace(1,100, 25)\n rvs2 = np.linspace(1,100,100)\n rvs1 = np.linspace(5,105,100)\n rvs1_2D = np.array([rvs1, rvs2])\n rvs2_2D = np.array([rvs2, rvs1])\n\n t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)\n assert_array_almost_equal([t,p],(tr,pr))\n assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,\n rvs2),\n equal_var=False),\n (t, p))\n\n t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)\n assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))\n assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,\n rvs3),\n equal_var=False),\n (t, p))\n\n t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)\n assert_array_almost_equal([t,p],tpr)\n args = _desc_stats(rvs1_2D.T, rvs2_2D.T)\n assert_array_almost_equal(stats.ttest_ind_from_stats(*args,\n equal_var=False),\n (t, p))\n\n t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)\n assert_array_almost_equal([t,p],tpr)\n args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)\n assert_array_almost_equal(stats.ttest_ind_from_stats(*args,\n equal_var=False),\n (t, p))\n\n # test for namedtuple attribute results\n attributes = ('statistic', 'pvalue')\n res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)\n check_named_results(res, attributes)\n\n # test on 3 dimensions\n rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])\n rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])\n t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)\n assert_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (2, 3))\n args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)\n t, p = stats.ttest_ind_from_stats(*args, equal_var=False)\n assert_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (2, 3))\n\n t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2),\n axis=2, equal_var=False)\n assert_array_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (3, 2))\n args = _desc_stats(np.rollaxis(rvs1_3D, 2),\n np.rollaxis(rvs2_3D, 2), axis=2)\n t, p = stats.ttest_ind_from_stats(*args, equal_var=False)\n assert_array_almost_equal(np.abs(t), np.abs(tr))\n assert_array_almost_equal(np.abs(p), pr)\n assert_equal(t.shape, (3, 2))\n\n # test zero division problem\n t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)\n assert_equal((np.abs(t), p), (np.inf, 0))\n with np.errstate(all='ignore'):\n assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),\n (np.nan, np.nan))\n\n # check that nan in input array result in nan output\n anan = np.array([[1, np.nan], [-1, 1]])\n assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),\n ([0, np.nan], [1, 
np.nan]))\n\n\ndef test_ttest_ind_nan_2nd_arg():\n # regression test for gh-6134: nans in the second arg were not handled\n x = [np.nan, 2.0, 3.0, 4.0]\n y = [1.0, 2.0, 1.0, 2.0]\n\n r1 = stats.ttest_ind(x, y, nan_policy='omit')\n r2 = stats.ttest_ind(y, x, nan_policy='omit')\n assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)\n assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)\n\n # NB: arguments are not paired when NaNs are dropped\n r3 = stats.ttest_ind(y, x[1:])\n assert_allclose(r2, r3, atol=1e-15)\n\n # .. and this is consistent with R. R code:\n # x = c(NA, 2.0, 3.0, 4.0)\n # y = c(1.0, 2.0, 1.0, 2.0)\n # t.test(x, y, var.equal=TRUE)\n assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15)\n\n\ndef test_gh5686():\n mean1, mean2 = np.array([1, 2]), np.array([3, 4])\n std1, std2 = np.array([5, 3]), np.array([4, 5])\n nobs1, nobs2 = np.array([130, 140]), np.array([100, 150])\n # This will raise a TypeError unless gh-5686 is fixed.\n stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)\n\n\ndef test_ttest_1samp_new():\n n1, n2, n3 = (10,15,20)\n rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))\n\n # check multidimensional array and correct axis handling\n # deterministic rvn1 and rvn2 would be better as in test_ttest_rel\n t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)\n t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)\n t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)\n assert_array_almost_equal(t1,t2, decimal=14)\n assert_almost_equal(t1[0,0],t3, decimal=14)\n assert_equal(t1.shape, (n2,n3))\n\n t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)\n t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)\n t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)\n assert_array_almost_equal(t1,t2, decimal=14)\n assert_almost_equal(t1[0,0],t3, decimal=14)\n assert_equal(t1.shape, (n1,n3))\n\n t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)\n t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)\n t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)\n assert_array_almost_equal(t1,t2, decimal=14)\n assert_almost_equal(t1[0,0],t3, decimal=14)\n assert_equal(t1.shape, (n1,n2))\n\n # test zero division problem\n t, p = stats.ttest_1samp([0, 0, 0], 1)\n assert_equal((np.abs(t), p), (np.inf, 0))\n\n with np.errstate(all='ignore'):\n assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))\n\n # check that nan in input array result in nan output\n anan = np.array([[1, np.nan],[-1, 1]])\n assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))\n\n\nclass TestDescribe(object):\n def test_describe_scalar(self):\n with suppress_warnings() as sup, np.errstate(invalid=\"ignore\"):\n sup.filter(RuntimeWarning, \"Degrees of freedom <= 0 for slice\")\n n, mm, m, v, sk, kurt = stats.describe(4.)\n assert_equal(n, 1)\n assert_equal(mm, (4.0, 4.0))\n assert_equal(m, 4.0)\n assert_(np.isnan(v))\n assert_array_almost_equal(sk, 0.0, decimal=13)\n assert_array_almost_equal(kurt, -3.0, decimal=13)\n\n def test_describe_numbers(self):\n x = np.vstack((np.ones((3,4)), 2 * np.ones((2,4))))\n nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))\n mc = np.array([1.4, 1.4, 1.4, 1.4])\n vc = np.array([0.3, 0.3, 0.3, 0.3])\n skc = [0.40824829046386357] * 4\n kurtc = [-1.833333333333333] * 4\n n, mm, m, v, sk, kurt = stats.describe(x)\n assert_equal(n, nc)\n assert_equal(mm, mmc)\n assert_equal(m, mc)\n assert_equal(v, vc)\n assert_array_almost_equal(sk, skc, decimal=13)\n assert_array_almost_equal(kurt, kurtc, decimal=13)\n n, mm, m, v, sk, kurt = 
stats.describe(x.T, axis=1)\n assert_equal(n, nc)\n assert_equal(mm, mmc)\n assert_equal(m, mc)\n assert_equal(v, vc)\n assert_array_almost_equal(sk, skc, decimal=13)\n assert_array_almost_equal(kurt, kurtc, decimal=13)\n\n x = np.arange(10.)\n x[9] = np.nan\n\n nc, mmc = (9, (0.0, 8.0))\n mc = 4.0\n vc = 7.5\n skc = 0.0\n kurtc = -1.2300000000000002\n n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')\n assert_equal(n, nc)\n assert_equal(mm, mmc)\n assert_equal(m, mc)\n assert_equal(v, vc)\n assert_array_almost_equal(sk, skc)\n assert_array_almost_equal(kurt, kurtc, decimal=13)\n\n assert_raises(ValueError, stats.describe, x, nan_policy='raise')\n assert_raises(ValueError, stats.describe, x, nan_policy='foobar')\n\n def test_describe_result_attributes(self):\n actual = stats.describe(np.arange(5))\n attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',\n 'kurtosis')\n check_named_results(actual, attributes)\n\n def test_describe_ddof(self):\n x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4))))\n nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))\n mc = np.array([1.4, 1.4, 1.4, 1.4])\n vc = np.array([0.24, 0.24, 0.24, 0.24])\n skc = [0.40824829046386357] * 4\n kurtc = [-1.833333333333333] * 4\n n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)\n assert_equal(n, nc)\n assert_allclose(mm, mmc, rtol=1e-15)\n assert_allclose(m, mc, rtol=1e-15)\n assert_allclose(v, vc, rtol=1e-15)\n assert_array_almost_equal(sk, skc, decimal=13)\n assert_array_almost_equal(kurt, kurtc, decimal=13)\n\n def test_describe_axis_none(self):\n x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4))))\n\n # expected values\n e_nobs, e_minmax = (20, (1.0, 2.0))\n e_mean = 1.3999999999999999\n e_var = 0.25263157894736848\n e_skew = 0.4082482904638634\n e_kurt = -1.8333333333333333\n\n # actual values\n a = stats.describe(x, axis=None)\n\n assert_equal(a.nobs, e_nobs)\n assert_almost_equal(a.minmax, e_minmax)\n assert_almost_equal(a.mean, e_mean)\n assert_almost_equal(a.variance, e_var)\n assert_array_almost_equal(a.skewness, e_skew, decimal=13)\n assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)\n\n def test_describe_empty(self):\n assert_raises(ValueError, stats.describe, [])\n\n\ndef test_normalitytests():\n assert_raises(ValueError, stats.skewtest, 4.)\n assert_raises(ValueError, stats.kurtosistest, 4.)\n assert_raises(ValueError, stats.normaltest, 4.)\n\n # numbers verified with R: dagoTest in package fBasics\n st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)\n pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n attributes = ('statistic', 'pvalue')\n\n assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))\n check_named_results(stats.normaltest(x), attributes)\n assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))\n check_named_results(stats.skewtest(x), attributes)\n assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))\n check_named_results(stats.kurtosistest(x), attributes)\n\n # Test axis=None (equal to axis=0 for 1-D input)\n assert_array_almost_equal(stats.normaltest(x, axis=None),\n (st_normal, pv_normal))\n assert_array_almost_equal(stats.skewtest(x, axis=None),\n (st_skew, pv_skew))\n assert_array_almost_equal(stats.kurtosistest(x, axis=None),\n (st_kurt, pv_kurt))\n\n x = np.arange(10.)\n x[9] = np.nan\n with np.errstate(invalid=\"ignore\"):\n assert_array_equal(stats.skewtest(x), (np.nan, np.nan))\n\n expected = (1.0184643553962129, 0.30845733195153502)\n 
assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)\n\n    with np.errstate(all='ignore'):\n        assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')\n    assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')\n\n    x = np.arange(30.)\n    x[29] = np.nan\n    with np.errstate(all='ignore'):\n        assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))\n\n    expected = (-2.2683547379505273, 0.023307594135872967)\n    assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),\n                              expected)\n\n    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')\n    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')\n\n    with np.errstate(all='ignore'):\n        assert_array_equal(stats.normaltest(x), (np.nan, np.nan))\n\n    expected = (6.2260409514287449, 0.04446644248650191)\n    assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)\n\n    assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')\n    assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')\n\n    # regression test for issue gh-9033: x clearly non-normal but power of\n    # negative denom needs to be handled correctly to reject normality\n    counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]\n    x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])\n    assert_equal(stats.kurtosistest(x)[1] < 0.01, True)\n\n\nclass TestRankSums(object):\n    def test_ranksums_result_attributes(self):\n        res = stats.ranksums(np.arange(5), np.arange(25))\n        attributes = ('statistic', 'pvalue')\n        check_named_results(res, attributes)\n\n\nclass TestJarqueBera(object):\n    def test_jarque_bera_stats(self):\n        np.random.seed(987654321)\n        x = np.random.normal(0, 1, 100000)\n        y = np.random.chisquare(10000, 100000)\n        z = np.random.rayleigh(1, 100000)\n\n        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])\n        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])\n        assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])\n\n    def test_jarque_bera_array_like(self):\n        np.random.seed(987654321)\n        x = np.random.normal(0, 1, 100000)\n\n        JB1, p1 = stats.jarque_bera(list(x))\n        JB2, p2 = stats.jarque_bera(tuple(x))\n        JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))\n\n        assert_(JB1 == JB2 == JB3)\n        assert_(p1 == p2 == p3)\n\n    def test_jarque_bera_size(self):\n        assert_raises(ValueError, stats.jarque_bera, [])\n\n\ndef test_skewtest_too_few_samples():\n    # Regression test for ticket #1492.\n    # skewtest requires at least 8 samples; 7 should raise a ValueError.\n    x = np.arange(7.0)\n    assert_raises(ValueError, stats.skewtest, x)\n\n\ndef test_kurtosistest_too_few_samples():\n    # Regression test for ticket #1425.\n    # kurtosistest requires at least 5 samples; 4 should raise a ValueError.\n    x = np.arange(4.0)\n    assert_raises(ValueError, stats.kurtosistest, x)\n\n\n
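# Editor's note -- the following is an illustrative sketch added in editing,\n# not part of the original suite. normaltest() exercised above implements\n# D'Agostino and Pearson's K^2 test: its statistic is the sum of the squared\n# z-scores returned by skewtest() and kurtosistest(), and its p-value is the\n# chi-square (df=2) survival function of that sum. The test name below is an\n# assumption of this sketch.\ndef test_normaltest_is_sum_of_squared_z_scores():\n    x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n    z_skew, _ = stats.skewtest(x)\n    z_kurt, _ = stats.kurtosistest(x)\n    k2, p = stats.normaltest(x)\n    assert_allclose(k2, z_skew**2 + z_kurt**2)\n    assert_allclose(p, stats.distributions.chi2.sf(k2, 2))\n\n\n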
class TestMannWhitneyU(object):\n    X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,\n         20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,\n         19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,\n         20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,\n         20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,\n         19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,\n         17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,\n         20.4970638083542, 19.5567594734914]\n\n    Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,\n         19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,\n         19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,\n         18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,\n         19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]\n\n    significant = 14\n\n    def test_mannwhitneyu_one_sided(self):\n        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')\n        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')\n        u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')\n        u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')\n\n        assert_equal(p1, p2)\n        assert_equal(p3, p4)\n        assert_(p1 != p3)\n        assert_equal(u1, 498)\n        assert_equal(u2, 102)\n        assert_equal(u3, 498)\n        assert_equal(u4, 102)\n        assert_approx_equal(p1, 0.999957683256589, significant=self.significant)\n        assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)\n\n    def test_mannwhitneyu_two_sided(self):\n        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')\n        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')\n\n        assert_equal(p1, p2)\n        assert_equal(u1, 498)\n        assert_equal(u2, 102)\n        assert_approx_equal(p1, 9.188326533255e-05,\n                            significant=self.significant)\n\n    def test_mannwhitneyu_default(self):\n        # The default value for alternative is None\n        with suppress_warnings() as sup:\n            sup.filter(DeprecationWarning,\n                       \"Calling `mannwhitneyu` without .*`alternative`\")\n            u1, p1 = stats.mannwhitneyu(self.X, self.Y)\n            u2, p2 = stats.mannwhitneyu(self.Y, self.X)\n            u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None)\n\n        assert_equal(p1, p2)\n        assert_equal(p1, p3)\n        assert_equal(u1, 102)\n        assert_equal(u2, 102)\n        assert_equal(u3, 102)\n        assert_approx_equal(p1, 4.5941632666275e-05,\n                            significant=self.significant)\n\n    def test_mannwhitneyu_no_correct_one_sided(self):\n        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,\n                                    alternative='less')\n        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,\n                                    alternative='greater')\n        u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,\n                                    alternative='greater')\n        u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,\n                                    alternative='less')\n\n        assert_equal(p1, p2)\n        assert_equal(p3, p4)\n        assert_(p1 != p3)\n        assert_equal(u1, 498)\n        assert_equal(u2, 102)\n        assert_equal(u3, 498)\n        assert_equal(u4, 102)\n        assert_approx_equal(p1, 0.999955905990004, significant=self.significant)\n        assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)\n\n    def test_mannwhitneyu_no_correct_two_sided(self):\n        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,\n                                    alternative='two-sided')\n        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,\n                                    alternative='two-sided')\n\n        assert_equal(p1, p2)\n        assert_equal(u1, 498)\n        assert_equal(u2, 102)\n        assert_approx_equal(p1, 8.81880199916178e-05,\n                            significant=self.significant)\n\n    def test_mannwhitneyu_no_correct_default(self):\n        # The default value for alternative is None\n        with suppress_warnings() as sup:\n            sup.filter(DeprecationWarning,\n                       \"Calling `mannwhitneyu` without .*`alternative`\")\n            u1, p1 = stats.mannwhitneyu(self.X, self.Y, False)\n            u2, p2 = stats.mannwhitneyu(self.Y, self.X, False)\n            u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,\n                                        alternative=None)\n\n        assert_equal(p1, p2)\n        assert_equal(p1, p3)\n        assert_equal(u1, 102)\n        assert_equal(u2, 102)\n        assert_equal(u3, 102)\n        assert_approx_equal(p1, 4.40940099958089e-05,\n                            significant=self.significant)\n\n    def test_mannwhitneyu_ones(self):\n        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n                      1., 1., 1., 1., 1., 1., 1., 2., 
1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1.])\n\n y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,\n 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,\n 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,\n 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,\n 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,\n 1., 1., 1., 1.])\n\n # p-value verified with matlab and R to 5 significant digits\n assert_array_almost_equal(stats.stats.mannwhitneyu(x, y,\n alternative='less'),\n (16980.5, 2.8214327656317373e-005),\n decimal=12)\n\n def test_mannwhitneyu_result_attributes(self):\n # test for namedtuple attribute results\n attributes = ('statistic', 'pvalue')\n res = stats.mannwhitneyu(self.X, self.Y, alternative=\"less\")\n check_named_results(res, attributes)\n\n\ndef test_pointbiserial():\n # same as mstats test except for the nan\n # Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output\n x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,\n 0,0,0,0,1]\n y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,\n 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,\n 0.8,0.7,0.6,0.5,0.2,0.2,0.1]\n assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)\n\n # test for namedtuple attribute results\n attributes = ('correlation', 'pvalue')\n res = stats.pointbiserialr(x, y)\n check_named_results(res, attributes)\n\n\ndef test_obrientransform():\n # A couple tests calculated by hand.\n x1 = np.array([0, 2, 4])\n t1 = stats.obrientransform(x1)\n expected = [7, -2, 7]\n assert_allclose(t1[0], expected)\n\n x2 = np.array([0, 3, 6, 9])\n t2 = stats.obrientransform(x2)\n expected = np.array([30, 0, 0, 30])\n assert_allclose(t2[0], expected)\n\n # Test two arguments.\n a, b = stats.obrientransform(x1, x2)\n assert_equal(a, t1[0])\n assert_equal(b, t2[0])\n\n # Test three arguments.\n a, b, c = stats.obrientransform(x1, x2, x1)\n assert_equal(a, t1[0])\n assert_equal(b, t2[0])\n assert_equal(c, t1[0])\n\n # This is a regression test to check np.var replacement.\n # The author of this test didn't separately verify the numbers.\n x1 = np.arange(5)\n result = np.array(\n [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],\n [21.66666667, 4.16666667, -1.66666667, 
4.16666667, 21.66666667]])\n assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)\n\n # Example from \"O'Brien Test for Homogeneity of Variance\"\n # by Herve Abdi.\n values = range(5, 11)\n reps = np.array([5, 11, 9, 3, 2, 2])\n data = np.repeat(values, reps)\n transformed_values = np.array([3.1828, 0.5591, 0.0344,\n 1.6086, 5.2817, 11.0538])\n expected = np.repeat(transformed_values, reps)\n result = stats.obrientransform(data)\n assert_array_almost_equal(result[0], expected, decimal=4)\n\n\nclass HarMeanTestCase:\n def test_1dlist(self):\n # Test a 1d list\n a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n b = 34.1417152147\n self.do(a, b)\n\n def test_1darray(self):\n # Test a 1d array\n a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n b = 34.1417152147\n self.do(a, b)\n\n def test_1dma(self):\n # Test a 1d masked array\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n b = 34.1417152147\n self.do(a, b)\n\n def test_1dmavalue(self):\n # Test a 1d masked array with a masked value\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],\n mask=[0,0,0,0,0,0,0,0,0,1])\n b = 31.8137186141\n self.do(a, b)\n\n # Note the next tests use axis=None as default, not axis=0\n def test_2dlist(self):\n # Test a 2d list\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = 38.6696271841\n self.do(a, b)\n\n def test_2darray(self):\n # Test a 2d array\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = 38.6696271841\n self.do(np.array(a), b)\n\n def test_2dma(self):\n # Test a 2d masked array\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = 38.6696271841\n self.do(np.ma.array(a), b)\n\n def test_2daxis0(self):\n # Test a 2d list with axis=0\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])\n self.do(a, b, axis=0)\n\n def test_2daxis1(self):\n # Test a 2d list with axis=1\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = np.array([19.2, 63.03939962, 103.80078637])\n self.do(a, b, axis=1)\n\n def test_2dmatrixdaxis0(self):\n # Test a 2d list with axis=0\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]])\n self.do(matrix(a), b, axis=0)\n\n def test_2dmatrixaxis1(self):\n # Test a 2d list with axis=1\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b = matrix([[19.2, 63.03939962, 103.80078637]]).T\n self.do(matrix(a), b, axis=1)\n\n\nclass TestHarMean(HarMeanTestCase):\n def do(self, a, b, axis=None, dtype=None):\n x = stats.hmean(a, axis=axis, dtype=dtype)\n assert_almost_equal(b, x)\n assert_equal(x.dtype, dtype)\n\n\nclass GeoMeanTestCase:\n def test_1dlist(self):\n # Test a 1d list\n a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n b = 45.2872868812\n self.do(a, b)\n\n def test_1darray(self):\n # Test a 1d array\n a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n b = 45.2872868812\n self.do(a, b)\n\n def test_1dma(self):\n # Test a 1d masked array\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n b = 45.2872868812\n self.do(a, b)\n\n def test_1dmavalue(self):\n # Test a 1d masked array with a masked value\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1])\n b = 41.4716627439\n self.do(a, b)\n\n # Note the next tests use axis=None as default, not axis=0\n def test_2dlist(self):\n # Test a 2d list\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 
100, 110, 120]]\n        b = 52.8885199\n        self.do(a, b)\n\n    def test_2darray(self):\n        # Test a 2d array\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = 52.8885199\n        self.do(np.array(a), b)\n\n    def test_2dma(self):\n        # Test a 2d masked array\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = 52.8885199\n        self.do(np.ma.array(a), b)\n\n    def test_2daxis0(self):\n        # Test a 2d list with axis=0\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])\n        self.do(a, b, axis=0)\n\n    def test_2daxis1(self):\n        # Test a 2d list with axis=1\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = np.array([22.13363839, 64.02171746, 104.40086817])\n        self.do(a, b, axis=1)\n\n    def test_2dmatrixdaxis0(self):\n        # Test a 2d list with axis=0\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]])\n        self.do(matrix(a), b, axis=0)\n\n    def test_2dmatrixaxis1(self):\n        # Test a 2d list with axis=1\n        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n        b = matrix([[22.13363839, 64.02171746, 104.40086817]]).T\n        self.do(matrix(a), b, axis=1)\n\n    def test_1dlist0(self):\n        # Test a 1d list with zero element\n        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]\n        b = 0.0 # due to exp(-inf)=0\n        olderr = np.seterr(all='ignore')\n        try:\n            self.do(a, b)\n        finally:\n            np.seterr(**olderr)\n\n    def test_1darray0(self):\n        # Test a 1d array with zero element\n        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])\n        b = 0.0 # due to exp(-inf)=0\n        olderr = np.seterr(all='ignore')\n        try:\n            self.do(a, b)\n        finally:\n            np.seterr(**olderr)\n\n    def test_1dma0(self):\n        # Test a 1d masked array with zero element\n        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])\n        b = 41.4716627439\n        olderr = np.seterr(all='ignore')\n        try:\n            self.do(a, b)\n        finally:\n            np.seterr(**olderr)\n\n    def test_1dmainf(self):\n        # Test a 1d masked array with negative element\n        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])\n        b = 41.4716627439\n        olderr = np.seterr(all='ignore')\n        try:\n            self.do(a, b)\n        finally:\n            np.seterr(**olderr)\n\n\nclass TestGeoMean(GeoMeanTestCase):\n    def do(self, a, b, axis=None, dtype=None):\n        # Note this doesn't test when axis is not specified\n        x = stats.gmean(a, axis=axis, dtype=dtype)\n        assert_almost_equal(b, x)\n        assert_equal(x.dtype, dtype)\n\n\ndef test_binomtest():\n    # precision tests compared to R for ticket:986\n    pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),\n                         np.linspace(0.85,0.95,5)))\n    n = 501\n    x = 450\n    results = [0.0, 0.0, 1.0159969301994141e-304,\n               2.9752418572150531e-275, 7.7668382922535275e-250,\n               2.3381250925167094e-099, 7.8284591587323951e-081,\n               9.9155947819961383e-065, 2.8729390725176308e-050,\n               1.7175066298388421e-037, 0.0021070691951093692,\n               0.12044570587262322, 0.88154763174802508, 0.027120993063129286,\n               2.6102587134694721e-006]\n\n    for p, res in zip(pp,results):\n        assert_approx_equal(stats.binom_test(x, n, p), res,\n                            significant=12, err_msg='fail for p=%f' % p)\n\n    assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024,\n                        significant=12, err_msg='fail for p=%f' % p)\n\n\ndef test_binomtest2():\n    # test added for issue #2384\n    res2 = [\n        [1.0, 1.0],\n        [0.5,1.0,0.5],\n        [0.25,1.00,1.00,0.25],\n        [0.125,0.625,1.000,0.625,0.125],\n        [0.0625,0.3750,1.0000,1.0000,0.3750,0.0625],\n        [0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125],\n        
[0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625],\n [0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625,\n 0.0703125,0.0078125],\n [0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000,\n 0.50781250,0.17968750,0.03906250,0.00390625],\n [0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000,\n 0.753906250,0.343750000,0.109375000,0.021484375,0.001953125]\n ]\n\n for k in range(1, 11):\n res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)]\n assert_almost_equal(res1, res2[k-1], decimal=10)\n\n\ndef test_binomtest3():\n # test added for issue #2384\n # test when x == n*p and neighbors\n res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11)\n for k in range(2, 11)]\n assert_equal(res3, np.ones(len(res3), int))\n\n #> bt=c()\n #> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}\n binom_testm1 = np.array([\n 0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,\n 0.5981224279835393, 0.603430543396034, 0.607304096221924,\n 0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,\n 0.68853759765625, 0.6980101120000006, 0.703906431368616,\n 0.70793209416498, 0.7108561134173507, 0.713076544331419,\n 0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,\n 0.74986110468096, 0.7548015520398076, 0.7581671424768577,\n 0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,\n 0.761553963657302, 0.774800934828818, 0.7818005980538996,\n 0.78613491480358, 0.789084353140195, 0.7912217659828884,\n 0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,\n 0.7976688481430754, 0.8039848974727624, 0.807891868948366,\n 0.8105487660137676, 0.812473307174702, 0.8139318233591120,\n 0.815075399104785, 0.7744140625, 0.8037322594985427,\n 0.814742863657656, 0.8205425178645808, 0.8241275984172285,\n 0.8265645374416, 0.8283292196088257, 0.829666291102775,\n 0.8307144686362666, 0.7905273437499996, 0.8178712053954738,\n 0.828116983756619, 0.833508948940494, 0.8368403871552892,\n 0.839104213210105, 0.840743186196171, 0.84198481438049,\n 0.8429580531563676, 0.803619384765625, 0.829338573944648,\n 0.8389591907548646, 0.84401876783902, 0.84714369697889,\n 0.8492667010581667, 0.850803474598719, 0.851967542858308,\n 0.8528799045949524, 0.8145294189453126, 0.838881732845347,\n 0.847979024541911, 0.852760894015685, 0.8557134656773457,\n 0.8577190131799202, 0.85917058278431, 0.860270010472127,\n 0.861131648404582, 0.823802947998047, 0.846984756807511,\n 0.855635653643743, 0.860180994825685, 0.86298688573253,\n 0.864892525675245, 0.866271647085603, 0.867316125625004,\n 0.8681346531755114\n ])\n\n # > bt=c()\n # > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}\n\n binom_testp1 = np.array([\n 0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,\n 0.2635138663069203, 0.2636951804161073, 0.2638162407564354,\n 0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,\n 0.4295746560000003, 0.43473045988554, 0.4383309503172684,\n 0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,\n 0.4927602499618962, 0.5096031427383425, 0.5189636628480,\n 0.5249280070771274, 0.5290623300865124, 0.5320974248125793,\n 0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,\n 0.5669248746708034, 0.576436455045805, 0.5824538812831795,\n 0.5866053321547824, 0.589642781414643, 0.5919618019300193,\n 0.593790427805202, 0.75390625, 
0.590868349763505, 0.607983393277209,\n 0.617303847446822, 0.623172512167948, 0.627208862156123,\n 0.6301556891501057, 0.632401894928977, 0.6341708982290303,\n 0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,\n 0.65392850011132, 0.657816519817211, 0.660650782947676,\n 0.662808780346311, 0.6645068560246006, 0.7905273437499996,\n 0.6478843304312477, 0.6640468318879372, 0.6727589686071775,\n 0.6782129857784873, 0.681950188903695, 0.684671508668418,\n 0.686741824999918, 0.688369886732168, 0.803619384765625,\n 0.668716055304315, 0.684360013879534, 0.6927642396829181,\n 0.6980155964704895, 0.701609591890657, 0.7042244320992127,\n 0.7062125081341817, 0.707775152962577, 0.8145294189453126,\n 0.686243374488305, 0.7013873696358975, 0.709501223328243,\n 0.714563595144314, 0.718024953392931, 0.7205416252126137,\n 0.722454130389843, 0.723956813292035, 0.823802947998047,\n 0.701255953767043, 0.715928221686075, 0.723772209289768,\n 0.7286603031173616, 0.7319999279787631, 0.7344267920995765,\n 0.736270323773157, 0.737718376096348\n ])\n\n res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11)\n for k in range(2, 11)]\n res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11)\n for k in range(2, 11)]\n\n assert_almost_equal(res4_p1, binom_testp1, decimal=13)\n assert_almost_equal(res4_m1, binom_testm1, decimal=13)\n\n\nclass TestTrim(object):\n # test trim functions\n def test_trim1(self):\n a = np.arange(11)\n assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))\n assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))\n assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),\n np.arange(2, 11))\n assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),\n np.arange(3, 11))\n assert_equal(stats.trim1(a, 1.0), [])\n assert_equal(stats.trim1(a, 1.0, tail='left'), [])\n\n # empty input\n assert_equal(stats.trim1([], 0.1), [])\n assert_equal(stats.trim1([], 3/11., tail='left'), [])\n assert_equal(stats.trim1([], 4/6.), [])\n\n def test_trimboth(self):\n a = np.arange(11)\n assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))\n assert_equal(np.sort(stats.trimboth(a, 0.2)),\n np.array([2, 3, 4, 5, 6, 7, 8]))\n assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),\n np.arange(4, 20).reshape(4, 4))\n assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,\n 2/6.)),\n np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))\n assert_raises(ValueError, stats.trimboth,\n np.arange(24).reshape(4, 6).T, 4/6.)\n\n # empty input\n assert_equal(stats.trimboth([], 0.1), [])\n assert_equal(stats.trimboth([], 3/11.), [])\n assert_equal(stats.trimboth([], 4/6.), [])\n\n def test_trim_mean(self):\n # don't use pre-sorted arrays\n a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])\n idx = np.array([3, 5, 0, 1, 2, 4])\n a2 = np.arange(24).reshape(6, 4)[idx, :]\n a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]\n assert_equal(stats.trim_mean(a3, 2/6.),\n np.array([2.5, 8.5, 14.5, 20.5]))\n assert_equal(stats.trim_mean(a2, 2/6.),\n np.array([10., 11., 12., 13.]))\n idx4 = np.array([1, 0, 3, 2])\n a4 = np.arange(24).reshape(4, 6)[idx4, :]\n assert_equal(stats.trim_mean(a4, 2/6.),\n np.array([9., 10., 11., 12., 13., 14.]))\n # shuffled arange(24) as array_like\n a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,\n 20, 2, 14, 4, 13, 8, 3]\n assert_equal(stats.trim_mean(a, 2/6.), 11.5)\n assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)\n\n # check axis argument\n np.random.seed(1234)\n a = np.random.randint(20, size=(5, 6, 4, 7))\n for 
axis in [0, 1, 2, 3, -1]:\n res1 = stats.trim_mean(a, 2/6., axis=axis)\n res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.)\n assert_equal(res1, res2)\n\n res1 = stats.trim_mean(a, 2/6., axis=None)\n res2 = stats.trim_mean(a.ravel(), 2/6.)\n assert_equal(res1, res2)\n\n assert_raises(ValueError, stats.trim_mean, a, 0.6)\n\n # empty input\n assert_equal(stats.trim_mean([], 0.0), np.nan)\n assert_equal(stats.trim_mean([], 0.6), np.nan)\n\n\nclass TestSigmaClip(object):\n def test_sigmaclip1(self):\n a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))\n fact = 4 # default\n c, low, upp = stats.sigmaclip(a)\n assert_(c.min() > low)\n assert_(c.max() < upp)\n assert_equal(low, c.mean() - fact*c.std())\n assert_equal(upp, c.mean() + fact*c.std())\n assert_equal(c.size, a.size)\n\n def test_sigmaclip2(self):\n a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))\n fact = 1.5\n c, low, upp = stats.sigmaclip(a, fact, fact)\n assert_(c.min() > low)\n assert_(c.max() < upp)\n assert_equal(low, c.mean() - fact*c.std())\n assert_equal(upp, c.mean() + fact*c.std())\n assert_equal(c.size, 4)\n assert_equal(a.size, 36) # check original array unchanged\n\n def test_sigmaclip3(self):\n a = np.concatenate((np.linspace(9.5, 10.5, 11),\n np.linspace(-100, -50, 3)))\n fact = 1.8\n c, low, upp = stats.sigmaclip(a, fact, fact)\n assert_(c.min() > low)\n assert_(c.max() < upp)\n assert_equal(low, c.mean() - fact*c.std())\n assert_equal(upp, c.mean() + fact*c.std())\n assert_equal(c, np.linspace(9.5, 10.5, 11))\n\n def test_sigmaclip_result_attributes(self):\n a = np.concatenate((np.linspace(9.5, 10.5, 11),\n np.linspace(-100, -50, 3)))\n fact = 1.8\n res = stats.sigmaclip(a, fact, fact)\n attributes = ('clipped', 'lower', 'upper')\n check_named_results(res, attributes)\n\n def test_std_zero(self):\n # regression test #8632\n x = np.ones(10)\n assert_equal(stats.sigmaclip(x)[0], x)\n\n\nclass TestFOneWay(object):\n def test_trivial(self):\n # A trivial test of stats.f_oneway, with F=0.\n F, p = stats.f_oneway([0,2], [0,2])\n assert_equal(F, 0.0)\n\n def test_basic(self):\n # Despite being a floating point calculation, this data should\n # result in F being exactly 2.0.\n F, p = stats.f_oneway([0,2], [2,4])\n assert_equal(F, 2.0)\n\n def test_large_integer_array(self):\n a = np.array([655, 788], dtype=np.uint16)\n b = np.array([789, 772], dtype=np.uint16)\n F, p = stats.f_oneway(a, b)\n assert_almost_equal(F, 0.77450216931805538)\n\n def test_result_attributes(self):\n a = np.array([655, 788], dtype=np.uint16)\n b = np.array([789, 772], dtype=np.uint16)\n res = stats.f_oneway(a, b)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n def test_nist(self):\n # These are the nist ANOVA files. 
They can be found at:\n # https://www.itl.nist.gov/div898/strd/anova/anova.html\n filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',\n 'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',\n 'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']\n\n for test_case in filenames:\n rtol = 1e-7\n fname = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'data/nist_anova', test_case))\n with open(fname, 'r') as f:\n content = f.read().split('\\n')\n certified = [line.split() for line in content[40:48]\n if line.strip()]\n dataf = np.loadtxt(fname, skiprows=60)\n y, x = dataf.T\n y = y.astype(int)\n caty = np.unique(y)\n f = float(certified[0][-1])\n\n xlist = [x[y == i] for i in caty]\n res = stats.f_oneway(*xlist)\n\n # With the hard test cases we relax the tolerance a bit.\n hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')\n if test_case in hard_tc:\n rtol = 1e-4\n\n assert_allclose(res[0], f, rtol=rtol,\n err_msg='Failing testcase: %s' % test_case)\n\n\nclass TestKruskal(object):\n def test_simple(self):\n x = [1]\n y = [2]\n h, p = stats.kruskal(x, y)\n assert_equal(h, 1.0)\n assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))\n h, p = stats.kruskal(np.array(x), np.array(y))\n assert_equal(h, 1.0)\n assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))\n\n def test_basic(self):\n x = [1, 3, 5, 7, 9]\n y = [2, 4, 6, 8, 10]\n h, p = stats.kruskal(x, y)\n assert_approx_equal(h, 3./11, significant=10)\n assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))\n h, p = stats.kruskal(np.array(x), np.array(y))\n assert_approx_equal(h, 3./11, significant=10)\n assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))\n\n def test_simple_tie(self):\n x = [1]\n y = [1, 2]\n h_uncorr = 1.5**2 + 2*2.25**2 - 12\n corr = 0.75\n expected = h_uncorr / corr # 0.5\n h, p = stats.kruskal(x, y)\n # Since the expression is simple and the exact answer is 0.5, it\n # should be safe to use assert_equal().\n assert_equal(h, expected)\n\n def test_another_tie(self):\n x = [1, 1, 1, 2]\n y = [2, 2, 2, 2]\n h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9\n corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)\n expected = h_uncorr / corr\n h, p = stats.kruskal(x, y)\n assert_approx_equal(h, expected)\n\n def test_three_groups(self):\n # A test of stats.kruskal with three groups, with ties.\n x = [1, 1, 1]\n y = [2, 2, 2]\n z = [2, 2]\n h_uncorr = (12. / 8. / 9.) 
* (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0\n corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)\n expected = h_uncorr / corr # 7.0\n h, p = stats.kruskal(x, y, z)\n assert_approx_equal(h, expected)\n assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))\n\n def test_empty(self):\n # A test of stats.kruskal with three groups, with ties.\n x = [1, 1, 1]\n y = [2, 2, 2]\n z = []\n assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))\n\n def test_kruskal_result_attributes(self):\n x = [1, 3, 5, 7, 9]\n y = [2, 4, 6, 8, 10]\n res = stats.kruskal(x, y)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n def test_nan_policy(self):\n x = np.arange(10.)\n x[9] = np.nan\n assert_equal(stats.kruskal(x, x), (np.nan, np.nan))\n assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))\n assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')\n assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')\n\n\nclass TestCombinePvalues(object):\n\n def test_fisher(self):\n # Example taken from https://en.wikipedia.org/wiki/Fisher%27s_exact_test#Example\n xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')\n assert_approx_equal(p, 0.02156, significant=4)\n\n def test_stouffer(self):\n Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')\n assert_approx_equal(p, 0.01651, significant=4)\n\n def test_stouffer2(self):\n Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')\n assert_approx_equal(p, 0.5, significant=4)\n\n def test_weighted_stouffer(self):\n Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',\n weights=np.ones(3))\n assert_approx_equal(p, 0.01651, significant=4)\n\n def test_weighted_stouffer2(self):\n Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',\n weights=np.array((1, 4, 9)))\n assert_approx_equal(p, 0.1464, significant=4)\n\n\nclass TestCdfDistanceValidation(object):\n \"\"\"\n Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors\n for bad inputs.\n \"\"\"\n\n def test_distinct_value_and_weight_lengths(self):\n # When the number of weights does not match the number of values,\n # a ValueError should be raised.\n assert_raises(ValueError, stats.wasserstein_distance,\n [1], [2], [4], [3, 1])\n assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0])\n\n def test_zero_weight(self):\n # When a distribution is given zero weight, a ValueError should be\n # raised.\n assert_raises(ValueError, stats.wasserstein_distance,\n [0, 1], [2], [0, 0])\n assert_raises(ValueError, stats.wasserstein_distance,\n [0, 1], [2], [3, 1], [0])\n\n def test_negative_weights(self):\n # A ValueError should be raised if there are any negative weights.\n assert_raises(ValueError, stats.wasserstein_distance,\n [0, 1], [2, 2], [1, 1], [3, -1])\n\n def test_empty_distribution(self):\n # A ValueError should be raised when trying to measure the distance\n # between something and nothing.\n assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2])\n assert_raises(ValueError, stats.wasserstein_distance, [1], [])\n\n def test_inf_weight(self):\n # An inf weight is not valid.\n assert_raises(ValueError, stats.wasserstein_distance,\n [1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1])\n\n\nclass TestWassersteinDistance(object):\n \"\"\" Tests for wasserstein_distance() output values.\n \"\"\"\n\n def test_simple(self):\n # For basic distributions, the value of the Wasserstein distance is\n # straightforward.\n assert_almost_equal(\n stats.wasserstein_distance([0, 1], [0], [1, 
1], [1]),\n .5)\n assert_almost_equal(stats.wasserstein_distance(\n [0, 1], [0], [3, 1], [1]),\n .25)\n assert_almost_equal(stats.wasserstein_distance(\n [0, 2], [0], [1, 1], [1]),\n 1)\n assert_almost_equal(stats.wasserstein_distance(\n [0, 1, 2], [1, 2, 3]),\n 1)\n\n def test_same_distribution(self):\n # Any distribution moved to itself should have a Wasserstein distance of\n # zero.\n assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)\n assert_equal(\n stats.wasserstein_distance([1, 1, 1, 4], [4, 1],\n [1, 1, 1, 1], [1, 3]),\n 0)\n\n def test_shift(self):\n # If the whole distribution is shifted by x, then the Wasserstein\n # distance should be x.\n assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)\n assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)\n assert_almost_equal(\n stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),\n 10)\n assert_almost_equal(\n stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],\n [3, 1, 1], [1, 3, 1]),\n 2.5)\n\n def test_combine_weights(self):\n # Assigning a weight w to a value is equivalent to including that value\n # w times in the value array with weight of 1.\n assert_almost_equal(\n stats.wasserstein_distance(\n [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],\n [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),\n stats.wasserstein_distance([5, 0, 1], [0, 4, 3],\n [1, 2, 4], [1, 2, 4]))\n\n def test_collapse(self):\n # Collapsing a distribution to a point distribution at zero is\n # equivalent to taking the average of the absolute values of the values.\n u = np.arange(-10, 30, 0.3)\n v = np.zeros_like(u)\n assert_almost_equal(\n stats.wasserstein_distance(u, v),\n np.mean(np.abs(u)))\n\n u_weights = np.arange(len(u))\n v_weights = u_weights[::-1]\n assert_almost_equal(\n stats.wasserstein_distance(u, v, u_weights, v_weights),\n np.average(np.abs(u), weights=u_weights))\n\n def test_zero_weight(self):\n # Values with zero weight have no impact on the Wasserstein distance.\n assert_almost_equal(\n stats.wasserstein_distance([1, 2, 100000], [1, 1],\n [1, 1, 0], [1, 1]),\n stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))\n\n def test_inf_values(self):\n # Inf values can lead to an inf distance or trigger a RuntimeWarning\n # (and return NaN) if the distance is undefined.\n assert_equal(\n stats.wasserstein_distance([1, 2, np.inf], [1, 1]),\n np.inf)\n assert_equal(\n stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),\n np.inf)\n assert_equal(\n stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),\n np.inf)\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \"invalid value*\")\n assert_equal(\n stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),\n np.nan)\n\n\nclass TestEnergyDistance(object):\n \"\"\" Tests for energy_distance() output values.\n \"\"\"\n\n def test_simple(self):\n # For basic distributions, the value of the energy distance is\n # straightforward.\n assert_almost_equal(\n stats.energy_distance([0, 1], [0], [1, 1], [1]),\n np.sqrt(2) * .5)\n assert_almost_equal(stats.energy_distance(\n [0, 1], [0], [3, 1], [1]),\n np.sqrt(2) * .25)\n assert_almost_equal(stats.energy_distance(\n [0, 2], [0], [1, 1], [1]),\n 2 * .5)\n assert_almost_equal(\n stats.energy_distance([0, 1, 2], [1, 2, 3]),\n np.sqrt(2) * (3*(1./3**2))**.5)\n\n def test_same_distribution(self):\n # Any distribution moved to itself should have a energy distance of\n # zero.\n assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)\n assert_equal(\n stats.energy_distance([1, 1, 1, 
4], [4, 1], [1, 1, 1, 1], [1, 3]),\n 0)\n\n def test_shift(self):\n # If a single-point distribution is shifted by x, then the energy\n # distance should be sqrt(2) * sqrt(x).\n assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))\n assert_almost_equal(\n stats.energy_distance([-5], [5]),\n np.sqrt(2) * 10**.5)\n\n def test_combine_weights(self):\n # Assigning a weight w to a value is equivalent to including that value\n # w times in the value array with weight of 1.\n assert_almost_equal(\n stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],\n [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),\n stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))\n\n def test_zero_weight(self):\n # Values with zero weight have no impact on the energy distance.\n assert_almost_equal(\n stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),\n stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))\n\n def test_inf_values(self):\n # Inf values can lead to an inf distance or trigger a RuntimeWarning\n # (and return NaN) if the distance is undefined.\n assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)\n assert_equal(\n stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),\n np.inf)\n assert_equal(\n stats.energy_distance([1, -np.inf, np.inf], [1, 1]),\n np.inf)\n with suppress_warnings() as sup:\n r = sup.record(RuntimeWarning, \"invalid value*\")\n assert_equal(\n stats.energy_distance([1, 2, np.inf], [np.inf, 1]),\n np.nan)\n\n\nclass TestBrunnerMunzel(object):\n # Data from (Lumley, 1996)\n X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]\n Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]\n significant = 13\n\n def test_brunnermunzel_one_sided(self):\n # Results are compared with R's lawstat package.\n u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less')\n u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater')\n u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater')\n u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less')\n\n assert_approx_equal(p1, p2, significant=self.significant)\n assert_approx_equal(p3, p4, significant=self.significant)\n assert_(p1 != p3)\n assert_approx_equal(u1, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u2, -3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u3, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u4, -3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(p1, 0.0028931043330757342,\n significant=self.significant)\n assert_approx_equal(p3, 0.99710689566692423,\n significant=self.significant)\n\n def test_brunnermunzel_two_sided(self):\n # Results are compared with R's lawstat package.\n u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided')\n u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided')\n\n assert_approx_equal(p1, p2, significant=self.significant)\n assert_approx_equal(u1, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u2, -3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(p1, 0.0057862086661515377,\n significant=self.significant)\n\n def test_brunnermunzel_default(self):\n # The default value for alternative is two-sided\n u1, p1 = stats.brunnermunzel(self.X, self.Y)\n u2, p2 = stats.brunnermunzel(self.Y, self.X)\n\n assert_approx_equal(p1, p2, significant=self.significant)\n assert_approx_equal(u1, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u2, 
-3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(p1, 0.0057862086661515377,\n significant=self.significant)\n\n def test_brunnermunzel_alternative_error(self):\n alternative = \"error\"\n distribution = \"t\"\n nan_policy = \"propagate\"\n assert_(alternative not in [\"two-sided\", \"greater\", \"less\"])\n assert_raises(ValueError,\n stats.brunnermunzel,\n self.X,\n self.Y,\n alternative,\n distribution,\n nan_policy)\n\n def test_brunnermunzel_distribution_norm(self):\n u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution=\"normal\")\n u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution=\"normal\")\n assert_approx_equal(p1, p2, significant=self.significant)\n assert_approx_equal(u1, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u2, -3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(p1, 0.0017041417600383024,\n significant=self.significant)\n\n def test_brunnermunzel_distribution_error(self):\n alternative = \"two-sided\"\n distribution = \"error\"\n nan_policy = \"propagate\"\n assert_(distribution not in [\"t\", \"normal\"])\n assert_raises(ValueError,\n stats.brunnermunzel,\n self.X,\n self.Y,\n alternative,\n distribution,\n nan_policy)\n\n def test_brunnermunzel_empty_input(self):\n u1, p1 = stats.brunnermunzel(self.X, [])\n u2, p2 = stats.brunnermunzel([], self.Y)\n u3, p3 = stats.brunnermunzel([], [])\n\n assert_equal(u1, np.nan)\n assert_equal(p1, np.nan)\n assert_equal(u2, np.nan)\n assert_equal(p2, np.nan)\n assert_equal(u3, np.nan)\n assert_equal(p3, np.nan)\n\n def test_brunnermunzel_nan_input_propagate(self):\n X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]\n Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]\n u1, p1 = stats.brunnermunzel(X, Y, nan_policy=\"propagate\")\n u2, p2 = stats.brunnermunzel(Y, X, nan_policy=\"propagate\")\n\n assert_equal(u1, np.nan)\n assert_equal(p1, np.nan)\n assert_equal(u2, np.nan)\n assert_equal(p2, np.nan)\n\n def test_brunnermunzel_nan_input_raise(self):\n X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]\n Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]\n alternative = \"two-sided\"\n distribution = \"t\"\n nan_policy = \"raise\"\n\n assert_raises(ValueError,\n stats.brunnermunzel,\n X,\n Y,\n alternative,\n distribution,\n nan_policy)\n assert_raises(ValueError,\n stats.brunnermunzel,\n Y,\n X,\n alternative,\n distribution,\n nan_policy)\n\n def test_brunnermunzel_nan_input_omit(self):\n X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]\n Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]\n u1, p1 = stats.brunnermunzel(X, Y, nan_policy=\"omit\")\n u2, p2 = stats.brunnermunzel(Y, X, nan_policy=\"omit\")\n\n assert_approx_equal(p1, p2, significant=self.significant)\n assert_approx_equal(u1, 3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(u2, -3.1374674823029505,\n significant=self.significant)\n assert_approx_equal(p1, 0.0057862086661515377,\n significant=self.significant)\n\n\nclass TestRatioUniforms(object):\n \"\"\" Tests for rvs_ratio_uniforms.\n \"\"\"\n def test_rv_generation(self):\n # use KS test to check distribution of rvs\n # normal distribution\n f = stats.norm.pdf\n v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)\n umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound\n rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,\n random_state=12345)\n assert_equal(stats.kstest(rvs, 'norm')[1] > 0.25, True)\n\n # exponential distribution\n rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,\n vmin=0, vmax=2*np.exp(-1),\n 
size=1000, random_state=12345)\n assert_equal(stats.kstest(rvs, 'expon')[1] > 0.25, True)\n\n def test_shape(self):\n # test shape of return value depending on size parameter\n f = stats.norm.pdf\n v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)\n umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound\n\n r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=3,\n random_state=1234)\n r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3,),\n random_state=1234)\n r3 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 1),\n random_state=1234)\n assert_equal(r1, r2)\n assert_equal(r2, r3.flatten())\n assert_equal(r1.shape, (3,))\n assert_equal(r3.shape, (3, 1))\n\n r4 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 3, 3),\n random_state=12)\n r5 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=27,\n random_state=12)\n assert_equal(r4.flatten(), r5)\n assert_equal(r4.shape, (3, 3, 3))\n\n r6 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, random_state=1234)\n r7 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1,\n random_state=1234)\n r8 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(1, ),\n random_state=1234)\n assert_equal(r6, r7)\n assert_equal(r7, r8)\n\n def test_random_state(self):\n f = stats.norm.pdf\n v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)\n umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound\n np.random.seed(1234)\n r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4))\n r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4),\n random_state=1234)\n assert_equal(r1, r2)\n\n def test_exceptions(self):\n f = stats.norm.pdf\n # need vmin < vmax\n assert_raises(ValueError,\n stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=3, vmax=1)\n assert_raises(ValueError,\n stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=1, vmax=1)\n # need umax > 0\n assert_raises(ValueError,\n stats.rvs_ratio_uniforms, pdf=f, umax=-1, vmin=1, vmax=1)\n assert_raises(ValueError,\n stats.rvs_ratio_uniforms, pdf=f, umax=0, vmin=1, vmax=1)\n\n def test_gig(self):\n # test generalized inverse gaussian distribution\n p, b = 0.5, 0.75\n\n def gig_mode(p, b):\n return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)\n\n def gig_pdf(x, p, b):\n c = 1/(2 * kv(p, b))\n return c * x**(p - 1) * np.exp(- b * (x + 1/x) / 2)\n\n def gig_cdf(x, p, b):\n x = np.atleast_1d(x)\n cdf = [quad(gig_pdf, 0, xi, args=(p, b))[0] for xi in x]\n return np.array(cdf)\n\n s = kv(p+2, b) / kv(p, b)\n vmax = np.sqrt(gig_pdf(gig_mode(p + 2, b), p + 2, b) * s)\n umax = np.sqrt(gig_pdf(gig_mode(p, b), p, b))\n\n rvs = stats.rvs_ratio_uniforms(lambda x: gig_pdf(x, p, b), umax,\n 0, vmax, random_state=1234, size=1500)\n\n assert_equal(stats.kstest(rvs, lambda x: gig_cdf(x, p, b))[1] > 0.25,\n True)\n","repo_name":"catboost/catboost","sub_path":"contrib/python/scipy/py2/scipy/stats/tests/test_stats.py","file_name":"test_stats.py","file_ext":"py","file_size_in_byte":188587,"program_lang":"python","lang":"en","doc_type":"code","stars":7463,"dataset":"github-code","pt":"57"} +{"seq_id":"90934249","text":"#tkinter\nfrom tkinter import Tk\nfrom classes.Sensors import Sensors\nfrom classes.Config import Config\nfrom ui_components.Title import Title\nfrom ui_components.AppCanvas import AppCanvas\n\n# main application class\nclass MainApplication(Tk):\n def __init__(self):\n super().__init__()\n\n #local class that has all the callbacks and the sensor manager on it\n self.s = Sensors()\n self.c = Config()\n\n #simple test function\n #s.test_sensor_manager()\n\n self.width = Tk.winfo_screenwidth(self)\n self.height = 
Tk.winfo_screenheight(self)\n self.geometry(f\"{self.width}x{self.height}\")\n self.title(\"powered by Right Step\")\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(1, weight=1)\n self.canvas = AppCanvas(self, self.s, self.c)\n self.title_label = Title(self, text=\"Right Step Desktop\")\n\nif __name__ == \"__main__\":\n app = MainApplication()\n app.mainloop()\n","repo_name":"mcrooks83/rs-desktop","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"26688048188","text":"class Home:\n class home:\n card_names = {\"css\": \"a.content-card__name\"}\n phone_home = {\"css\": \"a.content-card__phone-content\"}\n home_address = {\"css\": \"p.content-card__address\"}\n tools_link_at_home = {\"css\": \"li.list-choice__item-tool a\"}\n\n class link:\n link_master = {\"css\": \"a.city-master-link\"}\n link_telephone = {\"css\": \"card-button-content\"}\n\n class map:\n map_home = {\"css\": \"div.main-maps\"}\n img_marker = {\"css\": \"image-geo-marker\"}\n marker_icon_at_map = {\"css\": \"img.leaflet-marker-icon\"}\n","repo_name":"lebedevadare/test_lock","sub_path":"locators/Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"4564845412","text":"\"\"\"APScheduler imports\"\"\"\nimport logging\n\nfrom django.conf import settings\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\n\n\n\"\"\"My imports\"\"\"\nfrom django import db\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nimport datetime\n\nfrom news.models import Post, Category\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef my_job():\n today_date = datetime.datetime.now()\n week_ago_date = today_date - datetime.timedelta(weeks=1)\n\n categories = Category.objects.all()\n\n for category in categories:\n subscribers_list = category.subscriber.all()\n post_list = Post.objects.filter(category=category, time__range=(week_ago_date, today_date))\n\n for subscriber in subscribers_list:\n html_content = render_to_string(\n 'news/send_notify_weekly.html',\n {\n 'post_list': post_list,\n 'category': category,\n 'username':subscriber.username\n }\n )\n\n email_text = EmailMultiAlternatives(\n subject='Weekly news digest',\n body='123',\n from_email='nedgalkin@gmail.com',\n to=[subscriber.email],\n )\n email_text.attach_alternative(html_content, \"text/html\")\n email_text.send()\n\ndef delete_old_job_executions(max_age=604_800):\n \"\"\"This job deletes all apscheduler job executions older than `max_age` from the database.\"\"\"\n DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n help = \"Runs apscheduler.\"\n\n def handle(self, *args, **options):\n scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n scheduler.add_jobstore(DjangoJobStore(), \"default\")\n\n scheduler.add_job(\n my_job,\n trigger=CronTrigger(day_of_week=\"*/5\"), # days 0 and 5, i.e. Monday and Saturday in APScheduler\n id=\"my_job\", # The `id` assigned to each job MUST be unique\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\"Added job 'my_job'.\")\n\n scheduler.add_job(\n 
delete_old_job_executions,\n trigger=CronTrigger(\n day_of_week=\"mon\", hour=\"00\", minute=\"00\"\n ), # Midnight on Monday, before start of the next work week.\n id=\"delete_old_job_executions\",\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\n \"Added weekly job: 'delete_old_job_executions'.\"\n )\n\n try:\n logger.info(\"Starting scheduler...\")\n scheduler.start()\n except KeyboardInterrupt:\n logger.info(\"Stopping scheduler...\")\n scheduler.shutdown()\n logger.info(\"Scheduler shut down successfully!\")\n","repo_name":"Mistyhops/task_D6","sub_path":"NewsPaper/news/management/commands/runapscheduler.py","file_name":"runapscheduler.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12707588325","text":"from options import MainParser as Parser\nfrom src.trainers import get_trainer\nimport os\nimport joblib\nimport json\nimport torch\nfrom src.models import get_model\n\n\ndef main():\n args = Parser().parse_args()\n exp = args.exp\n final = exp[:5] == 'final'\n args.exp = '_'.join(filter(bool, [args.cond, exp]))\n trainer = get_trainer(args, final)\n if args.gen:\n trainer.generate_samples(args.gen)\n if not args.eval:\n while args.epochs > trainer.epoch:\n trainer.train(args.checkpoint, val_after_train=not final)\n trainer.save()\n if not final:\n trainer.test('val')\n trainer.test('test' if final else 'val', save_outputs=args.ind)\n\n model_path = os.path.join(args.data_dir, 'models')\n if args.cond == 'svc':\n svc_model = joblib.load(os.path.join(model_path, '_'.join(filter(bool, ['svc', exp])) + '.joblib'))\n\n def model(x):\n return svc_model.predict_proba(x)[:, 1]\n else:\n mlp_model = get_model('MLP', args.res, args.z_dim)\n pretrain = 'pretrain' if args.cond == 'ae' else ''\n model_path_dir = os.path.join(args.data_dir, 'models', '_'.join(filter(bool, ['mlp', pretrain, exp])))\n final_epoch = json.load(open(os.path.join(model_path_dir, 'settings.json')))['training_epochs']\n model_path = os.path.join(model_path_dir, f'model_epoch{final_epoch}.pt')\n state = torch.load(model_path, map_location=torch.device(trainer.device))\n mlp_model.load_state_dict(state, strict=False)\n print('Pretrained weights in ' + model_path + ' loaded.')\n mlp_model = mlp_model.to(trainer.device)\n\n @torch.inference_mode()\n def model(x):\n if not torch.is_tensor(x):\n x = torch.from_numpy(x)\n mlp_model.eval()\n return torch.sigmoid(mlp_model(x.to(trainer.device))['logits']).cpu().squeeze().numpy()\n\n for i in args.ind:\n trainer.viz_sample(i)\n trainer.viz_counterfactual(i, model)\n if args.eval_counterfactuals:\n trainer.evaluate_counterfactuals(model=model, part='test' if final else 'val', save_fig=args.save_fig)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nverchev/FC_counterfactuals","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15129987062","text":"try:\n from .base import *\n from .lgca_square import LGCA_Square\n\nexcept ModuleNotFoundError:\n from base import *\n from lgca_square import LGCA_Square\n\n\nclass LGCA_Hex(LGCA_Square):\n \"\"\"\n 2d lattice-gas cellular automaton on a hexagonal lattice.\n \"\"\"\n velocitychannels = 6\n cix = np.cos(np.arange(velocitychannels) * pi2 / velocitychannels)\n ciy = np.sin(np.arange(velocitychannels) * pi2 / velocitychannels)\n c = np.array([cix, ciy])\n r_poly = 0.5 / 
np.cos(np.pi / velocitychannels)\n dy = np.sin(2 * np.pi / velocitychannels)\n orientation = 0.\n\n def init_coords(self):\n if self.ly % 2 != 0:\n print('Warning: uneven number of rows; only use for plotting - boundary conditions do not work!')\n self.x = np.arange(self.lx) + self.r_int\n self.y = np.arange(self.ly) + self.r_int\n self.xx, self.yy = np.meshgrid(self.x, self.y, indexing='ij')\n self.coord_pairs = list(zip(self.xx.flat, self.yy.flat))\n\n self.xcoords, self.ycoords = np.meshgrid(np.arange(self.lx + 2 * self.r_int) - self.r_int,\n np.arange(self.ly + 2 * self.r_int) - self.r_int, indexing='ij')\n self.xcoords = self.xcoords.astype(float)\n self.ycoords = self.ycoords.astype(float)\n\n self.xcoords[:, 1::2] += 0.5\n self.ycoords *= self.dy\n self.xcoords = self.xcoords[self.r_int:-self.r_int, self.r_int:-self.r_int]\n self.ycoords = self.ycoords[self.r_int:-self.r_int, self.r_int:-self.r_int]\n self.nonborder = (self.xx, self.yy)\n\n def propagation(self):\n newcellnodes = np.zeros(self.nodes.shape, dtype=self.nodes.dtype)\n newcellnodes[..., 6:] = self.nodes[..., 6:]\n\n # prop in 0-direction\n newcellnodes[1:, :, 0] = self.nodes[:-1, :, 0]\n\n # prop in 1-direction\n newcellnodes[:, 1::2, 1] = self.nodes[:, :-1:2, 1]\n newcellnodes[1:, 2::2, 1] = self.nodes[:-1, 1:-1:2, 1]\n\n # prop in 2-direction\n newcellnodes[:-1, 1::2, 2] = self.nodes[1:, :-1:2, 2]\n newcellnodes[:, 2::2, 2] = self.nodes[:, 1:-1:2, 2]\n\n # prop in 3-direction\n newcellnodes[:-1, :, 3] = self.nodes[1:, :, 3]\n\n # prop in 4-direction\n newcellnodes[:, :-1:2, 4] = self.nodes[:, 1::2, 4]\n newcellnodes[:-1, 1:-1:2, 4] = self.nodes[1:, 2::2, 4]\n\n # prop in 5-direction\n newcellnodes[1:, :-1:2, 5] = self.nodes[:-1, 1::2, 5]\n newcellnodes[:, 1:-1:2, 5] = self.nodes[:, 2::2, 5]\n\n self.nodes = newcellnodes\n return self.nodes\n\n def apply_rbcx(self):\n # left boundary\n self.nodes[self.r_int, :, 0] += self.nodes[self.r_int - 1, :, 3]\n self.nodes[self.r_int, 2:-1:2, 1] += self.nodes[self.r_int - 1, 1:-2:2, 4]\n self.nodes[self.r_int, 2:-1:2, 5] += self.nodes[self.r_int - 1, 3::2, 2]\n\n # right boundary\n self.nodes[-self.r_int - 1, :, 3] += self.nodes[-self.r_int, :, 0]\n self.nodes[-self.r_int - 1, 1:-1:2, 4] += self.nodes[-self.r_int, 2::2, 1]\n self.nodes[-self.r_int - 1, 1:-1:2, 2] += self.nodes[-self.r_int, :-2:2, 5]\n\n self.apply_abcx()\n\n def apply_rbcy(self):\n lx, ly, _ = self.nodes.shape\n\n # lower boundary\n self.nodes[(1 - (self.r_int % 2)):, self.r_int, 1] += self.nodes[:lx - (1 - (self.r_int % 2)), self.r_int - 1,\n 4]\n self.nodes[:lx - (self.r_int % 2), self.r_int, 2] += self.nodes[(self.r_int % 2):, self.r_int - 1, 5]\n\n # upper boundary\n self.nodes[:lx - ((ly - 1 - self.r_int) % 2), -self.r_int - 1, 4] += self.nodes[((ly - 1 - self.r_int) % 2):,\n -self.r_int, 1]\n self.nodes[(1 - ((ly - 1 - self.r_int) % 2)):, -self.r_int - 1, 5] += self.nodes[\n :lx - (1 - ((ly - 1 - self.r_int) % 2)),\n -self.r_int, 2]\n self.apply_abcy()\n\n def gradient(self, qty):\n gx = np.zeros_like(qty, dtype=float)\n gy = np.zeros_like(qty, dtype=float)\n\n # x-component\n gx[:-1, ...] += self.cix[0] * qty[1:, ...]\n\n gx[1:, ...] += self.cix[3] * qty[:-1, ...]\n\n gx[:, :-1:2, ...] += self.cix[1] * qty[:, 1::2, ...]\n gx[:-1, 1:-1:2, ...] += self.cix[1] * qty[1:, 2::2, ...]\n\n gx[1:, :-1:2, ...] += self.cix[2] * qty[:-1, 1::2, ...]\n gx[:, 1:-1:2, ...] += self.cix[2] * qty[:, 2::2, ...]\n\n gx[:, 1::2, ...] += self.cix[4] * qty[:, :-1:2, ...]\n gx[1:, 2::2, ...] 
+= self.cix[4] * qty[:-1, 1:-1:2, ...]\n\n gx[:-1, 1::2, ...] += self.cix[5] * qty[1:, :-1:2, ...]\n gx[:, 2::2, ...] += self.cix[5] * qty[:, 1:-1:2, ...]\n\n # y-component\n gy[:, :-1:2, ...] += self.ciy[1] * qty[:, 1::2, ...]\n gy[:-1, 1:-1:2, ...] += self.ciy[1] * qty[1:, 2::2, ...]\n\n gy[1:, :-1:2, ...] += self.ciy[2] * qty[:-1, 1::2, ...]\n gy[:, 1:-1:2, ...] += self.ciy[2] * qty[:, 2::2, ...]\n\n gy[:, 1::2, ...] += self.ciy[4] * qty[:, :-1:2, ...]\n gy[1:, 2::2, ...] += self.ciy[4] * qty[:-1, 1:-1:2, ...]\n\n gy[:-1, 1::2, ...] += self.ciy[5] * qty[1:, :-1:2, ...]\n gy[:, 2::2, ...] += self.ciy[5] * qty[:, 1:-1:2, ...]\n\n g = np.moveaxis(np.array([gx, gy]), 0, -1)\n return g\n\n def channel_weight(self, qty):\n weights = np.zeros(qty.shape + (self.velocitychannels,))\n weights[:-1, :, 0] = qty[1:, ...]\n weights[1:, :, 3] = qty[:-1, ...]\n\n weights[:, :-1:2, 1] = qty[:, 1::2, ...]\n weights[:-1, 1:-1:2, 1] = qty[1:, 2::2, ...]\n\n weights[1:, :-1:2, 2] = qty[:-1, 1::2, ...]\n weights[:, 1:-1:2, 2] = qty[:, 2::2, ...]\n\n weights[:, 1::2, 4] = qty[:, :-1:2, ...]\n weights[1:, 2::2, 4] = qty[:-1, 1:-1:2, ...]\n\n weights[:-1, 1::2, 5] = qty[1:, :-1:2, ...]\n weights[:, 2::2, 5] = qty[:, 1:-1:2, ...]\n\n return weights\n\n def nb_sum(self, qty):\n sum = np.zeros(qty.shape)\n sum[:-1, ...] += qty[1:, ...]\n sum[1:, ...] += qty[:-1, ...]\n sum[:, 1::2, ...] += qty[:, :-1:2, ...]\n sum[1:, 2::2, ...] += qty[:-1, 1:-1:2, ...]\n sum[:-1, 1::2, ...] += qty[1:, :-1:2, ...]\n sum[:, 2::2, ...] += qty[:, 1:-1:2, ...]\n sum[:, :-1:2, ...] += qty[:, 1::2, ...]\n sum[:-1, 1:-1:2, ...] += qty[1:, 2::2, ...]\n sum[1:, :-1:2, ...] += qty[:-1, 1::2, ...]\n sum[:, 1:-1:2, ...] += qty[:, 2::2, ...]\n return sum\n\n\nclass IBLGCA_Hex(IBLGCA_base, LGCA_Hex):\n \"\"\"\n Identity-based LGCA simulator class.\n \"\"\"\n\n def init_nodes(self, density=0.1, nodes=None):\n self.nodes = np.zeros((self.lx + 2 * self.r_int, self.ly + 2 * self.r_int, self.K), dtype=np.uint)\n if nodes is None:\n self.random_reset(density)\n\n else:\n self.nodes[self.nonborder] = nodes.astype(np.uint)\n self.maxlabel = self.nodes.max()\n\n\nif __name__ == '__main__':\n #lx = 5\n #ly = lx\n #restchannels = 6\n #nodes = np.zeros((lx, ly, 6 + restchannels))\n #nodes[lx // 2, ly // 2, -1] = 1\n # nodes[...] 
= 1\n # nodes[:lx // 2, :, -2:] = 1\n # nodes[..., -1] = 1\n # nodes[:, ly//2:, 6:] = 1\n # nodes[0, :, :4] = 1\n # lgca = LGCA_Hex(restchannels=restchannels, dims=(lx, ly), density=0.5 / (6 + restchannels), bc='pbc',\n # interaction='wetting', beta=20., gamma=10)\n # lgca.ecm = np.zeros_like(lgca.cell_density, dtype=bool)\n # lgca = IBLGCA_Hex(nodes=nodes, interaction='go_and_grow')\n # lgca.set_interaction('contact_guidance', beta=2)\n # cProfile.run('lgca.timeevo(timesteps=1000)')\n # lgca.timeevo(timesteps=50, record=True)\n # ani = lgca.animate_flow(interval=500)\n # ani = lgca.animate_flux(interval=50)\n # ani = lgca.animate_density(interval=50)\n # ani = lgca.animate_density(density_t=refr, interval=50, vmax=lgca.restchannels)\n # ani2 = lgca.animate_density(density_t=exc, interval=50, vmax=lgca.velocitychannels)\n # ani = lgca.animate_config(interval=10, grid=False)\n\n # ani = lgca.live_animate_density(interval=100, vmax=lgca.restchannels, channels=range(6, lgca.K))\n # ani2 = lgca.live_animate_density(interval=100, vmax=lgca.velocitychannels, channels=range(6))\n # ani = lgca.live_animate_flow()\n # ani = lgca.live_animate_density()\n #ani = lgca.live_animate_config()\n # plt.streamplot(lgca.xcoords[:, 0], lgca.ycoords[-1], lgca.g[1:-1, 1:-1, 0].T, lgca.g[1:-1, 1:-1, 1].T, density=.5,\n # arrowstyle='->', color='orange', linewidth=2.)\n # ani = lgca.live_animate_density()\n # lgca.plot_config()\n # lgca.plot_density(edgecolor='k')\n # plt.show()\n from lgca import get_lgca\n import numpy as np\n from matplotlib import pyplot as plt\n\n l = 50\n l_spheroid = 2\n dims = (l, l)\n tmax = 100\n restc = 3\n rho_0 = 3\n nodes = np.zeros((l, l, restc + 6), dtype=bool)\n nodes[..., :l_spheroid, -rho_0:] = 1\n lgca = get_lgca(geometry='hex', interaction='wetting', beta=10., gamma=5., bc='rbc', density=0, restchannels=restc,\n nodes=nodes, rho_0=rho_0)\n lgca.r_b = .05\n lgca.spheroid = np.zeros_like(lgca.cell_density, dtype=bool)\n lgca.spheroid[lgca.r_int:-lgca.r_int, :lgca.r_int + l_spheroid] = 1\n lgca.ecm = 1 - lgca.spheroid.astype(float)\n lgca.ecm *= 0.\n # lgca.timestep()\n lgca.timeevo(12, record=True)\n # lgca.plot_flow(cbar=False)\n # ani = lgca.live_animate_flow(cbar=False)\n plt.show()\n","repo_name":"sisyga/jamminglgca","sub_path":"lgca/lgca_hex.py","file_name":"lgca_hex.py","file_ext":"py","file_size_in_byte":9655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12847072964","text":"# coding: utf-8\n\"\"\"Utility functions.\"\"\"\nimport json\nimport os\nimport shutil\nfrom pathlib import Path\nfrom typing import (\n Dict,\n List,\n Union,\n)\n\nfrom pkg_resources._vendor.packaging.specifiers import SpecifierSet\n\nfrom .models import (\n VendoredLibrary,\n VendoredList,\n)\nfrom .parse import parse_requirements\n\n\ndef get_py_executable() -> str:\n if os.name == 'nt':\n return 'py'\n\n py_path: Path = Path(__file__).parent / 'tools' / 'py'\n\n # Use Unix `py` tool\n # Make sure it is executable\n import stat\n cur_mode = py_path.stat().st_mode\n if not (cur_mode & stat.S_IEXEC):\n answer = input(f'Needs to execute: {py_path}, OK? 
[y/N] ')\n if not answer or answer.lower() != 'y':\n raise Exception('Unable to execute Unix `py` tool, requires user consent.')\n py_path.chmod(cur_mode | stat.S_IEXEC)\n\n return str(py_path)\n\n\ndef load_requirements(listpath: Path, ignore_errors: bool = False) -> VendoredList:\n \"\"\"Get requirements from list.\"\"\"\n requirements = VendoredList()\n\n for req, error in parse_requirements(listpath):\n if error and not ignore_errors:\n raise error\n requirements.add(req)\n\n return requirements\n\n\ndef package_module_paths(req: VendoredLibrary, root: Path) -> List[Path]:\n \"\"\"Make a list of all of the full module paths for deletion.\"\"\"\n package_modules: List[Path] = []\n\n for folder in req.folder:\n target_path: Path = root / folder\n for module in req.modules:\n module_path: Path = (target_path / module).resolve()\n # Make sure we're not removing anything outside the target folder!\n if target_path not in module_path.parents:\n continue\n # raise Exception(\n # 'Stopping before removal of files outside target folder!'\n # f' - {module_path} is not within {target_path}'\n # )\n\n package_modules.append(module_path)\n\n # Remove dangling namespace folders\n if '/' in module:\n ns_path = target_path / module.split('/', 1)[0]\n check_paths = [ns_path / '__init__.py'] + package_modules\n remaining_files = any(p not in check_paths for p in ns_path.glob('*'))\n if not remaining_files:\n for mod in package_modules:\n if ns_path in mod.parents:\n package_modules.remove(mod)\n package_modules.append(ns_path)\n\n return package_modules\n\n\ndef get_renovate_config(project_path: Path) -> Dict[str, SpecifierSet]:\n if not SpecifierSet:\n return {}\n\n renovate_json = project_path.joinpath('renovate.json')\n if not renovate_json.is_file():\n return {}\n\n with renovate_json.open('r', encoding='utf-8') as fh:\n data = json.load(fh)\n\n try:\n python_config: Dict[str, dict] = data['python']\n python_pkg_rules: List[Dict[str, Union[List[str], str]]] = python_config['packageRules']\n except KeyError:\n return {}\n\n constraints: Dict[str, str] = {}\n for rule in python_pkg_rules:\n try:\n names: List[str] = rule['packageNames']\n allowed_versions: str = rule['allowedVersions']\n except KeyError:\n continue\n\n constraints.update({\n name.lower(): SpecifierSet(allowed_versions)\n for name in names\n })\n\n return constraints\n\n\ndef drop_dir(path: Path, ignore_errors=False, onerror=None):\n \"\"\"Recursively delete the directory tree at `path`.\"\"\"\n shutil.rmtree(str(path), ignore_errors=ignore_errors, onerror=onerror)\n\n\ndef remove_all(paths: List[Path]):\n \"\"\"Recursively delete every file and directory tree of `paths`.\"\"\"\n for path in paths:\n if path.is_dir():\n drop_dir(path)\n else:\n path.unlink()\n","repo_name":"sharkykh/medusa_vendor_tools","sub_path":"mvt/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"1388896784","text":"from sqlalchemy.orm import Session\n\nfrom crudal.base_sync import DeclarativeCrudBase\n\n\ndef test_add(sync_model, session: Session):\n p = sync_model(name=\"Andrew\")\n p.add(session, commit=True)\n\n assert p in p.all(session)\n assert p in p.find(session, name=\"Andrew\")\n assert p not in p.find(session, name=\"Not Andrew\")\n\n\ndef test_add_many(sync_model, session: Session):\n p1 = sync_model(name=\"Andrew\")\n p2 = sync_model(name=\"John\")\n sync_model.add_many(session, items=[p1, p2], commit=True)\n\n 
assert p1 in sync_model.all(session)\n assert p2 in sync_model.all(session)\n\n\ndef test_delete(sync_model: DeclarativeCrudBase, session: Session):\n p = sync_model(name=\"Andrew\")\n p.add(session, commit=True)\n assert p in sync_model.all(session)\n\n sync_model.delete(session, name=\"Andrew\")\n\n assert p not in sync_model.all(session)\n","repo_name":"andrewsapw/crudal","sub_path":"tests/sync_tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"21132488094","text":"from flask import Blueprint\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\nfrom flask import session\nfrom flask import flash\nfrom functools import wraps\nfrom model import User\nfrom model import Weibo\nfrom model import Comments\n\n\nuser = Blueprint('user', __name__)\n\n\ndef current_user():\n user_id = session.get('user_id', '')\n u = User.query.filter_by(id=user_id).first()\n return u\n\n\ndef login_require(f):\n @wraps(f)\n def functions(*args, **kwargs):\n if current_user() is None:\n return redirect(url_for('index'))\n else:\n return f(*args, **kwargs)\n return functions\n\n\n@user.route('/index', methods=['GET'])\ndef index():\n if request.cookies:\n username = request.args.get('username')\n weibos = Weibo.query.limit(20).all()\n return render_template('login/weibo.html', weibos=weibos, username=username)\n else:\n return redirect(url_for('index'))\n\n\n@user.route('/register', methods=['POST'])\ndef register():\n form = request.form\n user = User(form)\n user.add()\n return redirect(url_for('index'))\n\n\n@user.route('/login', methods=['POST'])\ndef login():\n if request.form.get('username'):\n username = request.form.get('username')\n password_register = request.form.get('password')\n user = User.query.filter_by(username=username).first()\n if password_register == user.password:\n session['user_id'] = user.id\n return redirect(url_for('user.index', username=username))\n\n\n\n@user.route('/weibo/logout', methods=['GET'])\ndef logout():\n session.pop('user_id', None)\n return redirect(url_for('index'))\n\n@user.route('/weibo', methods=['POST'])\n@login_require\ndef weibo():\n if request.form.get('weibo_contents'):\n form = request.form\n weibo = Weibo(form)\n weibo.add()\n username = request.args.get('username')\n else:\n username = request.args.get('username')\n flash('Please input weibo contents')\n return redirect(url_for('user.index', username=username))\n\n\n@user.route('/comments', methods=['GET'])\n@login_require\ndef comments():\n weibo_id = request.args.get('weibo_id')\n username = request.args.get('username')\n return render_template('login/comments.html', weibo_id=int(weibo_id), username=username)\n\n\n@user.route('/comments/add', methods=['POST'])\ndef comments_add():\n if request.cookies:\n if request.form.get('comments_contents'):\n form = request.form\n comment = Comments(form)\n weibo_id = request.args.get('weibo_id')\n username = request.args.get('username')\n comment.add(int(weibo_id))\n else:\n username = request.args.get('username')\n return redirect(url_for('user.index', username=username))\n else:\n return redirect(url_for('index'))\n\n\n","repo_name":"ChengChiongWah/python_study","sub_path":"course17/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"26719209278","text":"\nfrom selenium.webdriver.common.keys import Keys \nfrom .base import FunctionalTest\n\n\n\nclass Ckwotes(FunctionalTest):\n\n\tdef test_ckwote_render(self):\n\t\tself.browser.get('http://localhost:8000')\n\t\t# # She notices a quotation at the top of the front page\n\t\tckwote = self.browser.find_element_by_tag_name('em').text \n\t\tself.assertTrue(ckwote.startswith('\"'))\n\t\tself.assertIn('\"', ckwote)\n\t\tself.assertTrue(len(ckwote) > 10)\n\n\n#Following conditional not needed when using Django test runner\nif __name__ == '__main__':\n\tunittest.main()\n\n","repo_name":"danchay/h2100","sub_path":"functional_tests/test_ckwotes.py","file_name":"test_ckwotes.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36047066657","text":"import os\nimport datetime\nimport json\n\nimport boto3\nfrom aws_lambda_powertools import Logger, Tracer, Metrics, single_metric\nfrom aws_lambda_powertools.metrics import MetricUnit\n\nfrom inspector.services import budgets, vpc, ec2, iam, sns, codestar_connections\n\nfrom inspector.utils.flatten import flatten\n\nMANAGEMENT_ACCOUNT_ID = \"008356366354\"\n\nSERVICE_MAP = {\n \"VPC\": vpc,\n \"EC2\": ec2,\n \"BUDGETS\": budgets,\n \"IAM\": iam,\n \"SNS\": sns,\n \"CODESTAR_CONNECTIONS\": codestar_connections\n}\n\nlogger = Logger()\ntracer = Tracer()\nmetrics = Metrics()\n\ndef assume_role(role_arn):\n sts_client = boto3.client(\"sts\")\n assumed_role_object = sts_client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=\"horatio-inspector\"\n )\n return assumed_role_object[\"Credentials\"]\n\n@tracer.capture_lambda_handler\n@logger.inject_lambda_context(log_event=True)\ndef handler(event, _context):\n sqs = boto3.resource(\"sqs\")\n queue = sqs.Queue(os.environ[\"QUEUE_URL\"])\n service_name = event[\"SERVICE\"]\n\n service = SERVICE_MAP[service_name]\n\n list_accounts_credentials = assume_role(\"arn:aws:iam::008356366354:role/horatio-list-accounts-role\")\n\n organisations_client = boto3.client(\n \"organizations\",\n aws_access_key_id=list_accounts_credentials[\"AccessKeyId\"],\n aws_secret_access_key=list_accounts_credentials[\"SecretAccessKey\"],\n aws_session_token=list_accounts_credentials[\"SessionToken\"]\n )\n organisation_accounts_paginator = organisations_client.get_paginator(\"list_accounts\")\n\n organisation_accounts_results = [\n result[\"Accounts\"]\n for result in organisation_accounts_paginator.paginate()\n ]\n\n organisation_accounts = flatten(organisation_accounts_results)\n\n for account in organisation_accounts:\n account_id = str(account[\"Id\"])\n account_name = account[\"Name\"]\n account_status = account[\"Status\"]\n\n if account_status == \"SUSPENDED\":\n logger.info(f\"ignoring account {account_name} as it is suspended\")\n continue\n\n logger.info(f\"processing account {account_name} ({account_id})\")\n\n if account_id == MANAGEMENT_ACCOUNT_ID:\n logger.info(f\"{account_name} is management account, skipping\")\n continue\n\n target_account_credentials = assume_role(f\"arn:aws:iam::{account_id}:role/horatio-inspection-target-account-role\")\n\n for region in event[\"REGIONS\"]:\n results = service.inspect(target_account_credentials, region)\n\n for result in results:\n today = datetime.datetime.today()\n\n report = result[\"report\"]\n report[\"account_id\"] = account_id\n\n rule_name = result[\"rule_name\"]\n\n logger.info(f\"sending message to queue {account_id}|{rule_name}\")\n\n 
queue.send_message(\n MessageBody=json.dumps({\n \"account_id\": account_id,\n \"rule_name\": rule_name,\n \"inspection_date\": f\"{today.year}-{today.month}-{today.day}\",\n \"report\": report\n })\n )\n\n with single_metric(name=\"Finding\", unit=MetricUnit.Count, value=1) as metric:\n metric.add_dimension(name=\"ServiceName\", value=service_name)\n metric.add_dimension(name=\"RuleName\", value=rule_name)\n metric.add_dimension(name=\"AccountID\", value=account_id)\n","repo_name":"AlexChesters/horatio","sub_path":"apps/inspector/inspector/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"15999112673","text":"#!/usr/bin/python \n# -*- coding:utf-8 -*-\n#python 3.5.x\n\nimport os\nimport shutil\nimport zipfile,os.path\nimport platform\n\nVS_VERSION_9 = \"Visual Studio 9 2008\"\nVS_VERSION_9_x64 = \"Visual Studio 9 2008 Win64\"\nVS_VERSION_14 = \"Visual Studio 14 2015\"\nVS_VERSION_14_x64 = \"Visual Studio 14 2015 Win64\"\n\n#define FFMPEG win32 & win64 version\n#FFMPEG_WIN32_VERSION = \"ffmpeg-3.2.4-win32\"\n#FFMPEG_WIN64_VERSION = \"ffmpeg-3.2.4-win64\"\nFFMPEG_WIN32_VERSION = \"ffmpeg-2.2.1-win32\"\nFFMPEG_WIN64_VERSION = \"ffmpeg-2.8.3-win64\"\n\ndef copy_files(path, target):\n # copies every regular file in folder path into the target folder\n #files = []\n for name in os.listdir(path):\n if os.path.isfile(os.path.join(path, name)):\n #files.append(name)\n shutil.copy(os.path.join(path, name), os.path.join(path,target))\n #return files \n\n#for Windows cmake project usage\ndef Windows_cmake():\n\n vs_choice = input(\"Select Visual studio version(1 for VS2008 32bit, 2 for VS2008 64bit, 3 for VS2015 32bit, 4 for VS2015 64bit)?\")\n\n if (vs_choice == '1'):\n VS_VERSION = VS_VERSION_9\n elif (vs_choice == '2'):\n VS_VERSION = VS_VERSION_9_x64\n elif (vs_choice == '3'):\n VS_VERSION = VS_VERSION_14\n elif (vs_choice == '4'):\n VS_VERSION = VS_VERSION_14_x64\n else:\n VS_VERSION = VS_VERSION_9\n \n path = os.getcwd()\n os.chdir(path)\n \n #create depends folder\n shadow = path+\"/shadow\"\n if os.path.exists(shadow):\n shutil.rmtree(shadow)\n os.makedirs(shadow)\n \n os.chdir(shadow)\n\n\n #2.4.12 support VS2008 32bit&64bit, VS2015 32bit\n #OPENCV_ROOT = \"D:/OpenSource/OpenCV-2.4.12/install/\"\n #if want to use opencv 3.1.0, support VS2015 64bit\n OPENCV_ROOT = \"D:/OpenSource/OpenCV-3.1.0/install/\"\n\n if (VS_VERSION == VS_VERSION_14):\n opencv_bin = OPENCV_ROOT + \"/x86/vc14/bin/\"\n FFMPEG_ROOT = \"D:/OpenSource/\" + FFMPEG_WIN32_VERSION\n BOOST_ROOT = \"D:/OpenSource/boost_1_58_0\"\n elif (VS_VERSION == VS_VERSION_14_x64):\n opencv_bin = OPENCV_ROOT + \"/x64/vc14/bin/\"\n FFMPEG_ROOT = \"D:/OpenSource/\" + FFMPEG_WIN64_VERSION\n BOOST_ROOT = \"D:/OpenSource/boost_1_58_0_x64\"\n elif (VS_VERSION == VS_VERSION_9_x64):\n opencv_bin = OPENCV_ROOT + \"/x64/vc9/bin/\"\n FFMPEG_ROOT = \"D:/OpenSource/\" + FFMPEG_WIN64_VERSION\n BOOST_ROOT = \"D:/OpenSource/boost_1_58_0_x64\"\n else:\n #default for VS2008 32bit\n opencv_bin = OPENCV_ROOT + \"/x86/vc9/bin/\"\n FFMPEG_ROOT = \"D:/OpenSource/\"+FFMPEG_WIN32_VERSION\n BOOST_ROOT = \"D:/OpenSource/boost_1_58_0\"\n \n #CMAKE_COMMAND = \"cmake-gui -G \\\"\"+VS_VERSION+\"\\\" -DBOOST_ROOT_SET:STRING=\"+BOOST_ROOT+\" -DOPENCV_ROOT_SET:STRING=\"+OPENCV_ROOT+\" -DFFMPEG_ROOT_SET:STRING=\"+FFMPEG_ROOT+\" ../\"\n CMAKE_COMMAND = \"cmake -G \\\"\"+VS_VERSION+\"\\\" 
-DBOOST_ROOT_SET:STRING=\"+BOOST_ROOT+\" -DOPENCV_ROOT_SET:STRING=\"+OPENCV_ROOT+\" -DFFMPEG_ROOT_SET:STRING=\"+FFMPEG_ROOT+\" ../\"\n os.system(CMAKE_COMMAND)\n \n #try to copy dlls\n shadowDebugDir = shadow+\"/Debug\"\n os.makedirs(shadowDebugDir)\n os.chmod(shadowDebugDir, 0o777)\n \n shadowReleaseDir = shadow+\"/Release\"\n os.makedirs(shadowReleaseDir)\n os.chmod(shadowReleaseDir, 0o777)\n\n #copy files\n copy_files(opencv_bin, shadowReleaseDir)\n copy_files(opencv_bin, shadowDebugDir)\n\n #log4cxx_bin = LOG4CXX_ROOT + \"/lib/\"\n #copy_files(log4cxx_bin, shadowReleaseDir)\n #copy_files(log4cxx_bin, shadowDebugDir)\n\n ffmpeg_bin = FFMPEG_ROOT + \"/bin/\"\n copy_files(ffmpeg_bin, shadowReleaseDir)\n copy_files(ffmpeg_bin, shadowDebugDir)\n\n choice = input(\"Do you want to open ImageBusy.sln(y/n, default to n)?\")\n if choice == 'y' :\n os.startfile(shadow+\"/ImageBusy.sln\")\n\ndef Linux_cmake():\n path = os.getcwd()\n\n #create depends folder\n shadow = path+\"/shadow\"\n if os.path.exists(shadow):\n shutil.rmtree(shadow)\n os.makedirs(shadow)\n \n os.chdir(shadow)\n\n CMAKE_COMMAND = \"cmake ../\"\n os.system(CMAKE_COMMAND)\n\ndef Apple_cmake():\n path = os.getcwd()\n \n #create depends folder\n shadow = path+\"/shadow\"\n if os.path.exists(shadow):\n shutil.rmtree(shadow)\n os.makedirs(shadow)\n\n os.chdir(shadow)\n \n CMAKE_COMMAND = \"cmake-gui ../\"\n os.system(CMAKE_COMMAND)\n \nif __name__ == \"__main__\":\n\n if platform.system() == \"Windows\":\n print(\"system is Windows\")\n Windows_cmake()\n elif platform.system() == \"Darwin\":\n print(\"system is Mac\")\n Apple_cmake()\n elif platform.system() == \"Linux\":\n print(\"system is Linux\")\n Linux_cmake()\n else:\n print(\"system is unknown! the name is: \"+platform.system())\n","repo_name":"SeoiLyn/ImageBusy","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"22023865257","text":"# -*- coding: utf-8 -*-\nfrom django import forms\n\n\nclass BaseFilterForm(forms.Form):\n FIELDS = {}\n\n start = forms.IntegerField(initial=1)\n length = forms.IntegerField(initial=10)\n sort_asc = forms.ChoiceField(choices=(('', 'asc'), ('-', 'desc')), required=False)\n sort_column = forms.TypedChoiceField(coerce=int)\n search = forms.CharField(required=False)\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(BaseFilterForm, self).__init__(*args, **kwargs)\n self.fields['sort_column'].choices = zip(map(str, range(len(self.FIELDS))), range(len(self.FIELDS)))\n\n def clean(self):\n cd = super().clean()\n cd['search'] = self.data.get('search[value]', '').lower()\n return cd\n","repo_name":"anndoc/django-datatable-snippet","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"29187746963","text":"import lists as ls\nimport text_arhive as tx\n\n\ndef buttons_processing_first(message):\n if message.text.strip() == tx.text_button(\"spec\", 0):\n send_text = tx.text_button('spec', 0)\n elif message.text.strip() == tx.text_button(\"main\", 0):\n send_text = ls.text_collector(1)\n elif message.text.strip() == tx.text_button(\"main\", 1):\n send_text = ls.text_collector(2)\n elif message.text.strip() == tx.text_button(\"main\", 2):\n send_text = ls.text_collector(3)\n else:\n send_text = \"error buttons_processing_first\"\n return 
send_text\n\n\ndef command_processing(message):\n ml = message.text.split()\n if len(ml) == 1:\n return\n elif len(ml) != 4:\n print('Wrong number of arguments for command_processing')\n return\n elif len(ml) >= 4 and ml[0] == '/redact':\n redact_processing(ml)\n else:\n print('Error: command_processing')\n return\n # print(ml)\n\n\ndef redact_processing(ml):\n action = ml[1]\n spisok = ml[2]\n food = ml[3]\n if action == tx.text_command_arg('comand', 0):\n action_f = ls.add_to_list\n elif action == tx.text_command_arg('comand', 1):\n action_f = ls.del_to_list\n elif action == tx.text_command_arg('comand', 2):\n action_f = ls.change_to_list\n else:\n print('Error: redact_processing - action')\n action_f(spisok, food)\n # print(action, spisok, food)\n","repo_name":"Notker367/Bot_eda","sub_path":"input_response.py","file_name":"input_response.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"3647985746","text":"with open('data/06.txt') as f:\n data = f.read()\n\nt = 0\nknown = {}\nsubknown = {}\n\nfor d in data.split(','):\n if int(d) in known:\n t += known[int(d)]\n continue\n subt = 0\n f = [int(d)]\n for i in range(128):\n for j in range(len(f)):\n if f[j] == 0:\n f[j] = 6\n f.append(8)\n else:\n f[j] -= 1\n for f_n in f:\n if f_n in subknown:\n subt += subknown[f_n]\n continue\n ff = [f_n]\n for i in range(128):\n for j in range(len(ff)):\n if ff[j] == 0:\n ff[j] = 6\n ff.append(8)\n else:\n ff[j] -= 1\n subt += len(ff)\n subknown[f_n] = len(ff)\n known[int(d)] = subt\n t += subt\n\nprint(t)\n","repo_name":"robpieke/advent2021","sub_path":"06b.py","file_name":"06b.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"9943290591","text":"import example\n\n# function\n\n# def smth():\n# pass # keyword (skip)\n#\n# def foo():\n# print('I`m function')\n# a = 6\n# b = 8\n# print('Result:', a + b)\n#\n# foo()\n\n# Arguments\n# def add_nums(a, b):\n# c = a + b\n# print(c)\n#\n#\n# add_nums(12, 15)\n# result = add_nums(12, 15) # None\n# print(result)\n\ndef add_nums(a, b):\n # a and b are positional arguments\n c = a + b\n return c\n\n\ndef guess_num(num):\n start_num = 10\n result = None\n while True: # infinite loop, but it can be broken out of\n if start_num == num:\n print('Your number:', start_num)\n break\n question = 'Is your number greater than ' + str(start_num) + '?'\n answer = input(question) # y - yes or n - no\n if answer == 'y':\n start_num *= 2\n elif answer == 'n':\n start_num = start_num // 2 + 1 # integer division\n else:\n print('Invalid answer, only y or n are allowed')\n\n\ndef main():\n # result = add_nums(12, 15)\n # print('Result:', result)\n #\n # result_1 = add_nums(b=14, a=21)\n # print('Result_1:', result_1)\n\n # imports\n # result_2 = example.mul_nums(70, 3)\n # print('Result_2:', result_2)\n\n # for i in range(4, 13, 2):\n # print(\"Iteration number:\", i)\n\n # for i in range(10):\n # if i % 3 == 0:\n # print('Divisible by 3:', i)\n # elif i % 3 == 1:\n # print('Almost divisible by 3:', i)\n # else:\n # print('Other remainder', i)\n\n # num = 100\n # while num != 0:\n # print('num =', num)\n # num -= 10\n guess_num(50)\n\n\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"EvgeniiMorozov/studying_Python","sub_path":"main/lessons/lesson_2.py","file_name":"lesson_2.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39291405719","text":"# https://github.com/first20hours/google-10000-english\ndef load_words():\n with open('../Word_Finding/words_alpha.txt') as word_file:\n valid_words = set(word_file.read().split())\n return valid_words\n\n\ndef complete_word(revealed_letters, given_letters):\n all_words = load_words()\n length_revealed_letters = len(revealed_letters)\n list_given_letters = list(given_letters)\n total_letters = len(given_letters)\n picked_words, result = ([] for i in range(2))\n count = 0\n for word in all_words:\n word_temp = word\n list_revealed_letters2 = list_given_letters.copy()\n for letter in list_given_letters:\n if letter in word_temp:\n del list_revealed_letters2[list_revealed_letters2.index(letter)]\n word_temp = word_temp.replace(letter, '', 1)\n if len(list_given_letters) - len(list_revealed_letters2) == len(word) and (\n len(word) <= total_letters) and len(word) == length_revealed_letters:\n picked_words.append(word)\n for picked_word in picked_words:\n for i in range(0, len(picked_word)):\n try:\n if picked_word[i] == revealed_letters[i] or revealed_letters[i] == '_':\n count += 1\n except:\n pass\n if total_letters == i + 1:\n break\n if count == length_revealed_letters:\n result.append(picked_word)\n count = 0\n return result\n\n\nprint(complete_word(\"_______\", \"yccende\"))\n","repo_name":"Paradiddle131/Scrabble-Solver","sub_path":"Word_Finding/find_word.py","file_name":"find_word.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74896331377","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections.abc import Mapping\n\nfrom quri_parts.circuit import NonParametricQuantumCircuit, QuantumCircuit\n\nfrom .transpiler import CircuitTranspilerProtocol\n\n\nclass QubitRemappingTranspiler(CircuitTranspilerProtocol):\n \"\"\"Remap qubits in the circuit with the specified mapping.\n\n The mapping ``qubit_mapping`` should be specified with \"from\" qubit\n indices as keys and \"to\" qubit indices as values. For example, if\n you want to convert a circuit using qubits 0, 1, 2, 3 by mapping\n them as 0 → 4, 1 → 2, 2 → 5, 3 → 0, then the ``qubit_mapping``\n should be ``{0: 4, 1: 2, 2: 5, 3: 0}``. The ``qubit_count`` of the\n converted circuit is determined by the largest destination qubit\n index. 
In the above example, the largest index is 5, so the\n converted circuit is for 6 qubits.\n \"\"\"\n\n def __init__(self, qubit_mapping: Mapping[int, int]):\n if len(qubit_mapping) != len(set(qubit_mapping.values())):\n raise ValueError(\n f\"qubit_mapping has duplicated indices in values: {qubit_mapping}\"\n )\n self._qubit_mapping = qubit_mapping\n self._max_index = max(qubit_mapping.values())\n\n def __call__(\n self, circuit: NonParametricQuantumCircuit\n ) -> NonParametricQuantumCircuit:\n transpiled = QuantumCircuit(self._max_index + 1)\n qm = self._qubit_mapping\n try:\n for gate in circuit.gates:\n ci = tuple(qm[index] for index in gate.control_indices)\n ti = tuple(qm[index] for index in gate.target_indices)\n g = gate._replace(control_indices=ci, target_indices=ti)\n transpiled.add_gate(g)\n except KeyError as e:\n raise ValueError(f\"Mapping for qubit {e.args} was not specified\")\n\n return transpiled\n","repo_name":"QunaSys/quri-parts","sub_path":"packages/circuit/quri_parts/circuit/transpile/qubit_remapping.py","file_name":"qubit_remapping.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"57"}
+{"seq_id":"17941733830","text":"from amat import *\n\n# load DataFrame, add chat ID mapping, and set local timezone\ndf = load_data(\"chat_df.pkl\", \"id_map.yaml\", \"US/Pacific\")\n\n# preview the most recent 10 messages\ndf.tail(10)[['text', 'contact', 'chat_id', 'timestamp']]\n\n# plot total texts per month\nplot_count(df, 'M', legend=False, cmap=\"Blues\")\n\n# plot length of texts from family since Jan 1, 2019 as rainbow line graph\nfamily = filt_func(df, 'contact', lambda x : 'Mom' in x or 'Dad' in x or 'Wombat' in x)\nplot_length(filt_date(family, start='Jan 1, 2019'), '3W', cmap='rainbow', kind='line')\n\n# pie chart breakdown of message activity per contact\nbreakdown(df, (2,1), by='contact', cmap='nipy_spectral')\n\n# heatmap of activity per weekday with blue colormap\nweekly_heatmap(df, cmap='Blues')\n\n# weekly heatmap of recent texts from Mom to me (is_from_me is False)\nrecent = filt_date(df, 'September 25, 2019')\nweekly_heatmap(filt_any(filt_any(recent, 'contact', 'Mom'), 'is_from_me', False))\n\n# all texts containing the word frabjous (case-insensitive)\ncontext_search(df, 'frabjous', False, 3)\n\n# print last ten messages from unmapped chat IDs\nfilt_any(df, 'contact', 'other').tail(10)[['date_local', 'text', 'chat_id', 'contact']]\n\n","repo_name":"parkersruth/amat","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"23295982458","text":"# insertion sort\n# time: O(n^2)\n\n\n# for loop goes 'forward'\n# while loop goes 'backward'\n\n\ndef insertion_sort(lst):\n if len(lst) < 2:\n return lst\n\n for i in range(1, len(lst)):\n while lst[i] < lst[i - 1]:\n # this is an inplace swap\n # give them each a new value (swap)\n lst[i], lst[i - 1] = lst[i - 1], lst[i]\n if (i - 1) == 0:\n break\n i -= 1\n\n return lst\n","repo_name":"Patricia888/data-structures-and-algorithms","sub_path":"sorting_algos/insertion_sort/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"26512676388","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: 
https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\nimport pymysql\n\nclass WanfangPipeline(object):\n\n def __init__(self):\n self.mysql_client = pymysql.Connect(\n '127.0.0.1','root','ljh1314',\n 'wanfang',charset='utf8',\n )\n\n self.cursor = self.mysql_client.cursor()\n\n def process_item(self, item, spider):\n print('1111111111111111111111111111111111')\n\n insertSql,insertData = item.get_sql_data(dict(item))\n # print(insertSql,insertData)\n\n try:\n self.cursor.execute(insertSql,insertData)\n self.mysql_client.commit()\n print('Data inserted successfully',dict(item)['title'])\n except Exception as err:\n print(err)\n print('Data insert failed',dict(item)['title'])\n self.mysql_client.rollback()\n return item\n\n\n def close_spider(self,spider0):\n self.mysql_client.close()\n self.cursor.close()\n\n","repo_name":"wangxuyongkang/chengxuyuanhh","sub_path":"Code/wanfang分布式 2/wanfang/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"4836059216","text":"from urllib.parse import urlparse\nfrom argparse import ArgumentTypeError\n\nfrom .target import Target\n\n\nclass HostParseError(ValueError, ArgumentTypeError):\n # Note: need to inherit ArgumentTypeError so the custom exception\n # messages get shown to the users properly\n # by L{argparse.ArgumentParser._get_value}\n\n def __init__(self, message):\n super(HostParseError, self).__init__(\"Target host: \" + message)\n\n\nclass PortNotIntError(HostParseError):\n def __init__(self, hostname):\n super(PortNotIntError, self).__init__(\n \"Wrong port specification on Host: {}\".format(hostname)\n )\n\n\nclass ParseHosts(dict):\n def __init__(self, arg):\n \"\"\"\n arg is a string with hosts in socket format username@host:port\n \"\"\"\n x = urlparse(\"{}{}\".format(\"//\", arg))\n try:\n if x.port:\n keyname = \"{}:{}\".format(x.hostname, x.port)\n port = x.port\n else:\n keyname = x.hostname\n port = 22\n\n username = x.username if x.username else \"root\"\n\n host = [(keyname, Target(x.hostname, port, username))]\n except ValueError:\n raise PortNotIntError(x.hostname)\n super(ParseHosts, self).__init__(host)\n","repo_name":"openSUSE/repose","sub_path":"repose/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"}
+{"seq_id":"17146257864","text":"\"\"\"Quick-and-dirty text classifier based on phrase weights in CSV.\n\nFor example::\n\n classify('A pudding cup is the perfect lunch idea or '\n 'wholesome snack for your kids.')\n\nmight result in::\n\n {'food': 0.313, 'children': 0.125}\n\n\n\"\"\"\nimport collections\nimport csv\nimport itertools\nimport re\n\nimport boto\nfrom django.conf import settings\n\nfrom targetshare.utils import atan_norm\n\n\ndef _iter_strip(iterable):\n for item in iterable:\n yield item.strip()\n\n\nclass SimpleWeights(dict):\n\n __slots__ = ()\n\n COLUMNS = ('topic', 'phrase', 'weight', 'skip')\n\n AbstractPhraseWeight = collections.namedtuple('AbstractPhraseWeight', COLUMNS[1:] + ('pattern',))\n\n class PhraseWeight(AbstractPhraseWeight):\n\n __slots__ = ()\n\n def __new__(cls, phrase, weight, skip, pattern=None):\n if pattern is None:\n space_optional = re.sub(r'\\s+', r'(?:-|\\s+)?', phrase)\n pattern = re.compile(r'\\b{}\\b'.format(space_optional), re.I)\n return super(SimpleWeights.PhraseWeight, cls).__new__(cls, phrase, weight, skip, pattern)\n\n def 
__repr__(self):\n return '<{}: {!r} [{}]>'.format(\n self.__class__.__name__,\n self.phrase,\n 'SKIP' if self.skip else self.weight,\n )\n\n def get_phrase_weight(self, corpus):\n count = 0\n\n for match in self.pattern.finditer(corpus):\n if self.skip:\n raise SimpleWeights.SkipPhrase(self.phrase, match)\n\n count += self.weight\n\n return count\n\n class SkipPhrase(Exception):\n pass\n\n @classmethod\n def _read(cls, handle):\n reader = csv.reader(handle)\n\n # Check for header row:\n start = next(reader)\n if any(value != column_name for (column_name, value) in itertools.izip(cls.COLUMNS, start)):\n # No header, this looks like a data row\n yield _iter_strip(start)\n\n for row in reader:\n yield _iter_strip(row)\n\n @classmethod\n def load(cls, handle):\n self = cls()\n\n for (topic, phrase, weight, skip) in cls._read(handle):\n weight = float(weight) if weight else None\n skip = bool(int(skip)) if skip else False\n phrase_weight = cls.PhraseWeight(phrase, weight, skip)\n phrase_list = self.setdefault(topic, [])\n if skip:\n # Insert skips at beginning, so classify() checks these first:\n phrase_list.insert(0, phrase_weight)\n else:\n phrase_list.append(phrase_weight)\n\n return self\n\n def iter_topics(self, corpus, *topics):\n for topic in (topics or self.iterkeys()):\n weight = 0\n for phrase_info in self.get(topic, ()):\n try:\n weight += phrase_info.get_phrase_weight(corpus)\n except self.SkipPhrase:\n yield (topic, 0)\n break\n else:\n yield (topic, weight)\n\n def classify(self, corpus, *topics):\n \"\"\"Classify `corpus` based on number of occurrences of words and phrases, and their\n weights, in the SimpleWeights dictionary.\n\n By default, `corpus` is classified for all topics for which there are weights.\n Alternatively, topics may be specified as arbitrary arguments:\n\n SIMPLE_WEIGHTS.classify(corpus, 'healthcare', 'cooking', ...)\n\n \"\"\"\n return {topic: atan_norm(score) for (topic, score) in self.iter_topics(corpus, *topics)}\n\n\ndef s3_key_xreadlines(bucket_name='ef-techops', key_name='data/topics.csv'):\n if not (settings.AWS_ACCESS_KEY_ID and settings.AWS_SECRET_ACCESS_KEY):\n return\n\n conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(bucket_name, validate=False)\n key = bucket.get_key(key_name) # FIXME: upgrade boto and use validate=False?\n\n # Re-chunk by line:\n line = ''\n for chunk in key:\n for byte in chunk:\n line += byte\n if byte == '\\n':\n yield line\n line = ''\n\n # Yield any remainder (for files that do not end with newline):\n if line:\n yield line\n\n\nSIMPLE_WEIGHTS = SimpleWeights.load(s3_key_xreadlines()) # TODO: improve process?\n\nclassify = SIMPLE_WEIGHTS.classify\n","repo_name":"edgeflip/edgeflip","sub_path":"targetshare/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"16011424740","text":"# -*- coding: utf-8 -*-\n\n#\n# HMM model base of HMM Aligner\n# Simon Fraser University\n# NLP Lab\n#\n# This is the base model for HMM\n#\nimport sys\nimport time\nimport numpy as np\nfrom math import log\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom loggers import logging\nfrom models.modelBase import Task\nfrom models.modelBase import AlignmentModelBase as Base\nfrom evaluators.evaluator import evaluate\n__version__ = \"0.4a\"\n\n\nclass AlignmentModelBase(Base):\n def __init__(self):\n if \"nullEmissionProb\" not in 
vars(self):\n self.nullEmissionProb = 0.000005\n if \"task\" not in vars(self):\n self.task = None\n\n if \"t\" not in vars(self):\n self.t = defaultdict(float)\n if \"eLengthSet\" not in vars(self):\n self.eLengthSet = defaultdict(int)\n if \"a\" not in vars(self):\n self.a = [[[]]]\n if \"pi\" not in vars(self):\n self.pi = []\n\n if \"logger\" not in vars(self):\n self.logger = logging.getLogger('HMMBASE')\n if \"modelComponents\" not in vars(self):\n self.modelComponents = [\"t\", \"pi\", \"a\", \"eLengthSet\"]\n Base.__init__(self)\n return\n\n def initialiseParameter(self, Len):\n doubleLen = 2 * Len\n tmp = 1.0 / Len\n for z in range(Len):\n for y in range(Len):\n for x in range(Len + 1):\n self.a[x][z][y] = tmp\n tmp = 1.0 / doubleLen\n for x in range(Len):\n self.pi[x] = tmp\n return\n\n def forwardBackward(self, f, e, tSmall, a):\n alpha = [[0.0 for x in range(len(e))] for y in range(len(f))]\n alphaScale = [0.0 for x in range(len(f))]\n alphaSum = 0\n\n for j in range(len(e)):\n alpha[0][j] = self.pi[j] * tSmall[0][j]\n alphaSum += alpha[0][j]\n\n alphaScale[0] = 1 / alphaSum\n for j in range(len(e)):\n alpha[0][j] *= alphaScale[0]\n\n for i in range(1, len(f)):\n alphaSum = 0\n for j in range(len(e)):\n total = 0\n for prev_j in range(len(e)):\n total += alpha[i - 1][prev_j] * a[prev_j][j]\n alpha[i][j] = tSmall[i][j] * total\n alphaSum += alpha[i][j]\n\n alphaScale[i] = 1.0 / alphaSum\n for j in range(len(e)):\n alpha[i][j] = alphaScale[i] * alpha[i][j]\n\n beta = [[0.0 for x in range(len(e))] for y in range(len(f))]\n for j in range(len(e)):\n beta[len(f) - 1][j] = alphaScale[len(f) - 1]\n\n for i in range(len(f) - 2, -1, -1):\n for j in range(len(e)):\n total = 0\n for next_j in range(len(e)):\n total += (beta[i + 1][next_j] * a[j][next_j] *\n tSmall[i + 1][next_j])\n beta[i][j] = alphaScale[i] * total\n return alpha, alphaScale, beta\n\n def maxTargetSentenceLength(self, dataset):\n maxLength = 0\n eLengthSet = defaultdict(int)\n for (f, e, alignment) in dataset:\n tempLength = len(e)\n if tempLength > maxLength:\n maxLength = tempLength\n eLengthSet[tempLength] += 1\n return (maxLength, eLengthSet)\n\n def baumWelch(self, dataset, iterations=5, index=0):\n if not self.task:\n self.task = Task(\"Aligner\", \"HMMBaumWelchOI\" + str(iterations))\n self.logger.info(\"Starting Training Process\")\n self.logger.info(\"Training size: \" + str(len(dataset)))\n startTime = time.time()\n\n maxE, self.eLengthSet = self.maxTargetSentenceLength(dataset)\n self.logger.info(\"Maximum Target sentence length: \" + str(maxE))\n\n self.a = [[[0.0 for x in range(maxE * 2)] for y in range(maxE * 2)]\n for z in range(maxE + 1)]\n self.pi = [0.0 for x in range(maxE * 2)]\n\n for iteration in range(iterations):\n self.logger.info(\"BaumWelch Iteration \" + str(iteration))\n\n logLikelihood = 0\n\n gamma = [[0.0 for x in range(maxE)] for y in range(maxE * 2)]\n gammaBiword = defaultdict(float)\n gammaSum_0 = [0.0 for x in range(maxE)]\n delta = [[[0.0 for x in range(maxE)] for y in range(maxE)]\n for z in range(maxE + 1)]\n\n self._beginningOfIteration(dataset)\n\n counter = 0\n for (f, e, alignment) in dataset:\n self.task.progress(\"BaumWelch iter %d, %d of %d\" %\n (iteration, counter, len(dataset),))\n counter += 1\n if iteration == 0:\n self.initialiseParameter(len(e))\n\n fLen, eLen = len(f), len(e)\n a = self.a[eLen]\n tSmall = [[self.t[(f[i][index], e[j][index])]\n for j in range(eLen)]\n for i in range(fLen)]\n\n alpha, alphaScale, beta = self.forwardBackward(f, e, tSmall, a)\n\n # Update 
logLikelihood\n for i in range(fLen):\n logLikelihood -= log(alphaScale[i])\n\n # Setting gamma\n self._updateGamma(f, e, gamma, alpha, beta, alphaScale)\n\n for i in range(fLen):\n for j in range(eLen):\n gammaBiword[(f[i][index], e[j][index])] += gamma[i][j]\n for j in range(eLen):\n gammaSum_0[j] += gamma[0][j]\n\n # Update delta\n c = [0.0 for i in range(eLen * 2)]\n for i in range(1, fLen):\n for prev_j in range(eLen):\n for j in range(eLen):\n c[eLen - 1 + j - prev_j] += (alpha[i - 1][prev_j] *\n beta[i][j] *\n a[prev_j][j] *\n tSmall[i][j])\n\n for prev_j in range(eLen):\n for j in range(eLen):\n delta[eLen][prev_j][j] += c[eLen - 1 + j - prev_j]\n # end of loop over dataset\n\n self.logger.info(\"likelihood \" + str(logLikelihood))\n # M-Step\n self._updateEndOfIteration(maxE, delta, gammaSum_0, gammaBiword)\n\n self.endOfBaumWelch()\n endTime = time.time()\n self.logger.info(\"Training Complete, total time(seconds): %f\" %\n (endTime - startTime,))\n return\n\n def _beginningOfIteration(self, dataset):\n # self.lenDataset = len(dataset)\n # return\n raise NotImplementedError\n\n def _updateGamma(self, f, e, gamma, alpha, beta, alphaScale):\n # for i in range(len(f)):\n # for j in range(len(e)):\n # gamma[i][j] = alpha[i][j] * beta[i][j] / alphaScale[i]\n raise NotImplementedError\n\n def _updateEndOfIteration(self, maxE, delta, gammaSum_0, gammaBiword):\n # self.t.clear()\n # for Len in self.eLengthSet:\n # for prev_j in range(Len):\n # deltaSum = 0.0\n # for j in range(Len):\n # deltaSum += delta[Len][prev_j][j]\n # for j in range(Len):\n # self.a[Len][prev_j][j] = delta[Len][prev_j][j] /\\\n # (deltaSum + 1e-37)\n\n # for i in range(maxE):\n # self.pi[i] = gammaSum_0[i] * (1.0 / self.lenDataset)\n\n # gammaEWord = defaultdict(float)\n # for f, e in gammaBiword:\n # gammaEWord[e] += gammaBiword[(f, e)]\n # for f, e in gammaBiword:\n # self.t[(f, e)] = gammaBiword[(f, e)] / (gammaEWord[e] + 1e-37)\n # return\n raise NotImplementedError\n\n def endOfBaumWelch(self):\n # Apply final smoothing here\n raise NotImplementedError\n\n def tProbability(self, f, e, index=0):\n v = 163303\n if (f[index], e[index]) in self.t:\n return self.t[(f[index], e[index])]\n if e[index] == \"null\":\n return self.nullEmissionProb\n return 1.0 / v\n\n def aProbability(self, prev_j, j, targetLength):\n if targetLength in self.eLengthSet:\n return self.a[targetLength][prev_j][j]\n return 1.0 / targetLength\n\n def logViterbi(self, f, e):\n e = deepcopy(e)\n fLen, eLen = len(f), len(e)\n for i in range(eLen):\n e.append((\"null\", \"null\"))\n score = np.zeros((fLen, eLen * 2))\n prev_j = np.zeros((fLen, eLen * 2))\n\n for i in range(fLen):\n for j in range(eLen * 2):\n score[i][j] = log(self.tProbability(f[i], e[j]))\n if i == 0:\n if j < len(self.pi) and self.pi[j] != 0:\n score[i][j] += log(self.pi[j])\n else:\n score[i][j] = - sys.maxint - 1\n else:\n # Find the best alignment for f[i-1]\n maxScore = -sys.maxint - 1\n bestPrev_j = -sys.maxint - 1\n for jPrev in range(eLen * 2):\n aPr = self.aProbability(jPrev, j, eLen)\n if aPr == 0:\n continue\n temp = score[i - 1][jPrev] + log(aPr)\n if temp > maxScore:\n maxScore = temp\n bestPrev_j = jPrev\n\n score[i][j] += maxScore\n prev_j[i][j] = bestPrev_j\n\n maxScore = -sys.maxint - 1\n best_j = 0\n for j in range(eLen * 2):\n if score[fLen - 1][j] > maxScore:\n maxScore = score[fLen - 1][j]\n best_j = j\n\n trace = [(best_j + 1, )]\n i = fLen - 1\n j = best_j\n\n while (i > 0):\n j = int(prev_j[i][j])\n trace = [(j + 1, )] + trace\n i = i - 1\n return 
trace\n\n def decodeSentence(self, sentence):\n f, e, alignment = sentence\n sentenceAlignment = []\n bestAlign = self.logViterbi(f, e)\n\n for i in range(len(bestAlign)):\n\n if bestAlign[i][0] <= len(e):\n if len(bestAlign[i]) > 1 and \"typeList\" in vars(self):\n sentenceAlignment.append(\n (i + 1, bestAlign[i][0],\n self.typeList[bestAlign[i][1]]))\n else:\n sentenceAlignment.append((i + 1, bestAlign[i][0]))\n return sentenceAlignment\n","repo_name":"sfu-natlang/HMM-Aligner","sub_path":"src/models/Old/HMMBase.py","file_name":"HMMBase.py","file_ext":"py","file_size_in_byte":10631,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"57"} +{"seq_id":"38352213675","text":"from . import Library\n\ntry:\n import click\nexcept ImportError as e:\n raise ImportError(\"click must be installed to use the slider cli\") from e\n\n\n@click.group()\ndef main():\n \"\"\"Slider utilities.\n \"\"\"\n\n\n@main.command()\n@click.argument(\n 'beatmaps',\n type=click.Path(exists=True, file_okay=False),\n)\n@click.option(\n '--recurse/--no-recurse',\n help='Recurse through ``path`` searching for beatmaps?',\n default=True,\n)\n@click.option(\n '--progress/--no-progress',\n help='Show a progress bar?',\n default=True,\n)\n@click.option(\n '--skip-exceptions/--no-skip-exceptions',\n help='Skip beatmap files that cause exceptions rather than exiting?',\n default=False,\n)\ndef library(beatmaps, recurse, progress, skip_exceptions):\n \"\"\"Create a slider database from a directory of beatmaps.\n \"\"\"\n Library.create_db(\n beatmaps,\n recurse=recurse,\n show_progress=progress,\n skip_exceptions=skip_exceptions\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"llllllllll/slider","sub_path":"slider/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"57"} +{"seq_id":"26006154775","text":"import os\nimport argparse\nfrom utils import load_json, save_json\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_dir', type=str, default='handa/H32384_detection_result')\n args = parser.parse_args()\n\n images = [img for img in os.listdir(args.input_dir) if img.endswith('.jpg')]\n images.sort(key=lambda x: int(x.replace('-', '_').split('_')[2]))\n\n ret, all_images = [], []\n for img in images:\n ret.append({'img': os.path.join(args.input_dir, img), 'src': 'other', 'lab': 0})\n all_images.append(os.path.join(args.input_dir, img))\n save_json([ret], 'handa/H32384_classification.json')\n\n with open('handa/H32384_detection_result/log_case_test_2.txt', encoding='utf-8') as fin:\n chars = [line.split('\\t')[2] for line in fin.readlines()]\n print(chars)\n assert len(chars) == len(images)\n all_chars = [(ch, img) for ch, img in zip(chars, all_images)]\n com_ret = []\n for idx in range(len(all_chars)):\n com_ret.append(({\n 'book_name': 'H32384', 'row_order': 0, 'characters': all_chars,\n }, idx))\n save_json(com_ret, 'handa/H32384_complete.json')\n\n\ndef main2():\n input_dir = 'handa/H32384_detection_with_label'\n images = [img for img in os.listdir(input_dir) if img.endswith('.jpg')]\n images.sort(key=lambda x: int(x.replace('-', '_').split('_')[2]))\n\n with open(f'{input_dir}/labels.txt', encoding='utf-8') as fin:\n lines = fin.readlines()\n assert len(lines) == len(images)\n ret = []\n all_chars, characters = [], []\n chat_to_label = load_json('handa/oracle_classification_chant_char_to_label.json')\n for idx, (img, 
line) in enumerate(zip(images, lines)):\n line = line.strip()\n to_complete = '——' in line\n ch = line.split('——')[0]\n img = os.path.join(input_dir, img)\n all_chars.append((img, ch, to_complete, idx))\n characters.append((ch, img))\n if to_complete:\n ret.append({'img': img, 'src': 'other', 'lab': chat_to_label[ch]})\n save_json([ret], 'handa/H32384_classification_with_label.json')\n ret = []\n for img, ch, to_complete, idx in all_chars:\n if to_complete:\n ret.append(({\n 'book_name': 'H32384', 'row_order': 0, 'characters': characters,\n }, idx))\n save_json(ret, 'handa/H32384_complete_with_label.json')\n\n\nif __name__ == '__main__':\n # main()\n main2()\n","repo_name":"Raincleared-Song/oracle_complete_albef","sub_path":"scripts/process_dir.py","file_name":"process_dir.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"12413805289","text":"# Based on\n# https://github.com/intel-isl/Open3D/blob/master/examples/python/open3d_tutorial.py\n\nimport IPython.display\nimport numpy as np\nimport open3d as o3d\nimport PIL.Image\n\n\ndef custom_draw_geometries(\n geoms,\n # Window Options\n window_name=\"PlaneSegmentation Python\",\n width=1920,\n height=1080,\n left=50,\n top=50,\n # Camera Viewpoint Parameters\n lookat=None,\n up=None,\n front=None,\n zoom=None,\n # Render Options\n point_size=1,\n):\n # Create a new Window\n vis = o3d.visualization.Visualizer()\n vis.create_window(\n window_name=window_name,\n width=width,\n height=height,\n left=left,\n top=top,\n visible=True,\n )\n\n # Add the provided geometries to the canvas\n for geom in geoms:\n vis.add_geometry(geom)\n\n # Change the render options\n render_options = vis.get_render_option()\n render_options.point_size = point_size\n\n # Change the viewpoint of the camera\n view_control = vis.get_view_control()\n view_control.set_lookat(lookat) if lookat else None\n view_control.set_up(up) if up else None\n view_control.set_front(front) if front else None\n view_control.set_zoom(zoom) if zoom else None\n vis.run()\n\n # Capture the image and display it in the jupyter notebook\n im = vis.capture_screen_float_buffer()\n vis.destroy_window()\n im = PIL.Image.fromarray((255 * np.asarray(im)).astype(np.uint8), \"RGB\")\n IPython.display.display(im)\n\n\no3d.visualization.draw_geometries = custom_draw_geometries\n","repo_name":"nachovizzo/open3d_cpp_python","sub_path":"custom_draw_geometries.py","file_name":"custom_draw_geometries.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"57"} +{"seq_id":"27679558112","text":"import unittest\nfrom odin.config.environment import config,HermodError,HermodWarning\nimport logging\nimport logging.config\nfrom logging import StreamHandler\nfrom logging.handlers import SocketHandler\nfrom StringIO import StringIO\nfrom pkg_resources import resource_stream\n\ndef getexcept(e):\n raise e\n\nclass EnvironmentTestCase(unittest.TestCase):\n def setUp(self):\n self.config = config()\n\n def config(self):\n sections = self.config.sections()\n testsection = ['GEM']\n for section in testsection:\n self.assertTrue(section in sections)\n\n def hermodWarning(self):\n self.assertRaises(HermodWarning,getexcept,HermodWarning)\n\n def hermodError(self):\n self.assertRaises(HermodError,getexcept,HermodError)\n\n def logger(self):\n log = StringIO()\n name=self.config.get('logging','configfile')\n file = 
resource_stream(\"odin.config\",name)\n logging.config.fileConfig(file)\n logger = logging.getLogger('')\n logger.handlers.pop(0)#(SocketHandler('localhost',9020))\n logger.addHandler(StreamHandler(log))\n #logger.warning(\"test root\")\n logger2=logging.getLogger('hermod')\n logger2.critical(\"test hermod\")\n log.seek(0)\n #self.assertEqual(log.readline(),\"test root\\n\")\n self.assertEqual(log.readline(),\"test hermod\\n\")\n\ndef test_suite():\n tests = [\n 'config',\n 'logger',\n 'hermodWarning',\n 'hermodError',\n ]\n return unittest.TestSuite(map(EnvironmentTestCase,tests))\n\nif __name__=='__main__':\n unittest.TextTestRunner(verbosity=3).run(test_suite())\n\n","repo_name":"Odin-SMR/hermod","sub_path":"src/odin.config/odin/config/tests/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"4585257129","text":"\"\"\"\ntest if a tree satisfies the BST property\n\nWhat is a BST? \n\"\"\"\nclass TreeNode:\n def __init__(self, left = None, right = None, key = None):\n self.left = left\n self.right = right\n self.key = key\n\ndef check_bst(root):\n max_num = float('inf')\n min_num = float('-inf')\n return check_bst_helper(root, max_num, min_num)\n\ndef check_bst_helper(root, max_num, min_num):\n if not root:\n return True\n elif root.key > max_num or root.key < min_num:\n print(root.key, max_num, min_num)\n return False\n return (check_bst_helper(root.left, root.key, min_num) and check_bst_helper(root.right, max_num, root.key))\n\n\nif __name__ == '__main__':\n nodeE = TreeNode(None,None, 9)\n nodeD = TreeNode(None,nodeE,7)\n left = TreeNode(None,nodeD,5)\n right = TreeNode(None,None,15)\n root = TreeNode(left, right, 10)\n print(check_bst(root))\n\n # 10\n # 5 15\n # 7\n # 2","repo_name":"zmatteson/epi-python","sub_path":"chapter_14/14_1.py","file_name":"14_1.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31204969824","text":"from rest_framework import serializers\n\nfrom product.models import ProductVersion, Product\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Product\n fields = (\n 'id',\n 'name',\n 'status',\n 'created_at'\n )\n\n\nclass ProductVersionSerializer(serializers.HyperlinkedModelSerializer):\n product = serializers.SerializerMethodField(read_only=True)\n\n def get_product(self, obj: ProductVersion):\n return ProductSerializer(obj.product).data\n\n class Meta:\n model = ProductVersion\n fields = (\n 'id',\n 'name',\n 'product',\n 'created_at'\n )\n","repo_name":"savelja-s/pv-py","sub_path":"product/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28891045272","text":"def is_small(node):\n return node == node.lower()\n\ndef dfs(nodes, connections, node, small_visited, sum, double_small) -> int:\n if node == \"end\":\n sum += 1\n return sum\n\n # Add this to visited and count paths\n if is_small(node):\n small_visited.add(node)\n\n # Count paths\n for next_node in connections[node]:\n if next_node == \"start\":\n continue\n\n if not next_node in small_visited:\n sum = dfs(nodes, connections, next_node, small_visited, sum, double_small) # Visit small node without changing double_small\n\n if double_small is False:\n if next_node 
in small_visited:\n small_visited.remove(next_node)\n sum = dfs(nodes, connections, next_node, small_visited, sum, True) # V\n small_visited.add(next_node)\n # Remove from visited\n if is_small(node):\n small_visited.remove(node)\n return sum\n\ndef solve1(nodes, connections) -> int:\n\n small_visited = set()\n sum = 0\n return dfs(nodes, connections, \"start\", small_visited, sum, True)\n\ndef solve2(nodes, connections) -> int:\n\n small_visited = set()\n sum = 0\n return dfs(nodes, connections, \"start\", small_visited, sum, False)\n\nwith open(\"12.txt\", \"r\") as file:\n s = file.read()\n\nconnections_raw = s[:-1].split(\"\\n\")\nnodes = set()\nconnections = {}\n\nfor c in connections_raw:\n n1, n2 = c.split(\"-\")\n\n if n1 not in nodes:\n nodes.add(n1)\n connections[n1] = []\n if n2 not in nodes:\n nodes.add(n2)\n connections[n2] = []\n\n connections[n1].append(n2)\n connections[n2].append(n1)\n\nprint(solve1(nodes, connections))\nprint(solve2(nodes, connections))\n","repo_name":"vuk119/AdventOfCode2021","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22942363065","text":"# https://leetcode.com/explore/challenge/card/august-leetcoding-challenge/549/week-1-august-1st-august-7th/3411/\n\n# Valid Palindrome\n\n# Given a string, determine if it is a palindrome, \n# considering only alphanumeric characters and ignoring cases.\n\n# Note: For the purpose of this problem, we define empty string as valid palindrome.\n\n# Example 1:\n# Input: \"A man, a plan, a canal: Panama\"\n# Output: true\n\n# Example 2:\n# Input: \"race a car\"\n# Output: false\n \n# Constraints:\n# s consists only of printable ASCII characters.\n\n\ndef isPalindrome(s: str) -> bool:\n\ts = s.lower()\n\tleft, right = 0, len(s) - 1\n\twhile left < right:\n\t\twhile left < right and not s[left].isalnum():\n\t\t\tleft += 1\n\t\twhile left < right and not s[right].isalnum():\n\t\t\tright -= 1\n\t\tif s[left] != s[right]:\n\t\t\treturn False\n\t\telse:\n\t\t\tleft += 1\n\t\t\tright -= 1\n\treturn True\n\nassert(isPalindrome(\"A man, a plan, a canal: Panama\") == True)\nassert(isPalindrome(\"race a car\") == False)\nassert(isPalindrome(\"<.\") == True)\nassert(isPalindrome(\"1s\") == False)\n\n\n","repo_name":"candyer/leetcode","sub_path":"2020 August LeetCoding Challenge/03_isPalindrome.py","file_name":"03_isPalindrome.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"11791183335","text":"import os\nimport json\nimport traceback\n\nfrom flask import request, jsonify\nfrom flasgger.utils import swag_from\n\nfrom . 
import api, compare_answers, show_differences\n\nINCORRECT_ANSWER_THRESHOLD = float(os.getenv('INCORRECT_ANSWER_THRESHOLD'))\n\n\n@api.route(\"/get_similarity\", methods=[\"GET\"])\n@swag_from(\"swaggers/get_similarity_api.yml\")\ndef get_similarity_api():\n    try:\n        req = request.get_json()\n        language = req[\"language\"]\n        correct_phrase = req[\"phrase1\"]\n        comparing_phrase = req[\"phrase2\"]\n\n        # compare phrases\n        comparing_result = compare_answers(language, correct_phrase, comparing_phrase)\n        is_equal = comparing_result[\"is_equal\"]\n        equality_rate = comparing_result[\"equality_rate\"]\n\n        # generate tips for user\n        differences = f\"{correct_phrase}\"\n        if equality_rate > INCORRECT_ANSWER_THRESHOLD:\n            differences = show_differences(correct_phrase, comparing_phrase)\n\n        return jsonify(\n            {\n                \"status\": 200,\n                \"is_equal\": is_equal,\n                \"equality_rate\": equality_rate,\n                # return the computed diff instead of an empty string\n                \"differences\": differences,\n            }\n        )\n    except Exception as e:\n        return jsonify(\n            {\n                \"status\": 500,\n                \"message\": f\"Server internal error. {e}\",\n                \"traceback\": f\"{traceback.format_exc()}\",\n            }\n        )\n","repo_name":"Alkhimovmv/langflow","sub_path":"nlp_module/api/get_similarity_api.py","file_name":"get_similarity_api.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"6223101161","text":"def partition(array, start, end):\n    pivot = array[start]\n    left = start + 1\n    right = end\n\n    while left <= right:\n        while left <= right and array[right] >= pivot:\n            right = right - 1\n        while left <= right and array[left] <= pivot:\n            left = left + 1\n        if left <= right:\n            array[left], array[right] = array[right], array[left]\n\n    array[start], array[right] = array[right], array[start]\n    return right\n\n\ndef quick_sort(array, start=None, end=None):\n    if start is None:\n        start = 0\n    if end is None:\n        end = len(array) - 1\n    if start >= end:\n        return\n    p = partition(array, start, end)\n    quick_sort(array, start, p-1)\n    quick_sort(array, p+1, end)\n\n\narray = [29, 99, 27, 41, 66, 28, 44, 78, 87,\n         19, 31, 76, 58, 88, 83, 97, 12, 21, 44]\n\nquick_sort(array)\nprint(array)\n","repo_name":"kiruh/python-interview","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"73221510259","text":"# Given two integer arrays nums1 and nums2, return an array of their intersection. Each element in the result must appear as many times as it shows in both arrays and you may return the result in any order.\n# Example 1:\n\n# Input: nums1 = [1,2,2,1], nums2 = [2,2]\n# Output: [2,2]\n# Example 2:\n\n# Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\n# Output: [4,9]\n# Explanation: [9,4] is also accepted.\n\n\n# imports needed to make the snippet runnable outside the LeetCode harness\nfrom typing import List\nfrom collections import Counter\n\n\nclass Solution:\n    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n        if len(nums1) > len(nums2): return self.intersect(nums2, nums1)\n        \n        count = Counter(nums1)\n        vect = []\n        for x in nums2:\n            if count[x] > 0:\n                vect.append(x)\n                count[x] -= 1\n        return vect\n        \n","repo_name":"AnaMTF/LeetCode","sub_path":"350. Intersection of Two Arrays II.py","file_name":"350. 
Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"23811564004","text":"from discord import *\n\ndef admin_get(self):\n return self.server_permissions.administrator\n\nMember.admin = property(admin_get)\n\ndef dict_get(self):\n data = {}\n data['message_id'] = getattr(self,'id',None)\n channel = getattr(self,'channel',None)\n if channel != None:\n data['channel_id'] = getattr(channel,'id',None)\n else:\n data['channel_id'] = None\n return data\n\nMessage.id_dict = property(dict_get)\n\ndef in_server(self,serverid):\n found = False\n for server in self.servers:\n if server.id == serverid:\n found = True\n break\n return found\n\nClient.in_server = property(in_server)\n","repo_name":"codacy-badger/royale-bot","sub_path":"discord_wrapper.py","file_name":"discord_wrapper.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11454196338","text":"from ast import literal_eval\nfrom copy import deepcopy\nfrom itertools import permutations\n\n\ndef load_data(path):\n with open(path, 'r') as f:\n return [literal_eval(l) for l in f.read().splitlines()]\n\n\ndef iter_nodes_with_literal(number):\n if isinstance(number[0], int):\n yield number\n if isinstance(number[1], int):\n return\n else:\n yield from iter_nodes_with_literal(number[0])\n if isinstance(number[1], int):\n yield number\n else:\n yield from iter_nodes_with_literal(number[1])\n\n\ndef snail_sum(numbers):\n total = numbers[0]\n for number in numbers[1:]:\n total = add(total, number)\n return total\n\n\ndef add(left, right):\n return reduce([left, right])\n\n\ndef reduce(number):\n while True:\n if try_explode(number, root=number, depth=0):\n continue\n if try_split(number):\n continue\n break\n return number\n\n\ndef try_explode(number, root, depth=0):\n for i, n in enumerate(number):\n if isinstance(n, int):\n continue\n\n if depth >= 3:\n # explode\n explode(root, n)\n number[i] = 0\n return True\n\n # recurse\n elif try_explode(n, root=root, depth=depth+1):\n return True\n\n return False\n\n\ndef try_split(number):\n for i, n in enumerate(number):\n if isinstance(n, int):\n # split\n if n >= 10:\n number[i] = [n//2, n//2 + n % 2]\n return True\n\n # recurse\n elif try_split(n):\n return True\n\n return False\n\n\ndef explode(root, node):\n prev = None\n for n in iter_nodes_with_literal(root):\n if n is node:\n if prev is None:\n pass\n elif isinstance(prev[1], int):\n prev[1] += node[0]\n else:\n prev[0] += node[0]\n elif prev is node:\n if isinstance(n[0], int):\n n[0] += node[1]\n else:\n n[1] += node[1]\n prev = n\n\n\ndef magnitude(number):\n if isinstance(number, int):\n return number\n return 3 * magnitude(number[0]) + 2 * magnitude(number[1])\n\n\ndef part1(data):\n total = snail_sum(deepcopy(data))\n return magnitude(total)\n\n\ndef part2(data):\n magnitudes = []\n for a, b in permutations(data, 2):\n magnitudes.append(\n magnitude(add(deepcopy(a), deepcopy(b)))\n )\n return max(magnitudes)\n\n\ndef main():\n data = load_data('input.txt')\n print(part1(data))\n print(part2(data))\n\n\nif __name__ == '__main__':\n main()\n\n\nclass Test:\n import pytest\n\n def test_iter_nodes(self):\n assert list(iter_nodes_with_literal([1, 2])) == [[1, 2]]\n assert list(iter_nodes_with_literal([[1, 2], 3])) == [\n [1, 2], [[1, 2], 3]]\n assert list(iter_nodes_with_literal([1, [2, 3]])) == [\n [1, [2, 3]], [2, 
3]]\n assert list(iter_nodes_with_literal([[1, 2], [3, 4]])) == [\n [1, 2], [3, 4]]\n\n def test_split_even(self):\n assert reduce([10, 0]) == [[5, 5], 0]\n\n def test_split_odd(self):\n assert reduce([11, 0]) == [[5, 6], 0]\n\n def test_split_multiple(self):\n assert reduce([11, 10]) == [[5, 6], [5, 5]]\n\n def test_split_example(self):\n number = [[[[0, 7], 4], [15, [0, 13]]], [1, 1]]\n assert try_split(number)\n assert number == [[[[0, 7], 4], [[7, 8], [0, 13]]], [1, 1]]\n assert try_split(number)\n assert number == [[[[0, 7], 4], [[7, 8], [0, [6, 7]]]], [1, 1]]\n\n def test_explode(self):\n assert reduce([[[[[9, 8], 1], 2], 3], 4]) == [[[[0, 9], 2], 3], 4]\n assert reduce([7, [6, [5, [4, [3, 2]]]]]) == [7, [6, [5, [7, 0]]]]\n assert reduce([[6, [5, [4, [3, 2]]]], 1]) == [[6, [5, [7, 0]]], 3]\n #assert reduce([[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]) == [[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]\n assert reduce([[3, [2, [8, 0]]], [9, [5, [4, [3, 2]]]]]) == [\n [3, [2, [8, 0]]], [9, [5, [7, 0]]]]\n\n def test_explode_example(self):\n number = [[[[[4, 3], 4], 4], [7, [[8, 4], 9]]], [1, 1]]\n assert try_explode(number, root=number)\n assert number == [[[[0, 7], 4], [7, [[8, 4], 9]]], [1, 1]]\n assert try_explode(number, root=number)\n assert number == [[[[0, 7], 4], [15, [0, 13]]], [1, 1]]\n\n def test_add(self):\n assert add([[[[4, 3], 4], 4], [7, [[8, 4], 9]]], [1, 1]) == [\n [[[0, 7], 4], [[7, 8], [6, 0]]], [8, 1]]\n\n def test_add_basic(self):\n assert add([1, 2], [[3, 4], 5]) == [[1, 2], [[3, 4], 5]]\n\n def test_add_multi_step(self):\n number = [[[0, [4, 5]], [0, 0]], [[[4, 5], [2, 6]], [9, 5]]]\n number = add(number, [7, [[[3, 7], [4, 3]], [[6, 3], [8, 8]]]])\n assert number == [[[[4, 0], [5, 4]], [[7, 7], [6, 0]]], [\n [8, [7, 7]], [[7, 9], [5, 0]]]]\n\n def test_magnitude(self):\n assert magnitude([[1, 2], [[3, 4], 5]]) == 143\n assert magnitude([[[[0, 7], 4], [[7, 8], [6, 0]]], [8, 1]]) == 1384\n assert magnitude([[[[1, 1], [2, 2]], [3, 3]], [4, 4]]) == 445\n assert magnitude([[[[3, 0], [5, 3]], [4, 4]], [5, 5]]) == 791\n assert magnitude([[[[5, 0], [7, 4]], [5, 5]], [6, 6]]) == 1137\n assert magnitude([[[[8, 7], [7, 7]], [[8, 6], [7, 7]]], [\n [[0, 7], [6, 6]], [8, 7]]]) == 3488\n\n def test_snail_sum1(self):\n numbers = [[1, 1], [2, 2], [3, 3], [4, 4]]\n assert snail_sum(numbers) == [[[[1, 1], [2, 2]], [3, 3]], [4, 4]]\n\n def test_snail_sum2(self):\n numbers = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]\n assert snail_sum(numbers) == [[[[3, 0], [5, 3]], [4, 4]], [5, 5]]\n\n def test_snail_sum3(self):\n numbers = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]]\n assert snail_sum(numbers) == [[[[5, 0], [7, 4]], [5, 5]], [6, 6]]\n\n def test_snail_sum_test_txt(self):\n assert snail_sum(load_data('test.txt')) == [[[[6, 6], [7, 6]], [[7, 7], [7, 0]]], [\n [[7, 7], [7, 7]], [[7, 8], [9, 9]]]]\n\n def test_snail_sum_test1_txt(self):\n assert snail_sum(load_data('test1.txt')) == [\n [[[8, 7], [7, 7]], [[8, 6], [7, 7]]], [[[0, 7], [6, 6]], [8, 7]]]\n\n def test_part1(self):\n assert part1(load_data('test.txt')) == 4140\n\n def test_part2(self):\n assert part2(load_data('test.txt')) == 3993\n","repo_name":"oatzy/advent_of_code_2021","sub_path":"day18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13076973677","text":"import random\nimport types\n\n\ndef lottery():\n for i in range(6):\n yield random.randint(1, 40)\n\n yield random.randint(1, 
15)\n\n\nprint(lottery())\n\nfor random_number in lottery():\n print(\"And the next number is... %d!\" % random_number)\n\n\ndef fib():\n a = 0\n b = 1\n while True:\n yield a\n a, b = b, a + b\n\n\nif type(fib()) == types.GeneratorType:\n print(\"Good, The fib function is a generator.\")\n\n counter = 0\n for n in fib():\n print(n)\n counter += 1\n if counter == 10:\n break\n","repo_name":"kento-sama/learn-python","sub_path":"generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39894627529","text":"from __future__ import print_function\nimport numpy as np\nfrom zsdg.utils import Pack\nfrom zsdg.dataset.corpora import SimDialCorpus, SYS, USR\nfrom zsdg.dataset.dataloader_bases import DataLoader, LongDataLoader\nimport logging\n\n\nclass ZslSMDDialDataLoader(DataLoader):\n def __init__(self, name, data, config, warmup_data=None):\n super(ZslSMDDialDataLoader, self).__init__(name)\n self.max_utt_size = config.max_utt_len\n\n self.data = self.flatten_dialog(data, config.backward_size)\n self.data_size = len(self.data)\n data_lens = [len(line.context) for line in self.data]\n if False:\n self.indexes = list(np.argsort(data_lens))[::-1]\n else:\n self.indexes = range(len(data_lens))\n\n # prepare indexes for warm up\n self.warmup_data = warmup_data\n if self.warmup_data is not None:\n self.warmup_size = len(self.warmup_data)\n self.warmup_indexes = range(self.warmup_size)\n self.warmup_flags = None\n self.warmup_num_batch = None\n\n def flatten_dialog(self, data, backward_size):\n results = []\n for dialog in data:\n for i in range(1, len(dialog)):\n e_id = i\n s_id = max(0, e_id - backward_size)\n response = dialog[i].copy()\n if response.speaker == USR:\n continue\n response['utt'] = self.pad_to(self.max_utt_size, response.utt, do_pad=False)\n # response['kb'] = [self.pad_to(self.max_utt_size, item, do_pad=True) for item in response.kb]\n\n contexts = []\n for turn in dialog[s_id:e_id]:\n turn['utt'] = self.pad_to(self.max_utt_size, turn.utt, do_pad=False)\n contexts.append(turn)\n results.append(Pack(context=contexts, response=response))\n return results\n\n def epoch_init(self, config, shuffle=True, verbose=True):\n super(ZslSMDDialDataLoader, self).epoch_init(config, shuffle, verbose)\n self.warmup_flags = [False] * self.num_batch\n\n if self.warmup_data is None:\n return\n\n self.warmup_num_batch = int(self.warmup_size / config.batch_size)\n for i in range(self.warmup_num_batch):\n self.batch_indexes.append(np.random.choice(self.warmup_indexes, config.batch_size, replace=False))\n self.warmup_flags.append(True)\n\n if shuffle:\n temp_batch_id = range(len(self.warmup_flags))\n np.random.shuffle(temp_batch_id)\n self.batch_indexes = [self.batch_indexes[i] for i in temp_batch_id]\n self.warmup_flags = [self.warmup_flags[i] for i in temp_batch_id]\n\n if verbose:\n self.logger.info(\"%s add with %d warm up batches\" % (self.name, self.warmup_num_batch))\n\n def next_batch(self):\n if self.ptr < self.num_batch:\n is_warmup = self.warmup_flags[self.ptr]\n selected_ids = self.batch_indexes[self.ptr]\n self.ptr += 1\n\n if is_warmup:\n return self._prepare_warmup_batch(selected_ids)\n else:\n return self._prepare_batch(selected_ids)\n else:\n return None\n\n def _prepare_batch(self, selected_index):\n # the batch index, the starting point and end point for segment\n rows = [self.data[idx] for idx in selected_index]\n\n cxt_lens, ctx_utts = [], []\n out_utts, 
out_lens = [], []\n domains, domain_metas = [], []\n\n for row in rows:\n in_row, out_row = row.context, row.response\n\n # source context\n batch_ctx = []\n #for item in out_row.kb:\n # batch_ctx.append(item)\n for turn in in_row:\n batch_ctx.append(self.pad_to(self.max_utt_size, turn.utt))\n\n cxt_lens.append(len(batch_ctx))\n ctx_utts.append(batch_ctx)\n\n # target response\n out_utt = [t for idx, t in enumerate(out_row.utt)]\n out_utts.append(out_utt)\n out_lens.append(len(out_utt))\n domains.append(out_row.domain)\n domain_metas.append(out_row.domain_id)\n\n domain_metas = np.array(domain_metas)\n vec_ctx_lens = np.array(cxt_lens)\n max_ctx_len = np.max(vec_ctx_lens)\n vec_ctx_utts = np.zeros((self.batch_size, max_ctx_len, self.max_utt_size), dtype=np.int32)\n vec_ctx_confs = np.ones((self.batch_size, max_ctx_len), dtype=np.float32)\n\n vec_out_utts = np.zeros((self.batch_size, np.max(out_lens)), dtype=np.int32)\n vec_out_lens = np.array(out_lens)\n\n for b_id in range(self.batch_size):\n vec_out_utts[b_id, 0:vec_out_lens[b_id]] = out_utts[b_id]\n vec_ctx_utts[b_id, 0:vec_ctx_lens[b_id], :] = ctx_utts[b_id]\n\n return Pack(context_lens=vec_ctx_lens, contexts=vec_ctx_utts, context_confs=vec_ctx_confs,\n output_lens=vec_out_lens, outputs=vec_out_utts,\n domains=domains, domain_metas=domain_metas)\n\n def _prepare_warmup_batch(self, selected_ids):\n # the batch index, the starting point and end point for segment\n rows = [self.warmup_data[idx] for idx in selected_ids]\n out_utts, out_lens = [], []\n out_acts, out_act_lens = [], []\n domains, domain_metas = [], []\n\n for row in rows:\n out_utt = [t for idx, t in enumerate(row.utt)]\n\n # target response\n out_acts.append(row.actions)\n out_act_lens.append(len(row.actions))\n\n out_utts.append(out_utt)\n out_lens.append(len(out_utt))\n\n domains.append(row.domain)\n domain_metas.append(row.domain_id)\n\n vec_out_lens = np.array(out_lens)\n domain_metas = np.array(domain_metas)\n vec_out_utts = np.zeros((self.batch_size, np.max(out_lens)), dtype=np.int32)\n vec_out_acts = np.zeros((self.batch_size, np.max(out_act_lens)), dtype=np.int32)\n\n for b_id in range(self.batch_size):\n vec_out_utts[b_id, 0:vec_out_lens[b_id]] = out_utts[b_id]\n vec_out_acts[b_id, 0:out_act_lens[b_id]] = out_acts[b_id]\n\n return Pack(output_lens=vec_out_lens, outputs=vec_out_utts, output_actions=vec_out_acts,\n domains=domains, domain_metas=domain_metas)\n\n\nclass SimDialDataLoader(LongDataLoader):\n def __init__(self, name, data, domain_meta, config, warmup_data=None):\n super(SimDialDataLoader, self).__init__(name)\n self.max_utt_size = config.max_utt_len\n self.data = data\n self.domain_meta = self.prepare_domain_meta(domain_meta)\n self.data_size = len(data)\n self.data_lens = [len(line) for line in self.data]\n self.indexes = list(np.argsort(self.data_lens))[::-1]\n\n # prepare indexes for warm up\n self.warmup_data = warmup_data\n if self.warmup_data is not None:\n self.warmup_size = len(self.warmup_data)\n self.warmup_indexes = range(self.warmup_size)\n self.warmup_flags = None\n self.warmup_num_batch = None\n\n # Pretty printing\n covered_data = np.where(np.array(self.data_lens) < config.backward_size)[0]\n coverage = len(covered_data) / float(self.data_size)\n msg = \"Initialized {} Max len {} Min len {} Avg len {} Max ctx {} covers {}\" \\\n .format(self.name, np.max(self.data_lens), np.min(self.data_lens),\n np.average(self.data_lens), config.backward_size, coverage)\n self.logger.info(msg)\n\n def prepare_domain_meta(self, domain_meta):\n # 
pre-compute domain meta since it's independent of dialogs\n # domain description just slot names\n # domain sys/usr templates example sys or user uttearnecs\n vec_domain_meta = {}\n\n for domain, meta in domain_meta.items():\n if type(meta) is not Pack:\n continue\n sys_templates = []\n sys_acts = []\n usr_templates = []\n usr_acts = []\n for template, act in zip(meta.templates, meta.acts):\n padded_template = self.pad_to(self.max_utt_size, template)\n if domain_meta.sys_id in padded_template:\n # warmup_data.append(Pack(domain=domain, utt=template, act=act))\n sys_templates.append(padded_template)\n sys_acts.append(act)\n else:\n usr_templates.append(padded_template)\n usr_acts.append(act)\n\n padded_desc = self.pad_to(self.max_utt_size, meta.description)\n vec_domain_meta[domain] = Pack(sys_templates=sys_templates,\n sys_acts=sys_acts,\n usr_templates=usr_templates,\n usr_acts=usr_acts,\n description=padded_desc)\n\n return vec_domain_meta\n\n def epoch_init(self, config, shuffle=True, verbose=True):\n super(SimDialDataLoader, self).epoch_init(config, shuffle, verbose)\n self.warmup_flags = [False] * self.num_batch\n\n if self.warmup_data is None:\n return\n\n self.warmup_num_batch = int(self.warmup_size/config.batch_size)\n for i in range(self.warmup_num_batch):\n self.grid_indexes.append(np.random.choice(self.warmup_indexes, config.batch_size, replace=False))\n self.warmup_flags.append(True)\n\n if shuffle:\n temp_batch_id = range(len(self.warmup_flags))\n np.random.shuffle(temp_batch_id)\n self.grid_indexes = [self.grid_indexes[i] for i in temp_batch_id]\n self.warmup_flags = [self.warmup_flags[i] for i in temp_batch_id]\n\n if verbose:\n self.logger.info(\"%s add with %d warm up batches\" % (self.name, self.warmup_num_batch))\n\n def next_batch(self):\n if self.ptr < self.num_batch:\n is_warmup = self.warmup_flags[self.ptr]\n current_grid = self.grid_indexes[self.ptr]\n\n if is_warmup:\n self.ptr += 1\n return self._prepare_warmup_batch(current_grid)\n else:\n if self.ptr > 0:\n prev_grid = self.grid_indexes[self.ptr - 1]\n else:\n prev_grid = None\n self.ptr += 1\n return self._prepare_batch(cur_grid=current_grid,\n prev_grid=prev_grid)\n else:\n return None\n\n def _prepare_batch(self, cur_grid, prev_grid):\n # the batch index, the starting point and end point for segment\n b_id, s_id, e_id = cur_grid\n\n batch_ids = self.batch_indexes[b_id]\n rows = [self.data[idx] for idx in batch_ids]\n cxt_lens, ctx_utts, ctx_confs = [], [], []\n out_utts, out_lens = [], []\n out_acts, out_act_lens = [], []\n # sys_templates, sys_acts, sys_lens = [], [], []\n # usr_templates, usr_acts, usr_lens = [], [], []\n domains, domain_metas= [], []\n\n for row in rows:\n if s_id < len(row) - 1:\n if s_id > 0:\n cut_row = row[0:1] + row[s_id+1:e_id]\n else:\n cut_row = row[s_id:e_id]\n\n in_row, out_row = cut_row[0:-1], cut_row[-1]\n out_utt = out_row.utt\n\n # source context\n cxt_lens.append(len(in_row))\n batch_ctx, batch_confs = [], []\n for turn in in_row:\n batch_ctx.append(self.pad_to(self.max_utt_size, turn.utt))\n batch_confs.append(turn.conf)\n\n ctx_utts.append(batch_ctx)\n ctx_confs.append(batch_confs)\n\n # target response\n out_utts.append(out_utt)\n out_lens.append(len(out_utt))\n\n out_acts.append(out_row.actions)\n out_act_lens.append(len(out_row.actions))\n\n domains.append(out_row.domain)\n domain_metas.append(self.domain_meta[out_row.domain].description)\n\n #sys_templates.append(self.domain_meta[out_row.domain].sys_templates)\n 
#sys_acts.append(self.domain_meta[out_row.domain].sys_acts)\n #sys_lens.append(len(sys_templates[-1]))\n\n #usr_templates.append(self.domain_meta[out_row.domain].usr_templates)\n #usr_acts.append(self.domain_meta[out_row.domain].usr_acts)\n #usr_lens.append(len(usr_templates[-1]))\n\n else:\n raise ValueError(\"s_id %d larger than row\" % s_id)\n\n vec_ctx_lens = np.array(cxt_lens)\n max_ctx_len = np.max(vec_ctx_lens)\n vec_ctx_utts = np.zeros((self.batch_size, max_ctx_len, self.max_utt_size), dtype=np.int32)\n vec_ctx_confs = np.zeros((self.batch_size, max_ctx_len), dtype=np.float32)\n\n vec_out_utts = np.zeros((self.batch_size, np.max(out_lens)), dtype=np.int32)\n vec_out_acts = np.zeros((self.batch_size, np.max(out_act_lens)), dtype=np.int32)\n vec_out_lens = np.array(out_lens)\n\n #vec_sys_templates = np.zeros((self.batch_size, np.max(sys_lens), self.max_utt_size), dtype=np.int32)\n #vec_sys_acts = np.zeros((self.batch_size, np.max(sys_lens), len(sys_acts[0][0])), dtype=np.int32)\n\n #vec_usr_templates = np.zeros((self.batch_size, np.max(usr_lens), self.max_utt_size), dtype=np.int32)\n #vec_usr_acts = np.zeros((self.batch_size, np.max(usr_lens), len(usr_acts[0][0])), dtype=np.int32)\n\n vec_domain_metas = np.zeros((self.batch_size, self.max_utt_size), dtype=np.int32)\n\n for b_id in range(self.batch_size):\n vec_out_utts[b_id, 0:vec_out_lens[b_id]] = out_utts[b_id]\n vec_out_acts[b_id, 0:out_act_lens[b_id]] = out_acts[b_id]\n\n vec_ctx_confs[b_id, 0:vec_ctx_lens[b_id]] = ctx_confs[b_id]\n vec_ctx_utts[b_id, 0:vec_ctx_lens[b_id], :] = ctx_utts[b_id]\n\n #vec_sys_templates[b_id, 0:sys_lens[b_id], :] = sys_templates[b_id]\n #vec_sys_acts[b_id, 0:sys_lens[b_id], :] = sys_acts[b_id]\n\n vec_domain_metas[b_id, :] = domain_metas[b_id]\n\n #vec_usr_templates[b_id, 0:usr_lens[b_id], :] = usr_templates[b_id]\n #vec_usr_acts[b_id, 0:usr_lens[b_id]] = usr_acts[b_id]\n\n return Pack(context_lens=vec_ctx_lens, contexts=vec_ctx_utts, context_confs=vec_ctx_confs,\n output_lens=vec_out_lens, outputs=vec_out_utts, output_actions=vec_out_acts,\n domains=domains, domain_metas=vec_domain_metas)\n\n def _prepare_warmup_batch(self, selected_ids):\n # the batch index, the starting point and end point for segment\n rows = [self.warmup_data[idx] for idx in selected_ids]\n out_utts, out_lens = [], []\n out_acts, out_act_lens = [], []\n domains, domain_metas = [], []\n\n for row in rows:\n out_utt = row.utt\n # target response\n out_acts.append(row.actions)\n out_act_lens.append(len(row.actions))\n\n out_utts.append(out_utt)\n out_lens.append(len(out_utt))\n\n domains.append(row.domain)\n domain_metas.append(self.domain_meta[row.domain].description)\n\n vec_out_lens = np.array(out_lens)\n vec_out_utts = np.zeros((self.batch_size, np.max(out_lens)), dtype=np.int32)\n vec_out_acts = np.zeros((self.batch_size, np.max(out_act_lens)), dtype=np.int32)\n vec_domain_metas = np.zeros((self.batch_size, self.max_utt_size), dtype=np.int32)\n\n for b_id in range(self.batch_size):\n vec_out_utts[b_id, 0:vec_out_lens[b_id]] = out_utts[b_id]\n vec_out_acts[b_id, 0:out_act_lens[b_id]] = out_acts[b_id]\n vec_domain_metas[b_id, :] = domain_metas[b_id]\n\n return Pack(output_lens=vec_out_lens, outputs=vec_out_utts, output_actions=vec_out_acts,\n domains=domains, 
domain_metas=vec_domain_metas)\n\n","repo_name":"snakeztc/NeuralDialog-ZSDG","sub_path":"zsdg/dataset/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":16053,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"57"}
+{"seq_id":"29482024819","text":"def representacion_matricial(matriz_coeficientes, matriz_terminos_independientes):\n    for i in range(3):\n        print(f\"Enter the data for equation {i+1}: \")\n\n        matriz_coeficientes[i][0] = int(input(\"Enter the coefficient of x: \"))\n        matriz_coeficientes[i][1] = int(input(\"Enter the coefficient of y: \"))\n        matriz_coeficientes[i][2] = int(input(\"Enter the coefficient of z: \"))\n\n        matriz_terminos_independientes[i][0] = int(input(\"Enter the independent term: \"))\n        print(\"\\n\")\n\ndef calcular_determinante(M):\n    pt1 = M[\"a\"] * ((M[\"e\"] * M[\"i\"]) - (M[\"h\"] * M[\"f\"]))\n    pt2 = M[\"b\"] * ((M[\"i\"] * M[\"d\"]) - (M[\"g\"] * M[\"f\"]))\n    pt3 = M[\"c\"] * ((M[\"d\"] * M[\"h\"]) - (M[\"g\"] * M[\"e\"]))\n    return pt1 - pt2 + pt3\n\ndef obtener_matriz_cofactores(M):\n    C11 = (M[\"e\"] * M[\"i\"]) - (M[\"h\"] * M[\"f\"])\n    C12 = -1*((M[\"d\"] * M[\"i\"]) - (M[\"g\"] * M[\"f\"]))\n    C13 = (M[\"d\"] * M[\"h\"]) - (M[\"g\"] * M[\"e\"])\n\n    C21 = -1*((M[\"b\"] * M[\"i\"]) - (M[\"h\"] * M[\"c\"]))\n    C22 = (M[\"a\"] * M[\"i\"]) - (M[\"g\"] * M[\"c\"])\n    C23 = -1*((M[\"a\"] * M[\"h\"]) - (M[\"g\"] * M[\"b\"]))\n\n    C31 = (M[\"b\"] * M[\"f\"]) - (M[\"e\"] * M[\"c\"])\n    C32 = -1*((M[\"a\"] * M[\"f\"]) - (M[\"d\"] * M[\"c\"]))\n    C33 = (M[\"a\"] * M[\"e\"]) - (M[\"d\"] * M[\"b\"])\n\n    return [[C11, C12, C13],[C21, C22, C23],[C31,C32, C33]]\n\ndef obtener_adjunta(matriz_cofactores):\n    matriz_adjunta = [[0,0,0],[0,0,0],[0,0,0]]\n    for i in range(3):\n        for j in range(3):\n            matriz_adjunta[i][j] = matriz_cofactores[j][i]\n\n    return matriz_adjunta\n\ndef obtener_inversa(matriz_adjunta, det):\n    # any nonzero determinant (positive or negative) means the matrix is invertible\n    if det != 0:\n        for i in range(3):\n            for j in range(3):\n                matriz_adjunta[i][j] *= (1/det)\n\n        return matriz_adjunta\n    else:\n        print(\"The matrix has no inverse.\")\n        print(\"Therefore the system cannot be solved with this method.\\n\")\n\ndef multiplicar_matrices(matriz1, matriz2):\n    resultado = [[0], [0], [0]]\n    for i in range(len(matriz1)):\n        for j in range(len(matriz2[0])):\n            for k in range(len(matriz2)):\n                resultado[i][j] += matriz1[i][k] * matriz2[k][j]\n\n    return resultado\n\n\n\ndef main():\n    # Empty coefficient and independent-term matrices\n    matriz_coeficientes = [[0,0,0],[0,0,0],[0,0,0]]\n    matriz_terminos_independientes = [[0],[0],[0]] \n\n    # Step 1 (matrix representation of the system of equations)\n    representacion_matricial(matriz_coeficientes, matriz_terminos_independientes)\n    elementos_matriz = {\n        \"a\": matriz_coeficientes[0][0],\n        \"b\": matriz_coeficientes[0][1],\n        \"c\": matriz_coeficientes[0][2],\n        \"d\": matriz_coeficientes[1][0],\n        \"e\": matriz_coeficientes[1][1],\n        \"f\": matriz_coeficientes[1][2],\n        \"g\": matriz_coeficientes[2][0],\n        \"h\": matriz_coeficientes[2][1],\n        \"i\": matriz_coeficientes[2][2]\n    }\n\n    # Step 2 (compute the determinant)\n    det = calcular_determinante(elementos_matriz)\n\n    # Step 3 (get the cofactor matrix)\n    matriz_cofactores = obtener_matriz_cofactores(elementos_matriz)\n\n    # Step 4 (get the adjugate matrix)\n    matriz_adjunta = obtener_adjunta(matriz_cofactores)\n\n    # Step 5 (get the inverse matrix)\n    matriz_inversa = obtener_inversa(matriz_adjunta, det)\n\n    # Step 6 (multiply A^-1 * X)\n    resultado = multiplicar_matrices(matriz_inversa, 
matriz_terminos_independientes)\n\n print(\"Resultados:\")\n print(f\"x = {resultado[0][0]}\")\n print(f\"y = {resultado[1][0]}\")\n print(f\"z = {resultado[2][0]}\")\n\n print(\"\\n\")","repo_name":"francisco-oro/eda-2","sub_path":"proyecto-3/ACT36.py","file_name":"ACT36.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21043408811","text":"# coding: utf8\n\nimport logging\nfrom typing import Tuple, Dict, Callable\nfrom uuid import uuid4\nfrom concurrent.futures import ThreadPoolExecutor\nfrom random import randint\n# noinspection PyPackageRequirements\nimport pytest\nfrom pandas import DataFrame\n# noinspection PyProtectedMember\nfrom dfqueue.core.dfqueue import QueuesHandler\nfrom dfqueue import adding, managing, synchronized, assign_dataframe\nfrom . import add_row, change_row_value, create_queue_item\n\nlogging.getLogger().setLevel(\"DEBUG\")\n\n\n@pytest.mark.parametrize(\"queue_name\", [\n None,\n 'TEST_1',\n 'TEST_2'\n])\ndef test_parallel_1(queue_name):\n selected_columns = [\"A\", \"C\"]\n queue_name = queue_name if queue_name is not None else QueuesHandler().default_queue_name\n\n @synchronized(queue_name=queue_name)\n @managing(queue_name=queue_name)\n @adding(queue_items_creation_function=create_queue_item,\n other_args={\"selected_columns\": selected_columns},\n queue_name=queue_name)\n def parallel_add_row(dataframe: DataFrame, index: str, columns_dict: dict) -> Tuple[str, Dict]:\n return add_row(dataframe, index, columns_dict)\n\n @synchronized(queue_name=queue_name)\n @adding(queue_items_creation_function=create_queue_item,\n other_args={\"selected_columns\": selected_columns},\n queue_name=queue_name)\n def parallel_change_row_value(dataframe: DataFrame,\n index: str,\n new_columns_dict: dict) -> Tuple[str, Dict]:\n return change_row_value(dataframe, index, new_columns_dict)\n\n def thread_adding(operation_number: int, dataframe: DataFrame):\n for _ in range(operation_number):\n parallel_add_row(dataframe, str(uuid4()), {'A': str(uuid4()), 'B': str(uuid4()),\n 'C': str(uuid4()), 'D': str(uuid4())})\n\n def thread_change(operation_number: int, dataframe: DataFrame):\n for _ in range(operation_number):\n parallel_change_row_value(dataframe,\n dataframe.index.values[randint(0, len(dataframe)-1)],\n {'A': str(uuid4()), 'B': str(uuid4()), 'C': str(uuid4()),\n 'D': str(uuid4())})\n\n dataframe = DataFrame(columns=['A', 'B', 'C', 'D'])\n assign_dataframe(dataframe, 1000, selected_columns, queue_name)\n\n assert dataframe.empty\n\n with ThreadPoolExecutor(max_workers=2) as executor:\n future_a = executor.submit(thread_adding, 4000, dataframe)\n future_b = executor.submit(thread_adding, 4000, dataframe)\n future_c = executor.submit(thread_change, 1000, dataframe)\n future_a.result()\n future_b.result()\n future_c.result()\n\n assert len(dataframe) == 1000\n\n\n# Two queues share the same dataframe\ndef test_parallel_2():\n selected_columns_a = [\"A\", \"B\"]\n selected_columns_b = [\"C\", \"D\"]\n\n @synchronized(queue_name='TEST_3')\n @managing(queue_name='TEST_3')\n @adding(queue_items_creation_function=create_queue_item,\n other_args={\"selected_columns\": selected_columns_a},\n queue_name='TEST_3')\n def parallel_add_row_a(dataframe: DataFrame,\n index: str,\n columns_dict: dict) -> Tuple[str, Dict]:\n return add_row(dataframe, index, columns_dict)\n\n @synchronized(queue_name='TEST_4')\n @managing(queue_name='TEST_4')\n 
@adding(queue_items_creation_function=create_queue_item,\n other_args={\"selected_columns\": selected_columns_b},\n queue_name='TEST_4')\n def parallel_add_row_b(dataframe: DataFrame,\n index: str,\n columns_dict: dict) -> Tuple[str, Dict]:\n return add_row(dataframe, index, columns_dict)\n\n def thread_adding(operation_number: int, dataframe: DataFrame, adding_function: Callable):\n for _ in range(operation_number):\n adding_function(dataframe, str(uuid4()), {'A': str(uuid4()), 'B': str(uuid4()),\n 'C': str(uuid4()), 'D': str(uuid4())})\n\n dataframe = DataFrame(columns=['A', 'B', 'C', 'D'])\n assign_dataframe(dataframe, 1000, selected_columns_a, 'TEST_3')\n assign_dataframe(dataframe, 500, selected_columns_b, 'TEST_4')\n\n assert dataframe.empty\n\n # noinspection PyProtectedMember\n queue_handler_instance = QueuesHandler._QueuesHandler__instance\n assert id(QueuesHandler._QueuesHandler__instance.get_assigned_lock('TEST_3')) != \\\n id(queue_handler_instance.get_assigned_lock(QueuesHandler().default_queue_name))\n assert id(QueuesHandler._QueuesHandler__instance.get_assigned_lock('TEST_4')) != \\\n id(queue_handler_instance.get_assigned_lock(QueuesHandler().default_queue_name))\n assert id(QueuesHandler._QueuesHandler__instance.get_assigned_lock('TEST_3')) == \\\n id(queue_handler_instance.get_assigned_lock('TEST_4'))\n\n with ThreadPoolExecutor(max_workers=2) as executor:\n future_a = executor.submit(thread_adding, 4000, dataframe, parallel_add_row_a)\n future_b = executor.submit(thread_adding, 4000, dataframe, parallel_add_row_b)\n future_a.result()\n future_b.result()\n\n # We can't predict if dataframe's size will be 500 or 1000\n assert len(dataframe) in [500, 1000]\n","repo_name":"JCH222/dfqueue","sub_path":"dfqueue/tests/scenarios/test_parallel.py","file_name":"test_parallel.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15046110230","text":"from django.shortcuts import resolve_url\nfrom django.test import TestCase\n\nfrom .models import Address\n\n\nclass AddressTestCase(TestCase):\n def setUp(self):\n Address.objects.create(\n postal_code='12345678',\n address='Rua Rua',\n number='1',\n neighbourhood='Centro',\n state='MG',\n complement='',\n description='',\n city='Varginha'\n )\n\n def test_address_creation(self):\n address = Address.objects.get(address='Rua Rua')\n self.assertEqual(address.postal_code, '12345678')\n self.assertEqual(address.address, 'Rua Rua')\n self.assertEqual(address.number, '1')\n self.assertEqual(address.neighbourhood, 'Centro')\n self.assertEqual(address.state, 'MG')\n self.assertEqual(address.complement, '')\n self.assertEqual(address.description, '')\n self.assertEqual(address.city, 'Varginha')\n \n def test_address_creation_template(self):\n response = self.client.get(resolve_url('address_create'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'address_create.html')\n\n def test_address_list_template(self):\n response = self.client.get(resolve_url('address_list'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'address_list.html')\n\n def test_address_form(self):\n response = self.client.post(resolve_url('address_create'), {\n 'postal_code': '87654321',\n 'address': 'Outra Rua',\n 'number': '2',\n 'neighbourhood': 'Centro',\n 'state': 'MG',\n 'complement': 'Test',\n 'description': 'Test',\n 'city': 'Varginha'\n })\n self.assertEqual(response.status_code, 302)\n address2 = 
Address.objects.get(address='Outra Rua')\n self.assertEqual(address2.postal_code, '87654321')\n self.assertEqual(address2.address, 'Outra Rua')\n","repo_name":"welbjr/esoft_project","sub_path":"addresses/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"69925579380","text":"from __future__ import division\n\nimport numpy as np\nimport torch\n\n# GEOMETRIC UTILS\ndef pose_distance(reference_pose, measurement_pose):\n \"\"\"\n :param reference_pose: 4x4 numpy array, reference frame camera-to-world pose (not extrinsic matrix!)\n :param measurement_pose: 4x4 numpy array, measurement frame camera-to-world pose (not extrinsic matrix!)\n :return combined_measure: float, combined pose distance measure\n :return R_measure: float, rotation distance measure\n :return t_measure: float, translation distance measure\n \"\"\"\n rel_pose = np.dot(np.linalg.inv(reference_pose), measurement_pose)\n R = rel_pose[:3, :3]\n t = rel_pose[:3, 3]\n R_measure = np.sqrt(2 * (1 - min(3.0, np.matrix.trace(R)) / 3))\n t_measure = np.linalg.norm(t)\n combined_measure = np.sqrt(t_measure ** 2 + R_measure ** 2)\n return combined_measure, R_measure, t_measure\n\n\ndef is_pose_available(pose):\n is_nan = np.isnan(pose).any()\n is_inf = np.isinf(pose).any()\n is_neg_inf = np.isneginf(pose).any()\n if is_nan or is_inf or is_neg_inf:\n return False\n else:\n return True\n","repo_name":"hashi0203/fadec","sub_path":"dev/dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38026626971","text":"import network\nimport utime\n#import env var.\nimport config\n\nwlan = network.WLAN(network.STA_IF)\nwlan.active(True)\nwlan.connect(config.WLAN_ID, config.WLAN_PASS)\n\nwhile not wlan.isconnected() and wlan.status() >= 0:\n print(\"Waiting to connect:\")\nutime.sleep(1)\nprint(wlan.ifconfig())\n\n#HTTP REQUEST\nimport urequests\nimport random\nwhile True:\n ambient = random.randrange(0,100,1)\n url = \"http://192.168.1.10:3000/daily-probes\"\n payload = {\n 'probe': ambient\n }\n print(url)\n r = urequests.post(url, json=payload)\n print(r.status_code) #need to add timeout func for when server is down\n utime.sleep(1)\n r.close()","repo_name":"SuperJSBros/roaster-monitor-sensor","sub_path":"request-example.py","file_name":"request-example.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"3896942488","text":"from aiogram import Dispatcher, types\nfrom aiogram.dispatcher import FSMContext\n\nfrom bot.services.database.commands.quote import is_in_db, add_quote, all_quotes, random_quote\nfrom bot.states.AddQuoteState import AddQuote\n\n\nasync def command_add_quote_handler(message: types.Message):\n await AddQuote.enterQuote.set()\n await message.answer(text=f'Введите цитату:')\n\n\nasync def get_quote_handler(message: types.Message, state: FSMContext):\n quote = message.text\n if is_in_db(quote):\n await message.answer(f'Такая цитата уже хранится в базе данных! 
Попробуй другую')\n    else:\n        add_quote(quote)\n        await state.finish()\n        await message.answer(\"Цитата успешно добавлена✨\")\n\n\nasync def command_quotes_handler(message: types.Message):\n    await message.answer('✨Все мотивационные цитаты✨:')\n    for quote in all_quotes():\n        await message.answer(f'\"{str(quote)}\"')\n\n\nasync def random_quote_handler(message: types.Message):\n    await message.answer('✨Мотивационная цитата✨:')\n    await message.answer(f'\"{str(random_quote())}\"')\n\n\ndef register_quotes_control(dp: Dispatcher):\n    dp.register_message_handler(command_add_quote_handler, commands=['add_quote'], state='*')\n    dp.register_message_handler(get_quote_handler, state=AddQuote.enterQuote)\n    dp.register_message_handler(command_quotes_handler, commands=['quotes'], state='*')\n    dp.register_message_handler(random_quote_handler, commands=['rand_quote'], state='*')\n","repo_name":"elizabeth-honcharova/planerBot","sub_path":"bot/handlers/motivational_mode_control.py","file_name":"motivational_mode_control.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29002274155","text":"from myui import Ui_MainWindow\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox\nimport process\nimport os\nimport torch\nfrom PyQt5.QtGui import QPixmap\nfrom torch import nn\nfrom unet import UNet\n\n\nclass mywindow(QtWidgets.QMainWindow, Ui_MainWindow):\n    def __init__(self):\n        super(mywindow, self).__init__()\n        self.setupUi(self)\n        self.pushBt_reset_10.clicked.connect(self.open_all) # open the image file path\n        self.pushBt_up_10.clicked.connect(self.pre)\n        self.pushBt_dwn_10.clicked.connect(self.next)\n        self.pushBt_cntinu_10.clicked.connect(self.continu)\n        # n_class = 3\n        # model = UNet(n_channels=3, n_classes=n_class)\n        # model = nn.DataParallel(model, device_ids=[0])\n        # model.load_state_dict(torch.load('trainmodels.pth'))\n\n\n    # def open_one(self): # process a single image\n    #     file_name = QFileDialog.getOpenFileName()\n    #     path = file_name[0] # [0] is the full path, [1] is 'All files *'\n    #     # print(path)\n    #     self.label_10_response(path)\n\n    def open_all(self): # batch-process the images\n        self.label_10.setStyleSheet('font: 28pt \"Agency FB\"')\n        self.label_10.setStyleSheet('color: rgb(255, 0, 0)')\n        # self.label_10.setText(\"开始读入图像文件,请稍等......\")\n        try:\n            self.directory = QFileDialog.getExistingDirectory(self, \"选择文件夹\", \"/\")\n            self.file_list = [] # holds the file names of all images\n            path_list = os.listdir(self.directory)\n            for file_name in path_list:\n                if os.path.splitext(file_name)[1] == \".jpg\":\n                    self.file_list.append(file_name)\n            self.im_idx = 0\n            self.file_num = len(self.file_list)\n            # self.label_imNum_10.setStyleSheet('font: 20pt \"Agency FB\"')\n            # self.label_imNum_10.setStyleSheet('color: rgb(0, 0, 0)')\n            strIm_num = \"图像总数: \" + str(self.file_num)\n            self.label_imNum_10.setText(strIm_num)\n            strIm_num = \"处理第 \" + str(self.im_idx + 1) + \" 张\"\n            self.label_i_10.setText(strIm_num)\n            pathImg = self.directory + \"/\" + self.file_list[self.im_idx]\n            self.label_10_response(pathImg)\n        except Exception:\n            pass\n\n    def pre(self):\n        try:\n            if self.im_idx < 0:\n                return\n            else:\n                self.im_idx -= 1\n            pathImg = self.directory + \"/\" + self.file_list[self.im_idx]\n            # self.label_i_10.setStyleSheet('font: 20pt \"Agency FB\"')\n            # self.label_i_10.setStyleSheet('color: rgb(0, 0, 0)')\n            strIm_num = \"处理第 \" + str(self.im_idx + 1) + \" 张\"\n            self.label_i_10.setText(strIm_num)\n            self.label_10_response(pathImg)\n        except Exception:\n            pass\n\n    def next(self):\n        try:\n            
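# stop if we are already at the last image; there is nothing to advance to\n            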
if self.im_idx >= self.file_num - 1:\n                return\n            else:\n                self.im_idx += 1\n            pathImg = self.directory + \"/\" + self.file_list[self.im_idx]\n            # print(pathImg)\n            # self.label_i_10.setStyleSheet('font: 26pt \"Agency FB\"')\n            # self.label_i_10.setStyleSheet('color: rgb(0, 0, 0)')\n            strIm_num = \"处理第 \" + str(self.im_idx + 1) + \" 张\"\n            self.label_i_10.setText(strIm_num)\n            self.label_10_response(pathImg)\n        except Exception as e:\n            print(e)\n\n    def continu(self):\n        if self.count:\n            img = QPixmap(self.fp2)\n            self.label_10.setPixmap(img)\n            self.count = 0\n        else:\n            img = QPixmap(self.fp1)\n            self.label_10.setPixmap(img)\n            self.count = 1\n\n    def label_10_response(self, pathImg):\n        (fp1, fp2, results) = process.pro(pathImg)\n        self.count = 1\n        self.fp1 = fp1\n        self.fp2 = fp2\n        w = self.label_10.width()\n        h = self.label_10.height()\n        img = QPixmap(fp1)\n        img = img.scaled(w, h)\n        self.label_10.setScaledContents(True) # let the image scale to fit the label\n        self.label_10.setPixmap(img)\n        if results == '良性':\n            self.label_res_10.setStyleSheet('color: rgb(0, 0, 127)')\n        else:\n            self.label_res_10.setStyleSheet('color: rgb(254, 0, 0)')\n        self.label_res_10.setText('预测结果为:' + results)\n\n\nif __name__ == \"__main__\":\n    import sys\n\n    app = QtWidgets.QApplication(sys.argv)\n    window = mywindow()\n    window.show()\n    app.exec_()\n","repo_name":"sunbrown/SoftUi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"70897944180","text":"# Calling the vehicle.py for each vehicle \n\n\nfrom vehicleDataFormat import *\n\naltitude = 8.0\n\nmac = vehicleDataFormat.dataFormat()\nclass callVehicles():\n#vehicleEntry = vehicleDataFormat.dataFormat()\n#print(vehicleEntry['Altitude'])\n#vehicleEntry['Altitude'] = altitude\n#print(vehicleEntry['Altitude'])\n\n#print(vehicleDataFormat.get_altitude(mac))\n#vehicleDataFormat.set_altitude(mac, altitude)\n#print(vehicleDataFormat.get_altitude(mac))\n\n#print(mac)\n\n    def macVehicle():\n        vehicleDataFormat.set_altitude(mac, altitude)\n        #print(mac)\n\n        return mac ","repo_name":"tyabbi/DB-Practice","sub_path":"firstmethod/callVehicles.py","file_name":"callVehicles.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17504977361","text":"# Problem 5: Blockchain\n\nimport hashlib\nimport time\n\n\nclass Block:\n    \n    def __init__(self, timestamp, data, previous_hash):\n        self.timestamp = timestamp\n        self.data = data\n        self.previous_hash = previous_hash\n        self.hash = self.calc_hash()\n        self.next = None\n\n    def calc_hash(self):\n        sha = hashlib.sha256()\n        hash_str = str(self.data).encode('utf-8')\n        sha.update(hash_str)\n        return sha.hexdigest()\n\n\nclass Blockchain:\n\n    def __init__(self):\n        self.head = None\n        self.size = 0\n\n    def append_block(self, value):\n        if value is None:\n            return\n\n        node = self.head\n        self.size += 1\n\n        if node is None:\n            block = Block(time.gmtime(), value, None)\n            self.head = block\n        else:\n            while node.next:\n                node = node.next\n            node.next = Block(time.gmtime(), value, node.hash)\n\n\n# Test Cases\n\nprint('----- Test Case 1 -----')\nblockchain = Blockchain()\nblockchain.append_block('Block 0')\nprint(blockchain.head.data) # prints Block 0\nprint(blockchain.head.hash) # prints 9a80074454a08bd893afd50ad666803e636718850e57916174e535f504bed58d\nprint(blockchain.head.next) # prints None\n\n\nprint('\\n----- Test Case 2 -----')\nblockchain_2 = 
Blockchain()\nblockchain_2.append_block('Block 0')\nblockchain_2.append_block('Block 1')\nblockchain_2.append_block('Block 2')\nblockchain_2.append_block(None)\nprint(blockchain_2.head.data) # prints Block 0\nnext_block = blockchain_2.head.next\nanother_block = blockchain_2.head.next.next\nprint(next_block.hash == another_block.previous_hash) # prints True\nprint(next_block.data) # prints Block 1\nprint(another_block.data) # prints Block 2\n\n\nprint('\\n----- Test Case 3 -----')\nblockchain_3 = Blockchain()\nblockchain_3.append_block('Block 0')\nblockchain_3.append_block('Block 1')\nblockchain_3.append_block('Block 2')\nblockchain_3.append_block('Block 3')\nblockchain_3.append_block('Block 4')\nblockchain_3.append_block('Block 5')\nprint(blockchain_3.head.data) # prints Block 0\nnext_block = blockchain_3.head.next\nanother_block = blockchain_3.head.next.next\nyet_another_block = blockchain_3.head.next.next.next\nprint(next_block.hash == another_block.previous_hash) # prints True\nprint(next_block.hash == yet_another_block.previous_hash) # prints False\nprint(next_block.data) # prints Block 1\nprint(another_block.data) # prints Block 2\nprint(yet_another_block.data) # prints Block 3","repo_name":"mgrybel/Udacity-Data-Structures-and-Algorithms","sub_path":"Project 2: Show Me the Data Structures/Problem_5/problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18314366084","text":"import pyautogui\nimport subprocess\nimport shutil\nimport click\nimport time\nimport sys\nimport cv2\nimport re\nimport os\n\n# Image directory\ncurrent_working_dir = os.path.dirname(os.path.abspath(__file__))\nvc_image_dir = f\"{current_working_dir}\\\\dmat\\\\images\\\\voice_channel\"\nserver_image_dir = f\"{current_working_dir}\\\\dmat\\\\images\\\\server_img\"\nchat_image_dir = f\"{current_working_dir}\\\\dmat\\\\images\\\\chat_channel\"\n\n# File inside the image directory\nserver_img = os.listdir(server_image_dir)\nvc_img = os.listdir(vc_image_dir)\nchat_img = os.listdir(chat_image_dir)\n\npyautogui.PAUSE = 0.5\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('-pre', '--prefix', type=str, required=True, help='type your bot command prefix')\n@click.option('-vc', type=int, default=1, show_default=True, help='Choose which voice channel to connect.')\n@click.option('-c', '--chat', type=int, default=1, show_default=True, help='Choose which chat to click.')\n@click.option('-s', '--server', type=int, default=1, show_default=True, help='Choose which server logo to click.')\n@click.option('-play', '--playlist', type=int, required=True, help='Choose what playlist to play')\ndef main(prefix, vc, chat, server, playlist):\n \"\"\"The main function\"\"\"\n try:\n \"\"\"Validate if the file in image directory exist of not\"\"\"\n\n # Checking the file in .\\images\\server_img directory\n os.path.isfile(\n f\"{server_image_dir}\\\\{server_img[server-1]}\")\n\n # Checking file in .\\images\\voice_channel directory\n os.path.isfile(\n f\"{vc_image_dir}\\\\{vc_img[vc-1]}\")\n\n # Checking file in .\\images\\chat_channel directory\n os.path.isfile(\n f\"{chat_image_dir}\\\\{chat_img[chat-1]}\")\n\n # Checking the chatbox image\n os.path.isfile(\n \".\\\\dmat\\\\images\\\\chatbox.png\")\n\n except FileNotFoundError:\n click.secho(\n \"ERROR: Playlist is empty. 
Use register -p[PATH] command to add playlist\", fg='bright_red')\n\n except IndexError:\n click.secho('ERROR: Image file not available. Use the view command to see the file available or --help command to see available commands', fg='bright_red')\n else:\n try:\n # Open discord app\n with open(\".\\\\dmat\\\\discord_path.txt\", \"r\") as file:\n discord_path = file.read()\n subprocess.Popen(discord_path)\n time.sleep(2.5)\n\n # Locate server logo location and click it\n logo_location = pyautogui.locateCenterOnScreen(\n f\"{server_image_dir}\\\\{server_img[server-1]}\", confidence=0.6)\n pyautogui.click(logo_location)\n\n # Locate voice channel and click it\n voice_channel_location = pyautogui.locateCenterOnScreen(\n f\"{vc_image_dir}\\\\{vc_img[vc-1]}\", confidence=0.8)\n if(voice_channel_location == None):\n # dc_channel = pyautogui.locateCenterOnScreen(\n # \".\\\\images\\\\dc_channel.png\", confidence=0.4)\n pyautogui.moveRel(100,100)\n print(voice_channel_location)\n\n while (voice_channel_location == None):\n pyautogui.PAUSE=0.2\n pyautogui.scroll(-120)\n voice_channel_location = pyautogui.locateCenterOnScreen(\n f\"{vc_image_dir}\\\\{vc_img[vc-1]}\", confidence=0.8)\n else:\n pyautogui.click(voice_channel_location)\n else:\n pyautogui.click(voice_channel_location)\n pyautogui.PAUSE = 2\n time.sleep(1)\n\n # Locate chat channel and click it\n chat_channel_location = pyautogui.locateCenterOnScreen(\n f\"{chat_image_dir}\\\\{chat_img[chat-1]}\", confidence=0.7)\n while chat_channel_location == None:\n print(chat_channel_location)\n pyautogui.moveRel(0,-300)\n pyautogui.scroll(-50)\n chat_channel_location = pyautogui.locateCenterOnScreen(\n f\"{chat_image_dir}\\\\{chat_img[chat-1]}\", confidence=0.7)\n else:\n pyautogui.click(chat_channel_location)\n\n # Locate the chatbox and click it\n chatbox_location = pyautogui.locateCenterOnScreen(\n \".\\\\dmat\\\\images\\\\chatbox.png\", confidence=0.4)\n pyautogui.click(chatbox_location)\n\n # Open playlist.txt in read mode\n with open('.\\\\dmat\\\\playlist.txt', 'r') as file:\n file_content = file.read()\n stripped_content = file_content.strip(\" \")\n playlists = stripped_content.split('\\n')\n # Open the textfile path stored inside playlist.txt\n with open(playlists[playlist-1], 'r', encoding=\"cp437\", errors=\"ignore\") as songs:\n # Read and split the content into list based on line break\n song_content = songs.read()\n splitted_content = song_content.split('\\n')\n\n # Looping over the list and write the content.\n for i in splitted_content:\n # Check for empty string e.g.line break\n if i != '':\n pyautogui.write(f\"{prefix}play {i}\", interval=0.03)\n pyautogui.press('enter')\n \n # If exist, skip it and continue the loop\n else:\n continue\n # If the loop has completed\n else:\n time.sleep(1)\n if prefix == \"-\":\n # Write loop queue command\n pyautogui.write(\n f\"{prefix}loop queue\", interval=0.03)\n else:\n pyautogui.write(\n f\"{prefix}loopqueue\", interval=0.03)\n\n pyautogui.press('enter')\n click.secho('done!', fg='green')\n sys.exit()\n except pyautogui.FailSafeException:\n click.secho('Execution has been stopped', fg='yellow')\n except OSError:\n click.secho(\n \"ERROR: Discord.exe path couldn't be found or missing. 
Use register -d[PATH] command to add discord path\", fg='bright_red')\n\n\nclass CopyImage:\n \"\"\"This class validate the file type and copy the file into images subdirectory\"\"\"\n\n def __init__(self, image, name, dir):\n self.image = image\n self.name = name\n self.dir = dir\n # Check if the file ends with .png, .jpg, .jpeg\n if (self.name == None and self.image.endswith((\".jpg\", \".png\", \".jpeg\"))):\n shutil.copy(src=image, dst=self.dir)\n elif (self.name != None and self.image.endswith((\".jpg\", \".png\", \".jpeg\")) and self.name.endswith((\".jpg\", \".png\", \".jpeg\"))):\n shutil.copyfile(src=image, dst=f\"{self.dir}\\\\{self.name}\")\n # Give error if the file type is invalid\n else:\n click.secho(\n \"FILE TYPE ERROR: Please put the file type e.g.filename.png or invalid image file type\", fg='bright_red')\n\n\n@cli.command()\n@click.argument(\"image\", type=click.Path(exists=True))\n@click.option(\"-n\", \"--name\", help=\"Rename file\")\ndef add_vc(image, name):\n \"\"\"Add voice channel image\"\"\"\n CopyImage(image, name, vc_image_dir)\n\n\n@cli.command()\n@click.argument(\"image\", type=click.Path(exists=True))\n@click.option(\"-n\", \"--name\", help=\"Rename file\")\ndef add_logo(image, name):\n \"\"\"Add server logo image\"\"\"\n CopyImage(image, name, server_image_dir)\n\n\n@cli.command()\n@click.argument(\"image\", type=click.Path(exists=True))\n@click.option(\"-n\", \"--name\", help=\"Rename file\")\ndef add_channel(image, name):\n \"\"\"Add chat channel image\"\"\"\n CopyImage(image, name, chat_image_dir)\n\n\n@cli.command()\n@click.option(\"-vc\", \"--voice-channel\", \"dir\", flag_value=vc_image_dir)\n@click.option(\"-c\", \"--chat-channel\", \"dir\", flag_value=chat_image_dir)\n@click.option(\"-s\", \"--server-logo\", \"dir\", flag_value=server_image_dir)\n@click.argument('file-index', type=int)\ndef remove_image(file_index, dir):\n \"\"\"Remove image\"\"\"\n if dir != None:\n image = os.listdir(dir)\n os.remove(f\"{dir}\\\\{image[file_index-1]}\")\n click.echo(\n f\"image {click.style(image[file_index-1], fg='yellow')} removed\")\n else:\n click.secho(\"Error: Missing one of these options \", fg=\"red\", nl=False)\n click.secho(\n \"['-vc' / '--voice-channel', '-c' / '--chat-channel', '-s' / '--server-image'].\", fg='yellow', bold=True)\n\n\n@cli.command()\n@click.option(\"-d\", \"--discord\", type=click.Path(exists=True), help=\"Register discord path\")\n@click.option(\"-p\", \"--playlist\", type=click.Path(exists=True), help=\"Register songs in textfile\")\ndef register(discord, playlist):\n \"\"\"Initialize discord path and song_playlist.txt\"\"\"\n # File type and input validation\n try:\n if (playlist == None):\n if (discord.endswith(\".exe\")):\n with open(\"discord_path.txt\", \"w\") as file:\n file.write(discord)\n\n elif (discord == None):\n if (playlist.endswith(\".txt\")):\n with open(\"playlist.txt\", \"a\") as file:\n file.write(playlist + \"\\n\")\n\n elif (discord.endswith(\".exe\") and playlist.endswith(\".txt\")):\n with open(\"discord_path.txt\", \"w\") as file:\n file.write(discord)\n with open(\"playlist.txt\", \"a\") as file:\n file.write(playlist + \"\\n\")\n else:\n click.secho(\"ERROR: Invalid file type\", fg=\"red\")\n # If playlist.txt don't exist or deleted(just in case). 
Create a new one\n except FileNotFoundError:\n with open(\"playlist.txt\", \"w\") as file:\n file.write(playlist + \"\\n\")\n\n\n@cli.command()\ndef view():\n \"\"\"View list images\"\"\"\n\n # List all the file in the images sub-directory.\n # ==================================================\n # list the file inside voice_channel directory\n click.secho(\"\\nVoice Channel\", fg='cyan', bold=True, underline=True)\n if len(os.listdir(vc_image_dir)) != 0:\n for index, file in enumerate(os.listdir(vc_image_dir)):\n filename, file_extension = os.path.splitext(file)\n click.secho(f\" {index+1}. \" + filename, fg='bright_green')\n else:\n click.secho(\" none\", fg='red')\n\n # list the file inside server_img directory\n click.secho(\"\\nServer\", fg='cyan', bold=True, underline=True)\n if len(os.listdir(server_image_dir)) != 0:\n for index, file in enumerate(os.listdir(server_image_dir)):\n filename, file_extension = os.path.splitext(file)\n click.secho(f\" {index+1}. \" + filename, fg='bright_green')\n else:\n click.secho(\" none\", fg='red')\n\n # list the file inside chat_channel directory\n click.secho(\"\\nChat Channel\", fg='cyan', bold=True, underline=True)\n if (len(os.listdir(chat_image_dir)) != 0):\n for index, file in enumerate(os.listdir(chat_image_dir)):\n filename, file_extension = os.path.splitext(file)\n click.secho(f\" {index+1}. \" + filename, fg='bright_green')\n else:\n click.secho(\" none\", fg='red')\n\n # List the playlist inside playlist.txt\n click.secho(\"\\nPlaylist\", fg=\"cyan\", bold=True, underline=True)\n try:\n with open(\".\\\\dmat\\\\playlist.txt\", \"r\") as file:\n file_content = file.read()\n splitted_content = file_content.split('\\n')\n\n # Check if the file is empty\n if splitted_content != ['']:\n for index, playlist in enumerate(splitted_content):\n # If the list is not empty\n if len(playlist) > 1:\n # Normalize the path from \"\\\\\" to \"\\\" and\n # turn it into a list based on \"\\\"\n playlist_path = os.path.normpath(playlist).split(os.sep)\n\n click.secho(f\" {index+1}. 
\" +\n playlist_path[-1][:-4], fg='bright_green')\n else:\n continue\n # Add spacing\n else:\n click.echo(\"\")\n else:\n click.secho(\" none\\n\", fg='red')\n except FileNotFoundError:\n click.secho(\" File doesn't exist\\n\", fg='red')\n\nif __name__ == '__main__':\n cli()\n","repo_name":"BryanEgbert/discord_musicbot_auto_typer","sub_path":"activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"25559386058","text":"import pygame\nfrom init_fonts import Font\nfrom datetime import datetime\nfrom random import choice\nfrom bible_names import BIBLE_NAMES\nfrom asset_classes import KeyboardAssets\n\n\nclass Keyboard:\n def __init__(self, sounds):\n self.sounds = sounds\n self.text = []\n self.start_dt = datetime.now()\n self.secondary_font = Font(50)\n self.user_text = ''\n self.input_rect = pygame.Rect(200, 200, 80, 3)\n self.color_active = pygame.Color((127, 127, 127))\n self.color_passive = pygame.Color((255, 255, 255))\n self.color = self.color_passive\n self.count = 0\n self.active = True\n self.assets = KeyboardAssets()\n self.search_noun = BIBLE_NAMES\n self.user_text += choice(self.search_noun)\n\n def blit_quit_btn(self, surface, title_rect, controls):\n btn_rect = pygame.Rect(0, 0, title_rect.w / 2, 50)\n btn_rect.x = title_rect.bottomright[0] - (btn_rect.w - 10)\n btn_rect.y = title_rect.bottomright[1] + 85\n rect = btn_rect\n text_quit = \" QUIT (-)\" if controls.joysticks else \" QUIT\"\n text = self.secondary_font.mago_bold.render(text_quit, True, (255, 255, 255))\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, (rect.y + rect.h) - 5, rect.w, 5))\n pygame.draw.rect(surface, (120, 126, 141), ((rect.x + rect.w) - 5, rect.y, 5, rect.h))\n surface.blit(text, (rect.x + (rect.w / 6) + 10, rect.y))\n pos = pygame.mouse.get_pos()\n if rect.collidepoint(pos):\n # for keyboard\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (120, 126, 141), (rect.x, rect.y, rect.w, 5))\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, rect.y, 5, rect.h))\n surface.blit(text, (rect.x + (rect.w / 6) + 5, rect.y + 5))\n return True\n elif controls.joysticks and controls.obj['esc']:\n # for joysticks\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (120, 126, 141), (rect.x, rect.y, rect.w, 5))\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, rect.y, 5, rect.h))\n surface.blit(text, (rect.x + (rect.w / 6) + 5, rect.y + 5))\n return True\n return False\n\n def blit_start_btn(self, surface, title_rect, controls):\n btn_rect = pygame.Rect(0, 0, title_rect.w / 2, 50)\n btn_rect.x = title_rect.bottomleft[0] - 10\n btn_rect.y = title_rect.bottomleft[1] + 85\n rect = btn_rect\n text_start = \" START (+)\" if controls.joysticks else \" START\"\n text = self.secondary_font.mago_bold.render(text_start, True, (255, 255, 255))\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, (rect.y + rect.h) - 5, rect.w, 5))\n pygame.draw.rect(surface, (120, 126, 141), ((rect.x + rect.w) - 5, rect.y, 5, rect.h))\n surface.blit(text, (rect.x, rect.y))\n pos = pygame.mouse.get_pos()\n if rect.collidepoint(pos):\n # for keyboard\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (120, 126, 141), (rect.x, rect.y, rect.w, 5))\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, rect.y, 5, rect.h))\n surface.blit(text, 
(rect.x + 5, rect.y + 5))\n return True\n elif controls.joysticks and controls.obj['restart']:\n # for joysticks\n pygame.draw.rect(surface, (71, 77, 91), rect)\n pygame.draw.rect(surface, (120, 126, 141), (rect.x, rect.y, rect.w, 5))\n pygame.draw.rect(surface, (95, 101, 115), (rect.x, rect.y, 5, rect.h))\n surface.blit(text, (rect.x + 5, rect.y + 5))\n return True\n return False\n\n def get_letter(self, pressed):\n text = \"\"\n for letter in self.assets.letters:\n code = letter['item']\n let = letter['let']\n if pressed[code]:\n self.sounds.play_menu_select()\n text += let\n return text.upper()\n\n def type_name(self, surface, controls, title_obj):\n self.count += .08\n if round(self.count) == 1:\n self.count = 0\n char = self.get_letter(controls.obj['pressed'])\n if controls.obj['pressed'][pygame.K_BACKSPACE]:\n self.color = self.color_passive\n # self.user_text = self.user_text[:-1]\n self.user_text = \"\"\n elif char != \"\" and self.active:\n self.color = self.color_active\n self.user_text = self.user_text + char if len(self.user_text) < 25 else self.user_text\n\n self.input_rect.w = title_obj.get_width() / 2\n self.input_rect.x = (surface.get_width() / 2) - 40\n self.input_rect.y = surface.get_height() / 2\n pygame.draw.rect(surface, self.color, self.input_rect)\n text = self.secondary_font.mago_bold.render(self.user_text, True, (255, 255, 255))\n surface.blit(text, (self.input_rect.x, self.input_rect.y - text.get_height()))\n text_bg = self.secondary_font.mago_bold.render(\" ENTER NAME:\", True, (0, 0, 0))\n text = self.secondary_font.mago_bold.render(\" ENTER NAME:\", True, (255, 255, 255))\n surface.blit(text_bg, (((surface.get_width() / 2) - title_obj.get_width() / 2) - 4,\n (self.input_rect.y - text.get_height()) - 4))\n surface.blit(text, ((surface.get_width() / 2) - title_obj.get_width() / 2,\n self.input_rect.y - text.get_height()))\n if text.get_width() >= self.input_rect.w - 50:\n self.active = False\n else:\n self.active = True","repo_name":"g-ulrich/TinKnight","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32978021432","text":"import evaluate_tools\nfrom model import Modified3DUNet\nfrom torch.utils.data import DataLoader\n# from kidney_dataloader_pred import KidneyDatasetPred\nfrom kidney_dataloader_1_c import KidneyDataset_1_c\nimport torch.nn\nimport SimpleITK as sitk\nimport numpy as np\nfrom tqdm import tqdm\nimport pandas as pd\nimport os\nimport pdb\n\ndef save_mask_to_nii(case_id, mask, mask_meta, mask_gt_shape,save_root,is_mask):\n '''\n save fp_map and fn_map\n '''\n # mask size\n mask = mask.view(mask_gt_shape)\n # numpy array to medical image\n mask_arr = mask.cpu().detach().numpy() \n mask_img = sitk.GetImageFromArray(mask_arr)\n # set origin, direction, spacing\n mask_img.SetSpacing([x.numpy()[0] for x in mask_meta['spacing']])\n mask_img.SetOrigin([x.numpy()[0] for x in mask_meta['origin']])\n mask_img.SetDirection([x.numpy()[0] for x in mask_meta['direction']])\n # save mdical image\n dir_ele = case_id.split('_')\n dir_name = dir_ele[0] + '_' + dir_ele[1]\n kidney_id = dir_ele[2]\n \n if is_mask:\n file_name = 'pred_mask_' + kidney_id + '.nii.gz'\n case_path = os.path.join(os.path.join(save_root, dir_name), file_name)\n else:\n file_name = 'pred_prob_' + kidney_id + '.nii.gz'\n case_path = os.path.join(os.path.join(save_root, dir_name), file_name)\n # print(case_path)\n sitk.WriteImage(mask_img, 
case_path)\n \n# dataset\n# csv_pred_path = '../pred_small.csv'\n# csv_pred_path ='../val_new.csv'\ncsv_pred_path = '../kidneys.csv'\n# kidney_dataset_pred = KidneyDatasetPred(csv_pred_path)\nkidney_dataset_pred = KidneyDataset_1_c(csv_pred_path)\npred_loader = DataLoader(kidney_dataset_pred, batch_size = 1, shuffle=False, num_workers=1)\n\n# gpu\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# model\nin_channels = 1\nn_classes = 1\nbase_n_filter = 16\n# modelpath = '/data/ccusr/xinyug/annotation/Modified-3D-UNet-Pytorch/checkpoints/softdice/checkpoint_84_20191217_1208.pth.tar'\n\nmodelpath = '/data/ccusr/xinyug/annotation/Modified-3D-UNet-Pytorch/checkpoints/softdice_1c/checkpoint_71_20191225_1636.pth.tar'\nmodel = Modified3DUNet(in_channels, n_classes, base_n_filter).to(device)\nmodel_pred = evaluate_tools.load_checkpoint_with_date(model, modelpath)\n\nsave_root = '/data/ccusr/xinyug/annotation/kidney/ori/'\nsum_iou = torch.tensor(0.0).to(device)\ncase_ious = []\ncase_ids = []\n\nfor pred_batch in tqdm(pred_loader):\n out = model_pred(torch.unsqueeze(pred_batch['data'],1).to(device))\n data_key = pred_batch['kidney_id']\n print(data_key[0]) \n # print(data_key)\n # pdb.set_trace()\n mask_meta = pred_batch['mask_meta']\n mask_gt_shape = torch.squeeze(pred_batch['mask'],0).shape\n mask_tensor = torch.sigmoid(out[0])\n\n # save mask prob\n save_mask_to_nii(data_key[0], mask_tensor, mask_meta, mask_gt_shape,save_root,False)\n\n mask_tensor[mask_tensor>0.5] = 1\n mask_tensor[mask_tensor<=0.5] = 0\n \n # save predicted mask\n save_mask_to_nii(data_key[0], mask_tensor, mask_meta, mask_gt_shape,save_root,True)\n\n iou = evaluate_tools.getIOU_two_classes(mask_tensor, pred_batch['mask'].view(1,-1).squeeze(0).to(device))\n case_ious.append(iou)\n case_ids.append(data_key[0])\n sum_iou+=iou\n # print(pred_batch['mask_path'])\n # print(iou)\n # pdb.set_trace()\n\nprint(sum_iou/len(pred_loader))\ndf_res = pd.DataFrame({'case_id':case_ids, 'case_iou': case_ious})\ndf_res.to_csv('one_chanel_model_whole_performance.csv',index=False)\n # mask_arr = mask_tensor.view(128,128,64).detach().cpu().clone().numpy().astype(np.uint16)\n # pred_mask = sitk.GetImageFromArray(mask_arr)\n # # ori_mask =sitk.GetImageFromArray(pred_batch['mask_img']) \n # # print(mask_meta['spacing'])\n # # print ([x.numpy()[0] for x in mask_meta['spacing']])\n # # pdb.set_trace() \n # pred_mask.SetSpacing([x.numpy()[0] for x in mask_meta['spacing']])\n # pred_mask.SetDirection([x.numpy()[0] for x in mask_meta['direction']])\n # pred_mask.SetOrigin([x.numpy()[0] for x in mask_meta['origin']])\n # sitk.WriteImage(pred_mask, 'pred_00206_1.nii.gz')\n\n\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# modelpath = '/data/ccusr/xinyug/annotation/Modified-3D-UNet-Pytorch/checkpoints/bce/checkpoint_8_20191216_1551.pth.tar'\n\n# model = Modified3DUNet(1, 2, 16).to(device)\n# model_eval = evaluate_tools.load_checkpoint_with_date(model, modelpath)\n\n# data = '/data/ccusr/xinyug/annotation/kidney/ori/case_00071/kidney_1_resampled.nii.gz'\n# mask_data = '/data/ccusr/xinyug/annotation/kidney/ori/case_00071/mask_1_resampled.nii.gz'\n# pro_data = '/data/ccusr/xinyug/annotation/kidney/ori/case_00071/prob_map_1_resmapled.nii.gz'\n# data_arr = sitk.GetArrayFromImage(sitk.ReadImage(data))\n# prob_arr = sitk.GetArrayFromImage(sitk.ReadImage(data))\n\n# data_tensor = torch.FloatTensor(np.array([[data_arr]])).to(device)\n\n# print(data_tensor.shape)\n\n# mask = model_eval(data_tensor)\n\n# 
print (mask[0].shape)\n# print (mask[0].argmax(dim=1))\n\n# img = sitk.ReadImage(data)\n# print(img.GetSpacing())\n# print(img.GetOrigin())\n# print(img.GetDirection())\n# # pdb.set_trace()\n\n# pred_cpu_arr = mask[0].argmax(dim=1).view(128,128,64).detach().cpu().clone().numpy().astype(np.uint16)\n# print (pred_cpu_arr)\n\n# # print (pred_cpu.shape)\n# # pdb.set_trace()\n# pred_mask =sitk.GetImageFromArray(pred_cpu_arr)\n# pred_mask.SetSpacing(img.GetSpacing())\n# pred_mask.SetDirection(img.GetDirection())\n# pred_mask.SetOrigin(img.GetOrigin())\n\n# # pred_mask = pred_mask.CopyInformation(sitk.ReadImage(mask_data))\n# sitk.WriteImage( pred_mask, 'pred_00071_1.nii.gz')","repo_name":"XinyuGuo/interactive_annotation","sub_path":"Modified-3D-UNet-Pytorch/eval/predict_1c.py","file_name":"predict_1c.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"70622555691","text":"#!/usr/bin/env python3\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"smnar-lidar-controller\",\n    version=\"0.0.1\",\n    author=\"Federico Verstraeten\",\n    description=\"SMN Argentina LICEL controller\",\n    long_description=\"A controller for 'Servicio Meteorológico Nacional de Argentina (Argentine National Meteorological Service)' LIDAR system using LICEL (Lidar Transient Recorder)\",\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/FedeVerstraeten/smnar-licel-controller\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6'\n)","repo_name":"FedeVerstraeten/smnar-lidar-controller","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"17400236874","text":"# # Using Variables in TensorFlow\n\nimport tensorflow as tf\nsess = tf.InteractiveSession()\n\n\n# Create a boolean variable called `spike` to detect a sudden increase in a series of numbers.\n# \n# Since all variables must be initialized, initialize the variable by calling `run()` on its `initializer`.\n\nraw_data = [1., 2., 8., -1., 0., 5.5, 6., 13]\nspike = tf.Variable(False)\nspike.initializer.run()\n\n\n# Loop through the data and update the spike variable when there is a significant increase \n\nfor i in range(1, len(raw_data)):\n    if raw_data[i] - raw_data[i-1] > 5:\n        updater = tf.assign(spike, tf.constant(True))\n        updater.eval()\n    else:\n        tf.assign(spike, False).eval()\n    print(\"Spike\", spike.eval())\n\nsess.close()\n\n\n","repo_name":"BinRoot/TensorFlow-Book","sub_path":"ch02_basics/spikes.py","file_name":"spikes.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":4458,"dataset":"github-code","pt":"55"} +{"seq_id":"74151186411","text":"import time\n\nimport numpy as np\n\nfrom actions import Actions\nfrom concepts.concept_base import ConceptBase\nfrom concepts.letter_addition import LetterAddition\nfrom random_ng import rand_ng\nfrom .base_learner import BaseLearner\n\n\nclass SimMemorylessLearner(BaseLearner):\n    def __init__(self, concept: ConceptBase, prior_distribution: np.ndarray):\n        super().__init__(concept)\n\n        self.verbose = True\n        self.pause = 0\n\n        self.transition_noise = 
concept.TRANS_NOISE['memoryless']\n self.production_noise = concept.PROD_NOISE['memoryless']\n\n self.number_range = None\n if isinstance(concept, LetterAddition):\n self.number_range = concept.numbers\n\n # what should it be initialized?\n self.concept_space = concept.get_concept_space()\n concept_space_len = len(self.concept_space)\n self.prior_distribution = prior_distribution\n\n self.concept_belief = self.concept_space[rand_ng.rg.choice(range(concept_space_len),\n p=self.prior_distribution)]\n\n self.total_time = 0\n\n self.mode = \"stoch\"\n\n def see_example(self, example):\n self.print(self.concept.gen_readable_format(example))\n time.sleep(self.pause)\n\n believed_answer = self.self_evaluate(example[0])\n if believed_answer == example[1]:\n # current concept consistent with example\n pass\n else:\n self.update_state(example)\n\n self.total_time += self.concept.ACTION_COSTS[Actions.EXAMPLE]\n\n def see_quiz(self, quiz):\n response = self.generate_answer(quiz)\n\n self.total_time += self.concept.ACTION_COSTS[Actions.QUIZ]\n\n return response\n\n def generate_answer(self, quiz):\n self.print(self.concept.gen_readable_format(quiz, False))\n time.sleep(self.pause)\n\n if rand_ng.rg.random() < self.production_noise:\n response = rand_ng.rg.choice(list(self.concept.get_observation_space())) # random answer\n else:\n response = self.self_evaluate(quiz[0])\n\n self.print(\"I think it is %d\" % response)\n\n return response\n\n def see_question_question(self, question):\n return self.generate_answer(question)\n\n def see_question_feedback(self, question, correct):\n if not correct:\n self.print(\"Not quite, the correct answer is %d\" % question[1])\n\n self.update_state(question)\n else:\n self.print(\"Correct\")\n\n self.total_time += self.concept.ACTION_COSTS[Actions.FEEDBACK]\n time.sleep(self.pause)\n\n def update_state(self, example):\n if rand_ng.rg.random() < self.transition_noise:\n # ignore change\n return\n\n if self.mode == \"pair\":\n # TODO improve: properly calculate state distances\n possible_pairs = self.generate_possible_pairs(example[1])\n\n # TODO improve: prefer options with a match of current belief? i.e. 
least changes\n pair = rand_ng.rg.choice(possible_pairs)\n\n self.update_values_with_pair(example[0], pair)\n\n self.fill_empty_mappings()\n else:\n # Sample concept consistent with action according to prior\n concepts_results = np.array([self.concept.evaluate_concept(example[0], c) for c in self.concept_space])\n consistent_concepts_filter = concepts_results == example[1]\n\n consistent_concepts = np.flatnonzero(consistent_concepts_filter)\n\n consistent_concepts_prob = self.prior_distribution[consistent_concepts_filter]\n consistent_concepts_prob /= np.sum(consistent_concepts_prob)\n\n new_belief_idx = rand_ng.rg.choice(consistent_concepts, p=consistent_concepts_prob)\n self.concept_belief = self.concept_space[new_belief_idx]\n\n def update_values_with_pair(self, letters, pair):\n # mark values from the pick as invalid\n for idx, val in enumerate(self.concept_belief):\n if val == pair[0] or val == pair[1]:\n self.concept_belief[idx] = -1\n\n # set new values from the picked pair\n self.concept_belief[letters[0]] = pair[0]\n self.concept_belief[letters[1]] = pair[1]\n\n def generate_possible_pairs(self, result):\n possible_pairs = []\n for i in range(int(result) + 1):\n pair = (i, int(result) - i)\n if max(pair[0], pair[1]) > max(self.number_range):\n continue\n\n if pair[0] == pair[1]:\n continue\n\n possible_pairs.append(pair)\n return possible_pairs\n\n def fill_empty_mappings(self):\n num_reassign, refill_idx = self.get_idx_val_to_fill()\n\n num_reassign = rand_ng.rg.choice(num_reassign, len(refill_idx), replace=False).tolist()\n for i in refill_idx:\n if self.concept_belief[i] == -1:\n self.concept_belief[i] = num_reassign.pop(0)\n\n def get_idx_val_to_fill(self):\n refill_idx = []\n num_reassign = np.array(self.number_range.copy())\n for i, val in enumerate(self.concept_belief):\n if val > -1:\n num_reassign[val] = -1\n else:\n refill_idx.append(i)\n\n num_reassign = num_reassign[num_reassign > -1]\n\n return num_reassign, refill_idx\n\n def self_evaluate(self, equation):\n return self.concept.evaluate_concept(equation, self.concept_belief)\n\n def answer(self, item):\n curr_guess = self.concept.evaluate_concept(item[0], self.concept_belief)\n curr_guess = self.concept.format_response(curr_guess)\n\n self.print(\"I think %s is %d\" % (item[1], curr_guess))\n\n return curr_guess\n\n def print(self, message):\n if self.verbose:\n print(message)\n","repo_name":"luksurious/faster-teaching","sub_path":"learners/sim_memoryless_learner.py","file_name":"sim_memoryless_learner.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"36866150125","text":"#!/usr/bin/python3\n# Write a method that determines if all the boxes can be opened.\n\n\ndef canUnlockAll(boxes):\n\n keybox = [0]\n\n for box in keybox:\n for key in boxes[box]:\n if key not in keybox and key < len(boxes):\n keybox.append(key)\n\n if len(keybox) == len(boxes):\n return (True)\n else:\n return (False)\n","repo_name":"AlejandroArbelaez21/holbertonschool-interview","sub_path":"0x00-lockboxes/0-lockboxes.py","file_name":"0-lockboxes.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"5841091555","text":"import cv2\nimport numpy as np\nimport imutils\n\ndef mapp(h):\n h = h.reshape((4,2))\n hnew = np.zeros((4,2),dtype = np.float32)\n\n add = h.sum(1)\n hnew[0] = h[np.argmin(add)]\n hnew[2] = h[np.argmax(add)]\n\n diff = 
np.diff(h,axis = 1)\n    hnew[1] = h[np.argmin(diff)]\n    hnew[3] = h[np.argmax(diff)]\n\n    return hnew\n\nimage=cv2.imread(\"picture1.jpg\")\ngray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\nblurred=cv2.GaussianBlur(gray,(5,5),0)\nedged=cv2.Canny(blurred,30,50)\n\ncnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\ncnts=sorted(cnts,key=cv2.contourArea,reverse=True)\n\ncv2.drawContours(image, cnts, 0, (0, 0, 0), 2)\n\np = cv2.arcLength(cnts[0], True)\napprox = cv2.approxPolyDP(cnts[0], 0.02 * p, True)\n\napprox=mapp(approx)\n\npts=np.float32([[0,0],[800,0],[800,800],[0,800]])\n\nop=cv2.getPerspectiveTransform(approx,pts)\ndst=cv2.warpPerspective(image,op,(800,800))\n\n\ncv2.imshow(\"Scanned\",dst)\ncv2.imshow(\"Image\", image)\ncv2.imshow(\"Gray\", gray)\ncv2.imshow(\"Blurred\", blurred)\ncv2.imshow(\"Edged\", edged)\n\ncv2.imwrite(\"output/Image.jpg\", image)\ncv2.imwrite(\"output/Gray.jpg\", gray)\ncv2.imwrite(\"output/Blurred.jpg\", blurred)\ncv2.imwrite(\"output/Edged.jpg\", edged)\ncv2.imwrite(\"output/Scanned.jpg\", dst)\n\ncv2.waitKey(0)\n\n\n","repo_name":"sadhiman7/Loan_Analysis","sub_path":"verification/countour.py","file_name":"countour.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"72377983852","text":"n = input()\na = [int(x) for x in raw_input().split()]\nl = len(a)\nres = 0\nt1 = True\np = 0\nfor x in range(0,l):\n\tif a[x] == x:\n\t\tres+=1\n\t\tp+=1\n\telif t1 == True and a[a[x]] == x:\n\t\tt1 = False\n\t\tres+=2 \t\nif res == p and res < l:\n\tres+=1\n\t\nprint(res)\t\t\t\t\t\t\n\t\t\t\t\n","repo_name":"AlgoStuff/CompetitiveProgramming","sub_path":"Codeforces/CODEFORCES201_500.py","file_name":"CODEFORCES201_500.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"15592462724","text":"import requests\nimport time\n\nurl = 'https://entree.igetget.com/onepiece/v1/user/init?phone=17521359419'\nurl2 = 'https://httpbin.org/get'\nurl3 = 'http://www.baidu.com'\nproxies = {\n    \"http\": \"http://175.8.109.48:8118\",\n    \"https\": \"https://113.121.20.170:9999\"\n}\n\nheaders = {\n    \"X-App-Key\": \"android-6.0.1\",\n    \"X-Uid\": \"0\",\n    \"X-Thumb\": \"m\",\n    \"X-Dt\": \"phone\",\n    \"X-Ov\": \"4.4.2\",\n    \"X-Net\": \"WIFI\",\n    \"X-Os\": \"ANDROID\",\n    \"X-D\": \"244032cc40441432\",\n    \"X-Dv\": \"SM-G955F\",\n    \"X-T\": \"json\",\n    \"X-Chil\": \"175\",\n    \"X-V\": \"2\",\n    \"X-Av\": \"6.0.1\",\n    \"X-Scr\": \"1.5\",\n    \"X-Adv\": \"1\",\n    \"X-Seid\": \"4ecb4596360141748382e7723ee1dab0\",\n    \"X-Hitdot\": \"\",\n    \"G-Auth-Sign\": \"OWNjNzk1MjhlNjg1M2FhM2Y2YmVhMjFlMjllZmQ5YTg=\",\n    \"G-Auth-Nonce\": \"54bdf159d0d58b34ec36f0670b7efa90\",\n    \"G-Auth-Ts\": \"1560498398\",\n    \"G-Auth-Appid\": \"5a27d122ee4638163f594393\",\n    \"Host\": \"entree.igetget.com\",\n    \"Connection\": \"Keep-Alive\",\n    \"Accept-Encoding\": \"gzip\",\n    \"User-Agent\": \"okhttp/3.11.0\",\n}\nr = requests.get(url=url2,headers=headers,proxies=proxies)\nprint(r.text)","repo_name":"sheaking/GetSpider","sub_path":"test/test_dedao_interface2.py","file_name":"test_dedao_interface2.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"15338688000","text":"from typing import List\nimport re\n\n\ndef isPalindrome(s: str) -> bool:\n    s = s.lower() # convert to lowercase and store it back in the variable\n    '''\n    # replacement function (match object)\n    # re.sub('pattern', replacement_function, 'string', count)\n\n    '''\n    # a-z, 0-9: use a regular expression to keep only letters and digits\n    \n    s = re.sub('[^a-z0-9]','',s) \n    \n    return s == s[::-1] # slicing -> returns the reversed string\n\nisPalindrome('tomot')","repo_name":"AIFFEL-coma-team01/Sanghyo","sub_path":"chapter_7/125_Valid_Palindrome.py","file_name":"125_Valid_Palindrome.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"17336841523","text":"from bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport numpy as np\r\nimport requests\r\nfrom random import randint\r\n\r\n# !pip install -U selenium\r\n# !apt-get update \r\n# !apt install chromium-chromedriver\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\n\r\nchrome_options = webdriver.ChromeOptions()\r\nchrome_options.add_argument('--headless')\r\nchrome_options.add_argument('--no-sandbox')\r\nchrome_options.add_argument('--disable-dev-shm-usage')\r\ndriver = webdriver.Chrome('chromedriver',chrome_options=chrome_options)\r\n\r\nteams = ['htx', 'kan', 'nyj', 'buf', 'sea', 'atl', 'phi', 'was', 'cle', 'rav', 'mia', 'nwe', 'gnb', 'min', 'clt', 'jax', 'chi', \r\n         'det', 'rai', 'car', 'sdg', 'cin', 'crd', 'sfo', 'tam', 'nor', 'dal', 'ram', 'pit', 'nyg', 'oti', 'den']\r\n\r\nno_table = []\r\ndata = pd.DataFrame()\r\nurl = 'https://www.pro-football-reference.com'\r\nyear = 2020\r\n\r\nfor team in teams:\r\n    driver.get(url + '/teams/' + str(team) + '/2020.htm') \r\n    sleep(randint(2,10))\r\n    table = pd.read_html(driver.page_source)\r\n    week = 16\r\n    cols = ['Week', 'Day', 'Date', 'Time', 'BoxS', 'Result', 'OT',\t'Rec', 'Home', 'Opp_Name',\t'Tm',\t'Opp',\t'OFF1stD',\t'OFFTotYd',\t'OFFPassY',\t'OFFRushY',\t'TOOFF',\t'DEF1stD',\t'DEFTotYd',\t'DEFPassY',\t'DEFRushY',\t'TODEF',\t'OffenseEP',\t'DefenseEP',\t'Sp_TmsEP']\r\n    dft = table[2]\r\n    dft = dft[0:week]\r\n    dft.columns = cols\r\n    dft = dft[~dft.Opp_Name.str.contains(\"Bye\")]\r\n    dft = dft.drop(['Day', 'Date', 'Time', 'BoxS', 'OT', 'Rec'], axis=1)\r\n    dft['Result'] = [0 if r=='L' else 1 for r in dft['Result']]\r\n    dft['Home'] = [0 if r=='@' else 1 for r in dft['Home']]\r\n    dft['TOOFF'] = dft['TOOFF'].fillna(0)\r\n    dft['TODEF'] = dft['TODEF'].fillna(0)\r\n    dft['Team'] = str(team)\r\n    dft = dft.set_index('Team')\r\n    dft.reset_index(inplace=True)\r\n    no_table.append(dft)\r\n    \r\ndf = pd.concat(no_table)\r\ndf['Opp_Name'] = df['Opp_Name'].astype('category')\r\ndf['Team'] = df['Team'] .astype('category')\r\ndf.to_csv('D:/Documents/ML DOCS/ML Project Files/preds/2020df_week16.csv')","repo_name":"barbabb/fantastic-adventure","sub_path":"dfparserv1.py","file_name":"dfparserv1.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"34528289891","text":"import logging\nfrom pyspark.sql.functions import count\nimport pandas\n\n\n\ndef df_count(df,dfName):\n    \"\"\"This function validates the ingested data for the city dimension data and usa_prescriber data\"\"\"\n    try:\n        logging.info(f\"The DataFrame Validation by count df_count() is started for Dataframe {dfName}...\")\n        df_count=df.count()\n        logging.info(f\"The DataFrame count is {df_count}.\")\n    except Exception as exp:\n        logging.error(\"Error in the method - df_count(). Please check the Stack Trace. 
\" + str(exp))\n raise\n else:\n logging.info(f\"The DataFrame Validation by count df_count() is completed.\")\n\n\ndef df_print_schema(df,dfName):\n try:\n logging.info(f\"The DataFrame Schema Validation for Dataframe {dfName}...\")\n sch=df.schema.fields\n logging.info(f\"The DataFrame {dfName} schema is: \")\n for i in sch:\n logging.info(f\"\\t{i}\")\n except Exception as exp:\n logging.error(\"Error in the method - df_show_schema(). Please check the Stack Trace. \" + str(exp))\n raise\n else:\n logging.info(\"The DataFrame Schema Validation is completed.\")\n\n\ndef df_top10_rec(df,dfName):\n try:\n logging.info(f\"The DataFrame Validation by top 10 record df_top10_rec() is started for Dataframe {dfName}...\")\n logging.info(f\"The DataFrame top 10 records are:.\")\n df_pandas=df.limit(10).toPandas()\n logging.info('\\n \\t'+ df_pandas.to_string(index=False))\n except Exception as exp:\n logging.error(\"Error in the method - df_top10_rec(). Please check the Stack Trace. \" + str(exp))\n raise\n else:\n logging.info(\"The DataFrame Validation by top 10 record df_top10_rec() is completed.\")","repo_name":"judeleonard/Prescriber-ETL-data-pipeline","sub_path":"scripts/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"55"} +{"seq_id":"3547226330","text":"import mujoco_py\nimport robosuite as suite\n\nfrom custom_task import LiftRandomObject\nfrom time import sleep\n\nfrom custom_utils import euler_angel_to_quat\nfrom custom_gym_wrapper import CustomGymWrapper\nfrom robosuite import load_controller_config\nfrom objects import LemonObject, BreadObject\n\nfrom domain_randomization_wrapper_args import CUSTOM_CAMERA_ARGS, CUSTOM_COLOR_ARGS, CUSTOM_DYNAMICS_ARGS, CUSTOM_LIGHTING_ARGS, NO_CAMERA_ARGS, NO_COLOR_ARGS, NO_LIGHTING_ARGS\n\nfrom robosuite.wrappers import GymWrapper, DomainRandomizationWrapper\n\n\"\"\"If you are using 2d camera, 55,84 mm has to be added to the Y translation in camera frame\"\"\"\n\ncontroller_config = load_controller_config(default_controller=\"OSC_POSE\")\n\nobjects = [LemonObject(name = \"Lemon\"),BreadObject(name = \"Bread\")]\nimport numpy as np\n\n\ntrans_matrix_org = np.array([[ 0.171221, 0.730116, -0.661524, 1124.551880], \n [ 0.985078, -0.138769, 0.101808, -46.181087], \n [-0.017467, -0.669085, -0.742981, 815.163208+800], \n [ 0.000000, 0.000000, 0.000000, 1.000000]])\n\ntrans_matrix = np.array([[ 5.55111512e-17, 2.57400000e-01, -9.63200000e-01, 1.60000000e+00],\n [ 9.97000000e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],\n [ 0.00000000e+00, -9.63200000e-01, -2.57400000e-01, 1.45000000e+00],\n [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n\n\nfrom robosuite.utils.transform_utils import mat2quat, convert_quat, mat2pose\ncamera_quat = mat2quat(trans_matrix_org[:3,:3])\n#camera_quat = convert_quat(camera_quat, to=\"wxyz\")\ncamera_quat = [-camera_quat[0],camera_quat[2],camera_quat[3],-camera_quat[1]]\nprint(\"hand_eye\", camera_quat)\n#camera_quat = [0.7071068, 0.7071068, 0, 0]\npos = trans_matrix_org[0:3,3]*0.001\nprint(\"pos\",pos)\n\nheight_vs_width_relattion = 754/449\ncamera_attribs = {'fovy': 31.0350747}\ncamera_h = 1280\ncamera_w = int(camera_h * height_vs_width_relattion)\nenv = suite.make(\n camera_pos = pos,#(1.1124,-0.046,1.615),#(1.341772827, -0.312295471 , 0.182150085+1.5), \n camera_quat = camera_quat,#(0.5608417987823486, 0.4306466281414032, 0.4306466579437256, 0.5608419179916382),# frontview 
quat\n    camera_attribs = camera_attribs,\n    env_name=\"LiftRandomObject\", # try with other tasks like \"Stack\" and \"Door\"\n    robots=\"IIWA\", # try with other robots like \"Sawyer\" and \"Jaco\"\n    gripper_types=\"Robotiq85Gripper\",\n    has_renderer=False,\n    has_offscreen_renderer=True,\n    use_camera_obs=True,\n    camera_names =['calibrated_camera','frontview','sideview'],\n    camera_widths =[camera_w,2560,2560],\n    camera_heights=[camera_h,2560,2560],\n    camera_depths=[True,True,True],\n    use_object_obs=False,\n    controller_configs=controller_config,\n    objects = objects,\n)\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom scipy import ndimage\n# assumed source of get_camera_intrinsic_matrix used below (robosuite's camera utilities):\nfrom robosuite.utils.camera_utils import get_camera_intrinsic_matrix\n\n\nprint(get_camera_intrinsic_matrix(env.sim,'calibrated_camera',2560,2560))\n\nobs = env.reset()\nobs, r,s,d = env.step([0,0,0,0,0,0,0])\nobs = env.reset()\nimg = obs['calibrated_camera_image']\nrotated_img = ndimage.rotate(img, 180)\nplt.figure(figsize = (10,10))\nplt.imshow(rotated_img)\nplt.show()\n\nsleep(6)\n ","repo_name":"ludvikka/rs_sb3","sub_path":"test_cal_camera.py","file_name":"test_cal_camera.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"74529692650","text":"from json import loads\nfrom requests import get\n\nfrom rucio.common.config import config_get\n\n\nclass MappingCollector(object):\n    \"\"\"\n    Provides mappings from PanDA / DDM resources to ATLAS sites and back.\n    \"\"\"\n    class _MappingCollector(object):\n        '''\n        _MappingCollector\n        '''\n\n        def __init__(self):\n            '''\n            __init__\n            '''\n            self._fetch_panda_mapping()\n            self._fetch_ddm_mapping()\n\n        def _fetch_panda_mapping(self):\n            '''\n            _fetch_panda_mapping\n            '''\n            result = get(config_get('c3po-site-mapper', 'panda_url'))\n            data = loads(result.text)\n            self.panda_to_site = {}\n            self.site_to_panda = {}\n\n            for entry in data:\n                self.panda_to_site[entry['panda_resource']] = entry['atlas_site']\n                if entry['atlas_site'] not in self.site_to_panda:\n                    self.site_to_panda[entry['atlas_site']] = []\n                self.site_to_panda[entry['atlas_site']].append(entry['panda_resource'])\n\n        def _fetch_ddm_mapping(self):\n            '''\n            _fetch_ddm_mapping\n            '''\n            result = get(config_get('c3po-site-mapper', 'ddm_url'))\n            data = loads(result.text)\n            self.site_to_ddm = {}\n            self.ddm_to_site = {}\n\n            for entry in data:\n                self.ddm_to_site[entry['name']] = entry['site']\n                if entry['site'] not in self.site_to_ddm:\n                    self.site_to_ddm[entry['site']] = []\n                self.site_to_ddm[entry['site']].append(entry['name'])\n\n    instance = None\n\n    def __init__(self):\n        '''\n        __init__\n        '''\n        if not MappingCollector.instance:\n            MappingCollector.instance = MappingCollector._MappingCollector()\n\n    def ddm_to_site(self, ddm):\n        '''\n        ddm_to_site\n        '''\n        if ddm not in self.instance.ddm_to_site:\n            return None\n        return self.instance.ddm_to_site[ddm]\n\n    def panda_to_site(self, panda):\n        '''\n        panda_to_site\n        '''\n        if panda not in self.instance.panda_to_site:\n            return None\n        return self.instance.panda_to_site[panda]\n\n    def site_to_ddm(self, site):\n        '''\n        site_to_ddm\n        '''\n        if site not in self.instance.site_to_ddm:\n            return None\n        return self.instance.site_to_ddm[site]\n\n    def site_to_panda(self, site):\n        '''\n        site_to_panda\n        '''\n        if site not in self.instance.site_to_panda:\n            return None\n        return 
self.instance.site_to_panda[site]\n","repo_name":"zzaiin/Rucio","sub_path":"lib/rucio/daemons/c3po/collectors/agis.py","file_name":"agis.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"25697455671","text":"import math\r\n\r\nvocab_simp = list('абвгдеёжзийклмнопрстуфхцчшщъыьэюя')# the full Russian alphabet\r\nvocab_simp_en = list('abcdefghijklmnopqrstuvwxyz')# the full English alphabet\r\n\r\ndef gcd_explore(alphabet):\r\n    vocab_simp = list(alphabet)\r\n    m = len(vocab_simp)\r\n    gcd = []\r\n    for i in range(1, m + 1):\r\n        if math.gcd(i, m) == 1:\r\n            gcd.append(i)\r\n    return gcd\r\n\r\n\r\ndef alpha_inverse(a, p):\r\n    for d in range(1, p):\r\n        r = (d * a) % p\r\n        if r == 1:\r\n            break\r\n    else:\r\n        raise ValueError('%d has no inverse mod %d' % (a, p))\r\n    return d\r\n\r\ndef aff_decode(alphabet, cipher_text, alpha, beta):\r\n    open_txt = ''\r\n    alpha_inv = alpha_inverse(alpha, len(alphabet))\r\n    for i in cipher_text: # the loop skips all spaces so that they are not enciphered\r\n        if i == ' ' or i == ',' or i == \".\" or i == ':' or i == \";\":\r\n            open_txt += i\r\n            continue\r\n        open_txt += alphabet[(alpha_inv * (alphabet.index(i) - beta)) % len(alphabet)]\r\n    return open_txt\r\n\r\ndef analiz_bigramm(text):\r\n    \"\"\"\r\n    the function counts the number of occurrences of the substrings in the source text\r\n    :param text: the source text\r\n    :return: the number of occurrences\r\n    \"\"\"\r\n    bigr = ['ст', 'но', 'ен', 'то', 'на', 'ов', 'ни', 'ра', 'во', 'ко', 'сто', 'ено', 'нов', 'тов', 'ово', 'ова']\r\n    count = 0\r\n\r\n    for i in range(len(bigr)):\r\n        count += text.count(bigr[i])\r\n    return count\r\n\r\n\r\nmessage = ['', '', '']\r\nmaxi = 0\r\nstrings = []\r\nencode_text = input('Текст: ')\r\nlang = ''\r\nif encode_text[0] in vocab_simp:\r\n    lang = vocab_simp\r\nelse:\r\n    lang = vocab_simp_en\r\narr_alpha = gcd_explore(lang)\r\narr_beta = [x for x in range(len(vocab_simp))]\r\nfor a in arr_alpha:\r\n    for b in arr_beta:\r\n        decode_text = aff_decode(lang, encode_text, a, b)\r\n        m = analiz_bigramm(decode_text)\r\n\r\n        if m > maxi:\r\n            maxi = m\r\n            message[0], message[1], message[2] = decode_text, a, b\r\nprint(message[0])\r\nprint(f'a={message[1]},b={message[2]}')","repo_name":"fedyarays/crypto_pr1","sub_path":"crypto_analysis.py","file_name":"crypto_analysis.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"28718909529","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport argparse\nimport json\nimport gzip\n\nparser = argparse.ArgumentParser(description='Convert primers/amplicons from json to tsv format')\nparser.add_argument('-j', '--json', metavar='js.gz', required=True, dest='json', help='json file (required)')\nparser.add_argument('-m', '--mode', metavar='primer', required=True, dest='mode', help='mode [primer|amplicon] (required)')\nargs = parser.parse_args()\n\n\nprimerKeys = [\"Name\", \"Tm\", \"Chrom\", \"Pos\", \"End\", \"Ori\", \"MatchTm\", \"Seq\", \"Genome\"]\nampliconKeys = [\"Id\", \"Length\", \"Penalty\", \"Chrom\", \"ForPos\", \"ForEnd\", \"ForTm\", \"ForName\", \"ForSeq\", \"Chrom\", \"RevPos\", \"RevEnd\", \"RevTm\", \"RevName\", \"RevSeq\", \"Seq\"]\n\nif args.json:\n    with gzip.open(args.json, 'r') as f:\n        df = json.load(f)\n    if \"errors\" in df.keys():\n        for err in df['errors']:\n            print(err['title'])\n    if \"meta\" in df.keys():\n        if df['meta']['subcommand'] == \"search\":\n            if 
\"data\" in df.keys():\n if args.mode == \"primer\":\n print(\"Primer\", end=\"\")\n for k in primerKeys:\n print(\"\\t\", k, sep=\"\", end=\"\")\n print()\n if \"primers\" in df['data'].keys():\n for hit in df['data']['primers']:\n print(\"Primer\", end=\"\")\n for k in primerKeys:\n print(\"\\t\", hit[k], sep=\"\", end=\"\")\n print()\n else:\n print(\"Amplicon\", end=\"\")\n for k in ampliconKeys:\n print(\"\\t\", k, sep=\"\", end=\"\")\n print()\n if \"amplicons\" in df['data'].keys():\n for hit in df['data']['amplicons']:\n print(\"Amplicon\", end=\"\")\n for k in ampliconKeys:\n print(\"\\t\", hit[k], sep=\"\", end=\"\")\n print()\n","repo_name":"gear-genomics/dicey","sub_path":"scripts/json2tsv.py","file_name":"json2tsv.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"55"} +{"seq_id":"4628705857","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n header = head\n lst = []\n while header:\n lst.append(header.val)\n header = header.next\n cur = dummy = ListNode(0)\n for x,i in enumerate(lst):\n if x!= len(lst) - n:\n cur.next = ListNode(i)\n cur = cur.next\n \n return dummy.next\n ","repo_name":"rjrockzz/leetcode","sub_path":"Linked List/19. Remove Nth Node From End Of List/19-remove-nth-node-from-end-of-list.py","file_name":"19-remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"4933434443","text":"import logging\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseMailer(object):\n \"\"\"\n base class for sending emails\n\n \"\"\"\n def __init__(self, request=None):\n self.request = request\n\n def send_mail(self, subject, template_name, email, context):\n try:\n msg = self.render_mail(subject, template_name, email, context)\n msg.send()\n return True\n except Exception as ex:\n logger.error(ex)\n return False\n\n def render_mail(self, subject, template_name, email, context):\n from_email = settings.DEFAULT_FROM_EMAIL\n body = render_to_string(template_name, context).strip()\n msg = EmailMessage(\n subject,\n body,\n from_email,\n [email]\n )\n msg.content_subtype = 'html' # Main content is now text/html\n return msg\n","repo_name":"yonadav-labs/adopt-care","sub_path":"Source/Services/care_adopt_backend/mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40870823127","text":"'''\nThis uses the data_stats_analyser.py file and creates the data needed to create the heatmap in the paper\n'''\nfrom exp import data_stats_analyser as dsa\nfrom exp import exp_util\nimport re, csv,numpy\n\n'''\nthis is for wop classification dataset\n\nreturns: a dictionary with #entries=bin size, then for each key (bin), the average (of all instances') %ratio between: \nthe #of toks in an instance found in the reference_freq (product desc corpus), and the #of total toks in that instance\n'''\ndef count_data_freq_per_bin(df, reference_word2bin:dict, outfile, dataset, totalbins):\n\n count_dataset_bin_percents={}\n 
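# per instance: record the fraction of its tokens that falls in each frequency bin;\n    # these per-instance fractions are then averaged over all instances when the rows are written out below\n    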
total_instances=0\n for row in df:\n total_instances+=1\n text=row.strip()\n text = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', text)\n text = re.sub(r'\\W+', ' ', text).strip().lower()\n\n instance_wtotal = 0\n instance_bin_count={}\n\n #checking for each word in the instance, which bin it belongs to\n for w in text.split(\" \"):\n # if w in nlp.stopwords:\n # continue\n instance_wtotal+=1\n if w in reference_word2bin.keys():\n bin = reference_word2bin[w]\n if bin in instance_bin_count.keys():\n instance_bin_count[bin]+=1\n else:\n instance_bin_count[bin]=1\n\n #calculating %\n for k in instance_bin_count.keys():\n v = instance_bin_count[k]\n if instance_wtotal>0:\n percent = v/instance_wtotal\n else:\n percent=0\n\n instance_bin_count[k]=percent\n\n #update for the entire dataset counter\n for k, v in instance_bin_count.items():\n if k in count_dataset_bin_percents.keys():\n count_dataset_bin_percents[k]+=v\n else:\n count_dataset_bin_percents[k]=v\n\n #gone through all instances now, let's calculate average for each bin\n outf = open(outfile, 'a', newline='\\n')\n writer = csv.writer(outf, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_ALL)\n\n\n for b in range(0, totalbins):\n b=b+1\n if b not in count_dataset_bin_percents.keys():\n avg=0\n else:\n percent = count_dataset_bin_percents[b]\n avg = percent/total_instances\n writer.writerow([dataset,b,avg])\n outf.close()\n\n\ndef load_data_classification(dataset_type, train_data_file, test_data_file, text_fields:list\n ):\n if dataset_type==\"mwpd\":\n df, train_size, test_size = exp_util. \\\n load_and_merge_train_test_data_jsonMPWD(train_data_file, test_data_file)\n elif dataset_type==\"rakuten\":\n df, train_size, test_size = exp_util. \\\n load_and_merge_train_test_csvRakuten(train_data_file, test_data_file, delimiter=\"\\t\")\n elif dataset_type==\"icecat\":\n df, train_size, test_size = exp_util. \\\n load_and_merge_train_test_data_jsonIceCAT(train_data_file, test_data_file)\n else:#wdc\n df, train_size, test_size = exp_util. 
\\\n load_and_merge_train_test_data_jsonWDC(train_data_file, test_data_file)\n\n data=[]\n\n for row in df:\n text=\"\"\n #merge all the text fields\n for c in text_fields:\n text+=row[c]+\" \"\n text=text.strip()\n\n data.append(text)\n\n return data\n\ndef load_data_matching(in_dir):\n train,val, test=dsa.read_wop_matching_data(in_dir)\n\n df=train.values\n data=[]\n for row in df:\n text=(row[1]+\" \"+row[2]).strip()\n data.append(text)\n return data\n\n\nif __name__ == \"__main__\":\n in_file = \"/home/zz/Work/data/wdc/prod_desc_corpus/desc_20-250.txt\"\n out_file = \"/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/stats/wordfreq/heatmap.csv\"\n totalbins=100\n ref_freq_lookup = dsa.count_reference_freq(in_file)\n bins = dsa.freq_to_bins(ref_freq_lookup, totalbins)\n\n ref_freq_freqonly=list(ref_freq_lookup.values())\n arr = numpy.array(ref_freq_freqonly)\n ref_bin_number = numpy.digitize(arr, bins, right=True)\n ref_bin_lookup = {}\n index=0\n for k in ref_freq_lookup.keys():\n ref_bin_lookup[k] = ref_bin_number[index]\n index+=1\n\n #classification\n print(\"mwpd\")\n train_file = \"/home/zz/Work/data/wop/swc/swc_dataset/train.json\"\n test_file = None\n dataset = \"MWPD-PC\"\n text_fields = [1, 2, 3] # 1=title, 2=desc, 3=cat\n mwpd_data = load_data_classification(\"mwpd\", train_file, test_file, text_fields)\n count_data_freq_per_bin(mwpd_data, ref_bin_lookup, out_file, dataset, totalbins)\n\n #wdc\n print(\"wdc\")\n train_file = \"/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/wdc_gs_train.json\"\n test_file = None\n dataset = \"WDC-25\"\n text_fields = [1, 2, 3, 4]\n wdc_data = load_data_classification(\"wdc\", train_file, test_file, text_fields)\n count_data_freq_per_bin(wdc_data, ref_bin_lookup, out_file, dataset, totalbins)\n\n # rakuten\n print(\"rakuten\")\n train_file = \"/home/zz/Work/data/Rakuten/original/rdc-catalog-train.tsv\"\n test_file = None\n dataset = \"Rakuten\"\n text_fields = [0] # 1=title\n rak_data = load_data_classification(\"rakuten\", train_file, test_file, text_fields)\n count_data_freq_per_bin(rak_data, ref_bin_lookup, out_file, dataset, totalbins)\n\n # icecat\n print(\"icecat\")\n train_file = \"/home/zz/Work/data/IceCAT/icecat_data_train.json\"\n test_file = None\n dataset = \"IceCat\"\n text_fields = [2, 3, 4] # 1=title, 2=desc, 3=cat\n icecat_data = load_data_classification(\"icecat\", train_file, test_file, text_fields)\n count_data_freq_per_bin(icecat_data, ref_bin_lookup, out_file, dataset, totalbins)\n\n\n #matching\n in_dir = \"/home/zz/Work/data/wdc-lspc/dm_wdclspc_small_original/all_small\"\n print(\"wdc small\")\n dataset = \"WDC-small)\"\n data = load_data_matching(in_dir)\n count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset, totalbins)\n\n in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Structured/Beer\"\n print(\"BeerAdvo-RateBeer (S)\")\n dataset = \"BeerAdvo-RateBeer (S)\"\n data = load_data_matching(in_dir)\n count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Structured/iTunes-Amazon\"\n print(\"iTunes-Amazon1 (S)\")\n dataset = \"iTunes-Amazon1 (S)\"\n data = load_data_matching(in_dir)\n count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Structured/Fodors-Zagats\"\n print(\"Fodors-Zagats\")\n dataset = \"Fodors-Zagats (S)\"\n data = load_data_matching(in_dir)\n count_data_freq_per_bin(data, 
ref_bin_lookup, out_file, dataset,totalbins)\n\n    in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Structured/Amazon-Google\"\n    print(\"Amazon-Google (S)\")\n    dataset = \"Amazon-Google (S)\"\n    data = load_data_matching(in_dir)\n    count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n    in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Structured/Walmart-Amazon\"\n    print(\"Walmart-Amazon1 (S)\")\n    dataset = \"Walmart-Amazon1 (S)\"\n    data = load_data_matching(in_dir)\n    count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n    in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Textual/abt_buy_exp_data\"\n    print(\"Abt-Buy (T)\")\n    dataset = \"Abt-Buy (T)\"\n    data = load_data_matching(in_dir)\n    count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n    in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Dirty/iTunes-Amazon\"\n    print(\"iTunes-Amazon2 (D)\")\n    dataset = \"iTunes-Amazon2 (D)\"\n    data = load_data_matching(in_dir)\n    count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n    in_dir = \"/home/zz/Work/data/entity_linking/deepmatcher/processed/Dirty/Walmart-Amazon\"\n    print(\"Walmart-Amazon2 (D)\")\n    dataset = \"Walmart-Amazon2 (D)\"\n    data = load_data_matching(in_dir)\n    count_data_freq_per_bin(data, ref_bin_lookup, out_file, dataset,totalbins)\n\n    print(\"done\")","repo_name":"ziqizhang/wop","sub_path":"code/python/src/exp/data_stats_analyser_heatmap.py","file_name":"data_stats_analyser_heatmap.py","file_ext":"py","file_size_in_byte":8266,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"43705948626","text":"# Elias Obreque\n\nimport numpy as np\n\n\nclass TwoBodyProblem(object):\n    def __init__(self, mu, timestep, init_position, init_velocity):\n        self.position_i = init_position\n        self.velocity_i = init_velocity\n        self.acc_i = np.zeros(3)\n        self.mu = mu\n        self.g = 9.8\n        self.step_width = timestep\n        self.current_time = 0\n\n    def update_state(self, time_array):\n        self.rungeonestep()\n        self.acc_i *= 0\n        return self.position_i, self.velocity_i\n\n    def dynamics(self, state, t):\n        x = state[0]\n        y = state[1]\n        z = state[2]\n\n        vx = state[3]\n        vy = state[4]\n        vz = state[5]\n\n        r3 = np.linalg.norm(state[0:3]) ** 3\n\n        rhs = np.zeros(6)\n        rhs[0] = vx\n        rhs[1] = vy\n        rhs[2] = vz\n        rhs[3] = -self.mu * x / r3 + self.acc_i[0]\n        rhs[4] = -self.mu * y / r3 + self.acc_i[1]\n        rhs[5] = -self.mu * z / r3 + self.acc_i[2]\n        return rhs\n\n    def add_acc_i(self, acc_i):\n        self.acc_i += acc_i\n\n    def rungeonestep(self):\n        t = self.current_time\n        dt = self.step_width\n\n        x = np.concatenate((self.position_i, self.velocity_i))\n\n        k1 = self.dynamics(x, t)\n        xk2 = x + (dt / 2.0) * k1\n\n        k2 = self.dynamics(xk2, (t + dt / 2.0))\n        xk3 = x + (dt / 2.0) * k2\n\n        k3 = self.dynamics(xk3, (t + dt / 2.0))\n        xk4 = x + dt * k3\n\n        k4 = self.dynamics(xk4, (t + dt))\n\n        next_x = x + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)\n\n        self.current_time += self.step_width\n        self.position_i = np.array(next_x[0:3])\n        self.velocity_i = np.array(next_x[3:6])\n","repo_name":"spel-uchile/LandingSimulator","sub_path":"Dynamics/SpacecraftTrajectory/TwoBodyProblem.py","file_name":"TwoBodyProblem.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"72726704811","text":"from django.urls import path\nfrom .views import AddSchedule, 
EmployeeLogInView, EmployeeLogOutView, EditSchedule, ScheduleList\n\nurlpatterns = [\n    path('login/', EmployeeLogInView.as_view(), name='master_login'),\n    path('logout/', EmployeeLogOutView.as_view(), name='logout'),\n    path('schedule/', AddSchedule.as_view(), name='schedule'),\n    path('schedule/edit/', EditSchedule.as_view(), name='edit_schedule'),\n    path('schedule/list/', ScheduleList.as_view(), name='date_list'),\n    path('schedule/delete/', EditSchedule.as_view(), name='delete_day'),\n]\n","repo_name":"sergey-nikishonkov/kto-tvoy-master","sub_path":"core/employees/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"71094355372","text":"from bsddb3 import db\n\n\ndef searchTerm(subj, body):\n    DB_File = \"te.idx\"\n    database = db.DB()\n    database.open(DB_File)\n    cur = database.cursor()\n\n    subj_rows = []\n    body_rows = []\n\n    if subj is not None:\n        if '%' not in subj:\n            query = 's-'+subj\n            iter = cur.set(query.encode(\"utf-8\"))\n\n            if iter is not None:\n                while iter[0].decode(\"utf-8\").find(query) != -1:\n                    subj_rows.append(iter[1].decode(\"utf-8\"))\n                    iter = cur.next()\n        else:\n            query = 's-'+subj[:-1]\n            iter = cur.set_range(query.encode(\"utf-8\"))\n            \n            while iter[0].decode(\"utf-8\")[:len(query)].find(query) != -1:\n                subj_rows.append(iter[1].decode(\"utf-8\"))\n                iter = cur.next()\n\n\n\n    if body is not None:\n        if '%' not in body:\n            query = 'b-'+body\n            iter = cur.set(query.encode(\"utf-8\"))\n\n            if iter is not None:\n                while iter[0].decode(\"utf-8\").find(query) != -1:\n                    body_rows.append(iter[1].decode(\"utf-8\"))\n                    iter = cur.next()\n        else:\n            query = 'b-'+body[:-1]\n            iter = cur.set_range(query.encode(\"utf-8\"))\n            \n            while iter[0].decode(\"utf-8\")[:len(query)].find(query) != -1:\n                body_rows.append(iter[1].decode(\"utf-8\"))\n                iter = cur.next()\n    cur.close()\n    database.close()\n\n    if subj is not None and body is not None:\n        return list(set(subj_rows) & set(body_rows))\n    elif subj is None and body is not None:\n        return body_rows\n    elif subj is not None and body is None:\n        return subj_rows\n\n\n\ndef main():\n    \n    result = searchTerm(\"c%\", None)\n\n    print(result)\n\n\nmain()","repo_name":"kangxi11/291-Mini-Project-2","sub_path":"term_search.py","file_name":"term_search.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"38643064591","text":"def encodeBase64(text):\n    base64chars = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\n    \n    r = \"\" #the result\n    c = (3 - len(text) % 3) % 3 #the length of padding (0 when the length is already a multiple of 3)\n    p = \"=\" * c #the padding\n    s = text + \"\\0\" * c #the text to encode\n    \n    i = 0 \n    while i < len(s):\n        if i > 0 and ((i // 3 * 4) % 76) == 0:\n            r = r + \"\\r\\n\"\n        \n        n = (ord(s[i]) << 16) + (ord(s[i+1]) << 8 ) + ord(s[i+2])\n        \n        n1 = (n >> 18) & 63\n        n2 = (n >> 12) & 63\n        n3 = (n >> 6) & 63\n        n4 = n & 63\n        \n        r += base64chars[n1] + base64chars[n2] + base64chars[n3] + base64chars[n4]\n        i += 3\n\n    return r[0: len(r)-len(p)] + p\n    \ndef decodeBase64(text):\n    base64chars = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\n    s = \"\"\n    c = \"\"\n    \n    for i in text:\n        if i in base64chars:\n            s += i\n            c = \"\"\n        else:\n            if i == '=':\n                c += '='\n    \n    p = \"\"\n    if c == \"=\":\n        p = 'A'\n    else:\n        if c == \"==\":\n            p = \"AA\"\n    \n    r = \"\"\n    s = s + p\n    \n    i = 0\n    while i < len(s):\n        n = (base64chars.index(s[i]) << 18) + (base64chars.index(s[i+1]) 
<< 12) + (base64chars.index(s[i+2]) << 6) +base64chars.index(s[i+3])\n        \n        r += chr((n >> 16) & 255) + chr((n >> 8) & 255) + chr(n & 255)\n        \n        i += 4\n        \n    return r[0: len(r) - len(p)]\n\ndef main():\n    print(encodeBase64(\"WELCOME to base64 encoding\"))\n    print(decodeBase64(encodeBase64(\"WELCOME to base64 encoding\")))\n    \n\nif __name__ == '__main__':\n    main()\n","repo_name":"subbarayudu-j/TheAlgorithms-Python","sub_path":"ciphers/base64_cipher.py","file_name":"base64_cipher.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":2201,"dataset":"github-code","pt":"55"} +{"seq_id":"16578638118","text":"#Write an algorithm that reads a salesperson's name, their fixed salary\n# and the total sales they made in the month (in money). Knowing that\n# this salesperson earns a 15% commission on the sales they made, report\n# their name, the fixed salary and the salary at the end of the month;\nnome = str(input(\"Salesperson's name: \"))\nsalario = float(input(\"Salesperson's fixed salary: R$ \"))\nvendas = int(input(\"Total sales R$ \"))\ncomissao = vendas * 0.15\nnovosalario = comissao + salario\nprint('Salesperson {} has a fixed salary of R${:.2f} and their salary with commission was R${:.2f}'.format(nome,salario,novosalario))\n","repo_name":"amandadpo/pythonProject","sub_path":"ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"12491745332","text":"from torch.nn.modules.module import _addindent\nimport torch\nimport numpy as np  # needed for np.prod below\n\n\ndef torch_summarize(model, show_weights=True, show_parameters=True):\n    \"\"\"Summarizes torch model by showing trainable parameters and weights.\"\"\"\n    tmpstr = model.__class__.__name__ + ' (\\n'\n    for key, module in model._modules.items():\n        # if it contains layers let call it recursively to get params and weights\n        if type(module) in [\n            torch.nn.modules.container.Container,\n            torch.nn.modules.container.Sequential\n        ]:\n            modstr = torch_summarize(module)\n        else:\n            modstr = module.__repr__()\n        modstr = _addindent(modstr, 2)\n\n        params = sum([np.prod(p.size()) for p in module.parameters()])\n        weights = tuple([tuple(p.size()) for p in module.parameters()])\n\n        tmpstr += '  (' + key + '): ' + modstr\n        if show_weights:\n            tmpstr += ', weights={}'.format(weights)\n        if show_parameters:\n            tmpstr +=  ', parameters={}'.format(params)\n        tmpstr += '\\n'\n\n    tmpstr = tmpstr + ')'\n    return tmpstr\n\n\ndef print_model(model, model_type, detailed_encoder=False, detailed_classifier=True):\n    print('################## Printing model ##################')\n    if detailed_encoder:\n        if model_type == 'camembert':\n            print(model.roberta)\n        elif model_type == 'roberta':\n            print(model.roberta)\n        elif model_type == 'bert':\n            print(model.bert)\n        else:\n            raise Exception('Not implemented for %s' % model_type)\n\n    if detailed_classifier:\n        print(model.classifier)\n    pytorch_total_params = sum(p.numel() for p in model.parameters())\n    pytorch_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n    print('############ Total number of params // Number of trainable params')\n    print(pytorch_total_params, '//', pytorch_trainable_params)\n\n    if model_type == 'camembert':\n        pytorch_total_params_enc = sum(p.numel() for p in model.roberta.parameters())\n        pytorch_trainable_params_enc = sum(p.numel() for p in model.roberta.parameters() if p.requires_grad)\n    elif model_type == 'roberta':\n        pytorch_total_params_enc = sum(p.numel() for p in 
model.roberta.parameters())\n        pytorch_trainable_params_enc = sum(p.numel() for p in model.roberta.parameters() if p.requires_grad)\n    else:\n        pytorch_total_params_enc = sum(p.numel() for p in model.bert.parameters())\n        pytorch_trainable_params_enc = sum(p.numel() for p in model.bert.parameters() if p.requires_grad)\n\n    print('### Camembert : Total number of params // Number of trainable params')\n    print(pytorch_total_params_enc, '//', pytorch_trainable_params_enc)\n\n    pytorch_total_params_clas = sum(p.numel() for p in model.classifier.parameters())\n    pytorch_trainable_params_clas = sum(p.numel() for p in model.classifier.parameters() if p.requires_grad)\n    print('### Classifier : Total number of params // Number of trainable params')\n    print(pytorch_total_params_clas, '//', pytorch_trainable_params_clas)\n\n\n","repo_name":"thomas-ysance/formation_nlp_2","sub_path":"torch_utils.py","file_name":"torch_utils.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"37677690779","text":" # Problem 1\ndef Hashtag(string):\n    if len(string) > 140:\n        return False\n    elif string == '':\n        return False\n    else:\n        string = string.split()\n        string.insert(0, '#')\n        return ''.join(string)\n\nkalimat = input('Enter a sentence = ').title()\nprint(Hashtag(kalimat))\n # Problem 2\ndef konterhp(x):\n    a =[]\n    b =[]\n    c =[]\n    if len(x) >= 10 :\n        for i in range (0,3):\n            a.append(x[i])\n        for i in range (3,6):\n            b.append(x[i])\n        for i in range (6,len(x)):\n            c.append(x[i])\n    else :\n        print (False)\n    a = \"\".join(a)\n    b =\"\".join(b)\n    c = \"\".join(c)\n    print(f'({a}) {b}-{c}')\n\nx = input(\"Phone number = \")\nkonterhp(x)\n # Problem 3\ndef gg(numbers):\n    ganjil = []\n    genap = []\n    for i in numbers:\n        if i % 2 == 0:\n            genap.append(i)\n        else:\n            ganjil.append(i)\n    return (sorted(ganjil)+sorted(genap, reverse=True)) #odd numbers ascending, even numbers reversed\n\ndata = list(map(int,input('enter a sequence of numbers = ').split()))\nprint(gg(data))\n # Problem 4\ndef pusing(n):\n    if n == 1:\n        print('#')\n        return (False)\n    \n    pucuk = ['_'*(n-1)+'#'+'_'*(n-1)]\n    dasar = ['##'*(n-1)+'#']\n    tengah = []\n    for i in range(n-2,0,-1):\n        tengah.append(('_'*i)+'#'+('_'*((2*n)-(2*i)-3))+'#'+('_'*i))\n    print(pucuk[0])\n\n    for i in tengah:\n        print(i)\n    \n    print(dasar[0])\n\nbaris = int(input('Enter the number of rows: '))\npusing(baris)\n","repo_name":"achmadkurniansyah/Ujian-Modul-1","sub_path":"hasbi.py","file_name":"hasbi.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"41923815492","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom f3d.file_management import FileManagement\nfrom f3d.surface import Surface\n\n__author__ = 'neopostmodern'\n\nfrom warnings import warn\nfrom copy import deepcopy\nfrom lxml import etree\nimport json\nimport numpy as numpy\n\nfrom f3d.isvg import provider\nfrom f3d.settings import Settings\nfrom f3d.camera import Camera\nimport f3d.tools_3d.svg_3d as SvgUtility\n\n# NOTE (assumption): the original SVG markup in these two templates was\n# stripped during text extraction; the skeletons below are a minimal\n# reconstruction that keeps only what the surrounding code demonstrably\n# relies on -- the 'image/svg+xml' metadata entry and elements with the\n# ids 'defs' and 'container'.\nCONTAINER_SVG = \"\"\"\n<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n    <metadata>image/svg+xml</metadata>\n    <defs id=\"defs\" />\n    <g id=\"container\" />\n</svg>\n\"\"\"\n\nTIME_SVG = \"\"\"\n<g xmlns=\"http://www.w3.org/2000/svg\">\n    <text>%s</text>\n</g>\n\"\"\"\n\n\nclass FakeFilm:\n    def __init__(self, setting_path):\n        try:\n            with open(setting_path, mode='r') as setting_json:\n                try:\n                    setting = json.load(setting_json) #, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n                except ValueError:\n                    raise Exception(\"[FakeFilm] Parsing of setting '%s' failed.\" % setting_path)\n        
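# malformed JSON is reported above; a missing settings file is caught separately by the outer handler below\n        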
except FileNotFoundError as fileError:\n raise Exception(\"[FakeFilm] Settings file '%s' not found.\" % setting_path) from fileError\n\n Settings.set(setting['settings'])\n FileManagement.initialize() # hack: depends on settings being loaded!\n\n self.camera = Camera(setting['camera'])\n # todo: image object? (properties moved into common settings...)\n # self.image = Image(setting['image'])\n\n # for property_name in setting:\n # setattr(self, property_name, setting[property_name])\n\n self.surfaces = self.load_resources(setting['surfaces'])\n\n self.container_svg = etree.fromstring(CONTAINER_SVG)\n\n @staticmethod\n def load_resources(surfaces):\n def load_surface(surface):\n svg_provider = {\n \"animated\": provider.AnimatedSvgProvider,\n \"static\": provider.StaticSvgProvider\n }.get(surface['type'], None)\n\n if svg_provider is None:\n warn(\"[ERROR] FakeFilm: Unsupported surface type '%s'.\" % surface['type'])\n return None # todo: appropriate error handling?\n\n return Surface(surface, svg_provider(surface))\n\n return [load_surface(surface) for surface in surfaces]\n\n def render(self, time):\n container = deepcopy(self.container_svg)\n surface_container = container.find(\".//*[@id='container']\")\n alpha_container = container.find(\".//*[@id='defs']\")\n\n for surface in self.surfaces:\n surface = surface.get_for_time(time, self.camera)\n\n #todo: handle alpha channels\n\n if surface is not None:\n surface_container.append(surface)\n\n if Settings.add_timestamp:\n minutes, seconds = divmod(time, 60)\n surface_container.append(etree.fromstring(TIME_SVG % (\"00:%02.0f:%07.4f\" % (minutes, seconds))))\n\n return container\n\n\nclass NaiveSurface(Surface):\n # hack: redefined parameters! mapping -> projection\n def get_svg_transformed_to(self, projection, svg_element):\n lower_left = SvgUtility.into_svg(projection[0])\n lower_right = SvgUtility.into_svg(projection[1])\n upper_left = SvgUtility.into_svg(projection[2])\n\n old_coordinates = [\n lower_left[0], lower_left[1],\n lower_right[0], lower_right[1],\n upper_left[0], upper_left[1]\n ]\n\n # look at this: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform\n target_matrix = numpy.array([\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [Settings.image.size.x, 0, 0, 0, 1, 0],\n [0, Settings.image.size.x, 0, 0, 0, 1],\n [0, 0, Settings.image.size.y, 0, 1, 0],\n [0, 0, 0, Settings.image.size.y, 0, 1]\n ])\n\n transform_matrix = numpy.linalg.solve(target_matrix, old_coordinates)\n\n svg_element.set(\"transform\", \"matrix(%f %f %f %f %f %f)\" % tuple(transform_matrix))\n return svg_element\n\n def get_for_time(self, time, camera):\n svg = self.svg_provider.get_for_time(time)\n projection = camera.project_surface(self)\n\n if projection is not None:\n return self.get_svg_transformed_to(projection, svg)\n\n return svg\n","repo_name":"neopostmodern/f3d","sub_path":"f3d/film.py","file_name":"film.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"44206008869","text":"import goldenrecord\r\n\r\n#filepath = \"../data/mitdwh/\"\r\ncname = \"cluster_id\"\r\n#outfilepath = \"./mitdwh_updated.csv\"\r\n\r\n#input_file = filepath\r\n#output_file = outfilepath\r\n\r\ndef main(filepath, outfilepath, preput=\"\"):\r\n consolidation = goldenrecord.Consolidation(filepath, cname)\r\n num_of_tables = consolidation.number_of_tables\r\n for i in range(num_of_tables):\r\n str_noc = consolidation.TryNextTable(i)\r\n for col_id in 
range(int(str_noc)):\r\n\r\n            if col_id == consolidation.cluster_id_col:\r\n                continue\r\n\r\n            message1 = consolidation.ProfileColumn(i, col_id)\r\n            print(message1)\r\n\r\n            skip, preput = (preput[0:1], preput[1:]) if len(preput) > 0 else (input(), \"\")\r\n            if skip != \"1\" and skip != \"2\" and skip != \"3\" and skip != \"4\" and skip != \"5\":\r\n                continue\r\n\r\n            message2 = consolidation.TryNextColumn(i, col_id, skip)\r\n            print(message2)\r\n\r\n            applied_group_num = 0\r\n\r\n            var = 1\r\n            while var == 1:\r\n                message3 = consolidation.ShowNextCluster()\r\n                strs = message3.split('\\t', 2)\r\n                max_group_id = int(strs[0])\r\n                max_count = int(strs[1])\r\n                print(strs[2])\r\n                if max_count == 0:\r\n                    print(\"Done with current column\\n\")\r\n                    break\r\n\r\n                choice, preput = (preput[0:1], preput[1:]) if len(preput) > 0 else (input(), \"\")\r\n                if choice == \"4\":\r\n                    print(\"Done with current column\\n\")\r\n                    break\r\n\r\n                message4 = consolidation.ApplyCluster(i, col_id, applied_group_num, max_group_id, choice)\r\n                applied_group_num = applied_group_num + 1\r\n                print(message4)\r\n\r\n        consolidation.MaterializeTable(i, outfilepath)\r\n        print(\"Successfully Exit!\\n\")\r\n        break\r\n\r\n#consolidation.ConsolidationGo()\r\n\r\n","repo_name":"qcri/data_civilizer_system","sub_path":"grecord_service/gr/code/call_goldenrecord.py","file_name":"call_goldenrecord.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"55"} +{"seq_id":"30095742856","text":"#!/usr/bin/env python\nimport os\nimport urllib\nimport logging\nimport datetime\nimport math\nimport cgi\nfrom urlparse import urlparse\nimport re\n\nfrom google.appengine.api import images\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import blobstore\nfrom google.appengine.ext.webapp import blobstore_handlers\n\nimport webapp2\nimport jinja2\n\nMAX_PAGE_LIST = 5\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader = jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass PhotoCategory(ndb.Model):\n    author = ndb.UserProperty()\n    category = ndb.StringProperty()\n    coverUrl = ndb.StringProperty()\n    created_date = ndb.DateTimeProperty(auto_now_add=True)\n\nclass UserInfo(ndb.Model):\n    author = ndb.UserProperty()\n    friends = ndb.UserProperty(repeated=True)\n    created_date = ndb.DateTimeProperty(auto_now_add=True)\n\nclass Question(ndb.Model):\n    \"\"\"Models an individual Guestbook entry.\"\"\"\n    author = ndb.UserProperty()\n    title = ndb.StringProperty()\n    content = ndb.TextProperty(indexed=False)\n    created_date = ndb.DateTimeProperty(auto_now_add=True)\n    modified_date = ndb.DateTimeProperty()\n    tags = ndb.StringProperty(repeated=True)\n\nclass Answer(ndb.Model):\n    author = ndb.UserProperty()\n    content = ndb.TextProperty(indexed=False)\n    vote = ndb.IntegerProperty(indexed=False)\n    voters = ndb.StringProperty(repeated=True)\n    created_date = ndb.DateTimeProperty(auto_now_add=True)\n    modified_date = ndb.DateTimeProperty()\n\nclass UserPhoto(ndb.Model):\n    author = ndb.UserProperty()\n    blob_key = ndb.BlobKeyProperty()\n    url = ndb.StringProperty()\n    note = ndb.StringProperty()\n    created_date = ndb.DateTimeProperty(auto_now_add=True)\n\ndef url_repl(m):\n    ext = m.group(1)\n    if ext in ['.png', '.jpg', '.gif']:\n        return \"<img src=\\\"%s\\\"/>\" % m.group(0)\n    else:\n        return \"<a href=\\\"%s\\\">%s</a>\" % (m.group(0), m.group(0))\n\ndef parse_content(content):\n\n    return 
re.sub(r'[a-zA-Z0-9]+://(?:[a-zA-Z0-9_]+:[a-zA-Z0-9_]+@)?(?:[a-zA-Z0-9.-]+\\.[A-Za-z]{2,4})(?::[0-9]+)?(?:/[^ \\.]*)?(\\.[^\\s]*)?', url_repl, content)\n\n\nclass HomePageHandler(webapp2.RequestHandler):\n def get(self):\n\n user = users.get_current_user()\n userinfo = None\n if user:\n userinfos = UserInfo.query(ancestor=ndb.Key('UserInfo', user.user_id()))\n #userinfo = userinfos.fetch(99999) #this returns a list, we can use len() to it\n #for ui in userinfos:\n # userinfo = ui\n if userinfos.count() == 0:\n userinfo = UserInfo(author=user, parent=ndb.Key('UserInfo', user.user_id()))\n else:\n userinfo = userinfos.get()\n\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n template_values = {\n 'userinfo':userinfo,\n 'user':user,\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('homePage.html')\n self.response.write(template.render(template_values))\n\nclass QuestionHomeHandler(webapp2.RequestHandler):\n\n def get_page_list(self, page=0, num_of_pages=1):\n PageCount = {}\n pl = []\n for i in range(page, page+MAX_PAGE_LIST):\n if i < num_of_pages:\n pl.append(i)\n else:\n break\n PageCount['pagelist'] = pl\n\n PageCount['pre'] = page-1 # Yes >=0 or No == -1\n\n if page + MAX_PAGE_LIST < num_of_pages:\n PageCount['post'] = page + MAX_PAGE_LIST\n else:\n PageCount['post'] = -1\n\n if page > 0:\n PageCount['start'] = 0\n else:\n PageCount['start'] = -1\n\n if page < num_of_pages - 1:\n PageCount['end'] = num_of_pages - 1\n else:\n PageCount['end'] = -1\n\n return PageCount\n\n def preprocess(self, cls, f = ''):\n user = users.get_current_user()\n max_page_size = 5\n\n tag = self.request.get('tag').split(' ')\n page = self.request.get('page')\n\n if not page:\n page = 0\n else:\n page = int(page)\n\n func = getattr(cls, f) # use this to replace cls.query, introduce more flexibility\n\n if tag[0] != '':\n question_query = func().filter(cls.tags.IN(tag)).order(-cls.created_date)\n else:\n question_query = func().order(-cls.created_date)\n #ancestor=ndb.Key(\"Questions\", \"0\")\n\n num_of_page = int(math.ceil(question_query.count() / float(max_page_size)))\n\n questions = question_query.fetch(max_page_size, offset=page * max_page_size)\n\n tagstr = \" \".join(tag)\n\n PageCount = self.get_page_list(page, num_of_page)\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n self.user = user\n self.max_page_size = max_page_size\n self.page = page\n self.tag = tag\n self.tagstr = tagstr\n self.questions = questions\n self.num_of_page = num_of_page\n self.PageCount = PageCount\n self.user_url = user_url\n self.user_url_linktext = user_url_linktext\n\n def get(self):\n\n self.preprocess(Question, 'query')\n\n template_values = {\n 'parse_content':parse_content,\n 'num_of_page': self.num_of_page,\n 'PageCount':self.PageCount,\n 'tag' : self.tag,\n 'tagstr' : self.tagstr,\n 'page': self.page,\n 'user': self.user,\n #'userinfo':userinfo,\n 'questions': self.questions,\n 'user_url':self.user_url,\n 'user_url_linktext':self.user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('Question_Home.html')\n self.response.write(template.render(template_values))\n\nclass QuestionPageHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n 
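# ndb keys round-trip through URL-safe strings, so the 'qid' query parameter\n        # decodes straight back into a datastore key via ndb.Key(urlsafe=...)\n        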
question_key = ndb.Key(urlsafe=self.request.get('qid'))\n question = question_key.get()\n\n answers = Answer.query(ancestor=question_key).order(-Answer.created_date)\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n template_values = {\n 'parse_content':parse_content,\n 'user': user,\n 'question': question,\n 'answers': answers,\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('Question.html')\n self.response.write(template.render(template_values))\n\nclass AddQuestion(webapp2.RequestHandler):\n def post(self):\n\n user = users.get_current_user()\n\n if user:\n question = Question(author=user, parent=ndb.Key(\"Questions\", \"0\"))\n question.title = self.request.get('title')\n question.content = self.request.get('content')\n q_tags = self.request.get('tags').split(r',')\n question.tags = q_tags\n\n question.put()\n\n else:\n self.redirect(users.create_login_url())\n\n self.redirect('/Question?qid='+question.key.urlsafe())\n\nclass EditQuestion(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n question = ndb.Key(urlsafe=self.request.get('qid')).get()\n decision = self.request.get('decision')\n if decision == 'Yes':\n question.key.delete()\n self.redirect('/DeleteSuccess')\n return\n\n template_values = {\n 'parse_content':parse_content,\n 'question': question,\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('EditQuestion.html')\n self.response.write(template.render(template_values))\n\n else:\n self.redirect(users.create_login_url())\n\n def post(self):\n user = users.get_current_user()\n\n question = ndb.Key(urlsafe=self.request.get('qid')).get()\n\n if user:\n\n question.title = self.request.get('title')\n question.content = self.request.get('content')\n question.modified_date = datetime.datetime.now()\n\n q_tags = self.request.get('tags').split(r',')\n question.tags = q_tags\n\n question.put()\n\n self.redirect('/Question?qid='+question.key.urlsafe())\n\n else:\n self.redirect(users.create_login_url())\n\nclass EditAnswerHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n answer = ndb.Key(urlsafe=self.request.get('aid')).get() #not get Key, get the entity itself!\n question = ndb.Key(urlsafe=self.request.get('qid')).get()\n decision = self.request.get('decision')\n if decision == 'Yes':\n answer.key.delete()\n self.redirect('/Question?qid='+question.key.urlsafe())\n return\n\n template_values = {\n 'question':question,\n 'answer': answer,\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('EditAnswer.html')\n self.response.write(template.render(template_values))\n\n else:\n self.redirect(users.create_login_url())\n\n def post(self):\n user = users.get_current_user()\n\n answer = ndb.Key(urlsafe=self.request.get('aid')).get()\n question = ndb.Key(urlsafe=self.request.get('qid')).get()\n\n if user:\n\n answer.content = self.request.get('content')\n answer.modified_date = datetime.datetime.now()\n\n answer.put()\n\n self.redirect('/Question?qid='+ question.key.urlsafe())\n\n else:\n 
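# not signed in: send the user to the Users API login page instead\n            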
self.redirect(users.create_login_url())\n\n\nclass AnswerHandler(webapp2.RequestHandler):\n    def post(self):\n        question_key = ndb.Key(urlsafe= self.request.get('qid'))\n        user = users.get_current_user()\n\n        if user:\n            answer = Answer(author=user, parent=question_key)\n            answer.content = self.request.get('content')\n            answer.vote = 0\n            answer.voters = []\n            answer.put()\n            self.redirect(\"/Question?qid=\"+self.request.get('qid'))\n        else:\n            self.redirect(users.create_login_url(self.request.uri))\n\nclass UpVoteHandler(webapp2.RequestHandler):\n    def get(self):\n        user = users.get_current_user()\n        answer = ndb.Key(urlsafe = self.request.get('aid')).get()\n\n        if user:\n            voter_id = user.user_id()\n            if voter_id not in answer.voters:\n                answer.vote += 1\n                answer.voters.append(voter_id)\n                answer.put()\n\n        self.redirect(\"/Question?qid=\"+self.request.get('qid'))\n\nclass DownVoteHandler(webapp2.RequestHandler):\n    def get(self):\n        user = users.get_current_user()\n        answer = ndb.Key(urlsafe = self.request.get('aid')).get()\n\n        if user:\n            voter_id = user.user_id()\n            if voter_id not in answer.voters:\n                answer.vote -= 1\n                answer.voters.append(voter_id)\n                answer.put()\n\n        self.redirect(\"/Question?qid=\"+self.request.get('qid'))\n\nclass AlbumHomeHandler(webapp2.RequestHandler):\n    def get(self):\n        user = users.get_current_user()\n        if user:\n            categories = PhotoCategory.query(ancestor=ndb.Key('PhotoCategory', user.user_id())).order(-PhotoCategory.created_date)\n            user_url = users.create_logout_url(self.request.uri)\n            user_url_linktext = 'Logout'\n            template_values = {\n                'user': user,\n                'categories': categories,\n                'user_url':user_url,\n                'user_url_linktext':user_url_linktext\n            }\n\n            template = JINJA_ENVIRONMENT.get_template('AlbumHome.html')\n            self.response.write(template.render(template_values))\n        else:\n            self.redirect(users.create_login_url())\n\n\nclass CreateGalleryHandler(webapp2.RequestHandler):\n    def get(self):\n\n        user = users.get_current_user()\n        category_name = self.request.get('name')\n\n        if user:\n            ancestor_key = ndb.Key('PhotoCategory', user.user_id())\n            gallery = PhotoCategory.query(ancestor=ancestor_key).filter(PhotoCategory.category==category_name).get()\n            if not gallery:\n                gallery = PhotoCategory(parent=ancestor_key, author=user, category=category_name)\n                gallery.put()\n            self.redirect('/Album?cid='+gallery.key.urlsafe())\n        else:\n            self.redirect(users.create_login_url())\n\n\nclass AlbumPageHandler(webapp2.RequestHandler):\n    def get(self):\n        user = users.get_current_user()\n        if user:\n            upload_url = blobstore.create_upload_url('/Upload')\n            # require categories' url\n            category_key = ndb.Key(urlsafe = self.request.get('cid'))\n            category = category_key.get()\n\n            #upload_url = upload_url + '?cid=' + category_key.urlsafe()\n\n            photos = UserPhoto.query(ancestor=category_key).order(-UserPhoto.created_date)\n            pt = photos.get()\n            if pt:\n                category.coverUrl = pt.url\n                category.put()\n\n            user_url = users.create_logout_url(self.request.uri)\n            user_url_linktext = 'Logout'\n            template_values = {\n                'user': user,\n                'photos': photos,\n                'category': category,\n                'upload_url':upload_url,\n                'user_url':user_url,\n                'user_url_linktext':user_url_linktext\n            }\n\n            template = JINJA_ENVIRONMENT.get_template('Album.html')\n            self.response.write(template.render(template_values))\n\n        else:\n            self.redirect(users.create_login_url())\n\n\nclass UploadHandler(blobstore_handlers.BlobstoreUploadHandler):\n    def post(self):\n        user = users.get_current_user()\n        category_key = ndb.Key(urlsafe = self.request.get('cid'))\n        note = self.request.get('note') # has words limit 27\n\n        upload_files = 
self.get_uploads('file')\n #upload_files = self.get_uploads()[0]\n blob_info = upload_files[0]\n blob_key = blob_info.key()\n url = images.get_serving_url(blob_info)\n #url = images.get_serving_url(blob_key)\n\n if user:\n photo = UserPhoto(author=user,blob_key=blob_key, url=url, note=note, parent=category_key)\n photo.put()\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n\n template_values = {\n 'user': user,\n 'url': url,\n 'photo': photo,\n 'category_urlsafe' : category_key.urlsafe(),\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('Upload.html')\n self.response.write(template.render(template_values))\n\n\nclass DeletePhotoHandler(webapp2.RequestHandler):\n def get(self):\n\n user = users.get_current_user()\n\n #current_url = urlparse(self.request.url)\n #querys = cgi.parse_qs(current_url.query)\n #pid_num = querys.get('pid')\n\n photo = ndb.Key(urlsafe = self.request.get('pid')).get()\n\n if not user:\n self.redirect(users.create_login_url())\n elif user != photo.author:\n self.redirect('/')\n else:\n blob_key = photo.blob_key\n images.delete_serving_url(blob_key)\n blob_info = blobstore.BlobInfo.get(blob_key)\n blob_info.delete()\n photo.key.delete()\n\n self.redirect('/Album?cid='+self.request.get('cid'))\n\nclass DeleteGalleryHandler(webapp2.RequestHandler):\n def get(self):\n\n user = users.get_current_user()\n\n name = self.request.get('name')\n\n if not user:\n self.redirect(users.create_login_url())\n else:\n ancestor_key = ndb.Key('PhotoCategory', user.user_id())\n category_key = PhotoCategory.query(ancestor=ancestor_key).filter(PhotoCategory.category==name).get().key\n photos = UserPhoto.query(ancestor=category_key)\n for photo in photos:\n if user != photo.author:\n self.redirect('/')\n return\n blob_key = photo.blob_key\n images.delete_serving_url(blob_key)\n blob_info = blobstore.BlobInfo.get(blob_key)\n blob_info.delete()\n photo.key.delete()\n category_key.delete()\n\n self.redirect('/AlbumHome')\n\nclass AboutPageHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n template_values = {\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('About.html')\n self.response.write(template.render(template_values))\n\nclass DeleteSuccessHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n\n if user:\n user_url = users.create_logout_url(self.request.uri)\n user_url_linktext = 'Logout'\n else:\n user_url = users.create_login_url(self.request.uri)\n user_url_linktext = 'Login'\n\n template_values = {\n 'user_url':user_url,\n 'user_url_linktext':user_url_linktext\n }\n\n template = JINJA_ENVIRONMENT.get_template('DeleteSuccess.html')\n self.response.write(template.render(template_values))\n\n\napp = webapp2.WSGIApplication([\n ('/', HomePageHandler),\n ('/Question_Home', QuestionHomeHandler),\n ('/ask', AddQuestion),\n ('/Question', QuestionPageHandler),\n ('/EditQuestion', EditQuestion),\n ('/Answer', AnswerHandler),\n ('/upVote', UpVoteHandler),\n ('/downVote', DownVoteHandler),\n ('/EditAnswer', EditAnswerHandler),\n ('/AlbumHome', AlbumHomeHandler),\n ('/CreateGallery', 
CreateGalleryHandler),\n ('/Album', AlbumPageHandler),\n ('/Upload', UploadHandler),\n ('/DeletePhoto', DeletePhotoHandler),\n ('/DeleteGallery', DeleteGalleryHandler),\n ('/About', AboutPageHandler),\n ('/DeleteSuccess', DeleteSuccessHandler)\n], debug=True)\n","repo_name":"bbccyy/GoogleAppEngine","sub_path":"App_Home/zhidaoa.py","file_name":"zhidaoa.py","file_ext":"py","file_size_in_byte":19494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"37411090327","text":"from flask import Flask, request, make_response\nfrom dotenv import load_dotenv\n\nimport os\n\nfrom ig_crawler import crawl_ig\n\nload_dotenv()\n\nDEBUG = True\nPORT = 8000\nHOST = \"0.0.0.0\"\n\napp = Flask(__name__)\napp.secret_key = os.getenv(\"SECRET_KEY\")\napp.config['JSON_AS_ASCII'] = False\n\n@app.route(\"/profile/\", methods=['GET'])\ndef get_profile(username):\n data = crawl_ig(username)\n data.pop(\"new_posts\")\n return make_response(data, 200)\n\nif __name__ == \"__main__\":\n app.run(debug=DEBUG, host=HOST, port=PORT)","repo_name":"addie-tyc/ig-crawler","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"18406805283","text":"import random\nfrom guess_num_art import logo\nprint(logo)\nprint(\"WELCOME TO NUMBER GUESSING GAME\")\n\ndef choice(n):\n for i in range(n):\n guess=int(input(\"Make a guess: \"))\n if guess>num:\n print(\"Too High\")\n elif guess>', # Double greater than\n '\\xc2\\xbc' : '1/4', # one quarter\n '\\xc2\\xbd' : '1/2', # one half\n '\\xc2\\xbe' : '3/4', # three quarters\n '\\xca\\xbf' : '\\x27', # c-single quote\n '\\xcc\\xa8' : '', # modifier - under curve\n '\\xcc\\xb1' : '', # modifier - under line\n '\\xc2\\xb4' : '\\''\n \n}\n\nprint_flag_array = [True] * 2 + [False] * 8\nshuffle(print_flag_array)\n\nunigram_dictionary = {}\nbigram_dictionary = {}\n\nimport nltk\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\nnltk.download('omw-1.4')\n\n\ndef reduce_lengthening_word(text):\n pattern = re.compile(r\"(.)\\1{2,}\")\n return pattern.sub(r\"\\1\\1\", text)\n\ndef reduce_lengthening_comment(comment: \"dict[str, str]\"):\n comment['clean'] = ' '.join(list(map(reduce_lengthening_word, comment['clean'].split())))\n return comment\n \ndef strip_consecutive_punctutaion(comment: \"dict[str, str]\"):\n mul_punc = re.compile(r'([.,/#!$%^&*;:{}=_`~()-?])[.,/#!$%^&*;:{}=_`~()-?]+') \n comment['clean'] = mul_punc.sub(r'\\1', comment['clean'])\n return comment\n \ndef clean_string(comment: \"dict[str, str]\"):\n # print(comment)\n low_text = comment['raw'].lower()\n replaced_text = low_text.translate(replace_char_map)\n comment['clean'] = replaced_text.translate(remove_char_map) \n return comment\n\ndef update_unigram_dictionary(unigram, unigram_dictionary):\n pos = unigram[1]\n if pos in ['NN', 'NNS']:\n try:\n unigram_dictionary[unigram[0]] += 1\n except KeyError as e:\n unigram_dictionary[unigram[0]] = 1\n return True\n else:\n return False\n \ndef update_bi_gram_dictionary(bigram, bigram_dictionary):\n if bigram[0][1] in ['NN', 'NNS', 'JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS'] \\\n and bigram[1][1] in ['NN', 'NNS', 'JJ', 'JJR', 'JJS']:\n bi_gram_word = bigram[0][0] + '_' +bigram[1][0]\n try:\n bigram_dictionary[bi_gram_word] += 1\n except KeyError as e:\n bigram_dictionary[bi_gram_word] = 1\n return True \n else:\n return False\n \ndef tokenize(comment: \"dict[str, str]\"):\n tokens = 
re.findall(r\"[\\w']+|[.,!?;]\", comment['clean'], re.UNICODE)\n token_pos = list(pos_tag(tokens))\n comment['tokens'] = token_pos\n return comment\n\ndef update_dicts(comment, unigram_dictionary, bigram_dictionary):\n token_pos = comment['tokens']\n unigrams = [(i,j) for i,j in token_pos if i not in (stop | set(punctuation))]\n bigrams = ngrams(unigrams, 2)\n filtered_unigrams = list(filter(lambda x: update_unigram_dictionary(x, unigram_dictionary), unigrams))\n filtered_bigrams = list(filter(lambda x: update_bi_gram_dictionary(x, bigram_dictionary), bigrams))\n comment['unigrams'] = filtered_unigrams\n comment['bigrams'] = filtered_bigrams\n \n \ndef all_the_steps(comment: \"dict[str, str]\", unigram_dictionary, bigram_dictionary) -> bool:\n punc_dig_free_comment = clean_string(comment)\n multiple_punc_removed = strip_consecutive_punctutaion(punc_dig_free_comment)\n reduced_comment = reduce_lengthening_comment(multiple_punc_removed)\n tokenized_comment = tokenize(reduced_comment)\n if remove_dpc(tokenized_comment): \n update_dicts(tokenized_comment, unigram_dictionary, bigram_dictionary)\n if len(tokenized_comment['unigrams']) != 0 or len(tokenized_comment['bigrams']) != 0:\n return True\n else:\n return False\n else:\n return False\n \ndef remove_dpc(comment: \"dict[str, str]\"):\n def lemmatize(pos):\n global lemmatizer\n if pos[1] in ['NN', 'NNS', 'NNP', 'NNPS']:\n return (lemmatizer.lemmatize(pos[0], wordnet.NOUN), pos[1])\n elif pos[1] in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:\n return (lemmatizer.lemmatize(pos[0], wordnet.VERB), pos[1])\n elif pos[1] in ['JJ', 'JJR', 'JJS']:\n return (lemmatizer.lemmatize(pos[0], wordnet.ADJ), pos[1]) \n elif pos[1] in [ 'RB', 'RBR', 'RBS']:\n return (lemmatizer.lemmatize(pos[0], wordnet.ADV), pos[1])\n else:\n return pos \n token_pos = comment['tokens']\n lemmatized_tokens = list(map(lemmatize, token_pos))\n for token in lemmatized_tokens:\n if token[0] in lame_word_list:\n return False\n comment['tokens'] = lemmatized_tokens\n return True\n\n\ndef compute_informativeness_score(comment: \"dict[str, str]\", unigram_scores, bigram_scores) -> float:\n global subjectivity_threshold, objectivity_threshold, print_flag_array\n unigram_score = 1.0\n bigram_score = 1.0\n too_objective = True\n too_subjective = True\n print_flag = print_flag_array[np.random.randint(10)]\n\n for unigram in comment['unigrams']:\n unigram_score *= unigram_scores[unigram[0]]\n for bigram in comment['bigrams']:\n bigram_score *= bigram_scores[bigram[0][0] +'_' +bigram[1][0]]\n \n informativeness_score = -np.log(unigram_score * bigram_score)/2\n\n if informativeness_score <= subjectivity_threshold:\n too_subjective = False\n if informativeness_score >= objectivity_threshold and len(comment['tokens']) >= 5:\n too_objective = False\n\n final_flag = not (too_subjective or too_objective )\n\n if not final_flag:\n if too_objective and print_flag:\n print((\"{:0.1e}\".format(bigram_score)), \\\n (\"{:0.1e}\".format(unigram_score)), \\\n (\"{:0.1f}\".format(-np.log(unigram_score * \\\n bigram_score)/2)) , comment['clean'])\n else :\n if print_flag:\n print((\"{:0.1e}\".format(bigram_score)),\\\n (\"{:0.1e}\".format(unigram_score)), \\\n (\"{:0.1f}\".format(-np.log(unigram_score * \\\n bigram_score)/2)), comment['clean'])\n else:\n if print_flag:\n print((\"{:0.1e}\".format(bigram_score)),\\\n (\"{:0.1e}\".format(unigram_score)),\\\n (\"{:0.1f}\".format(-np.log(unigram_score * \\\n bigram_score)/2)), comment['clean'])\n\n return 
informativeness_score","repo_name":"mediatechnologycenter/aestheval","sub_path":"aestheval/text_prediction/informativeness_score.py","file_name":"informativeness_score.py","file_ext":"py","file_size_in_byte":8759,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"55"} +{"seq_id":"15488356910","text":"import numpy as np\n\nfrom pandas import DataFrame\n\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\n\n\"\"\"This module implements models for analyzing pairwise interactions in half-\nHeusler materials. Features are assumed to be pariwise elemental features\nand the targets are assumed to be thermodynamic quantities.\n\"\"\"\n\n\nclass GSLinearModel(GridSearchCV):\n \"\"\"A linear model trained using gradient descent. A cross-vaidation loop\n optimizes the model hyperparameters using the r2 scoring metric.\n\n This model also has a method for estimating partial bonding energies.\n\n Pipeline:\n 1. StandardScaler\n 2. SGDRegressor\n \"\"\"\n def __init__(self, gs_params):\n \"\"\"\n Args:\n gs_params (dict) search parameters for hyperparameter optimization.\n \"\"\"\n\n # constructs pipline steps\n scale = StandardScaler()\n regressor = SGDRegressor()\n pipe = Pipeline(steps=[('scale', scale), ('regressor', regressor)])\n\n # constructs the gridsearch estimator\n GridSearchCV.__init__(pipe, gs_params, refit=True, cv=5, scoring='r2')\n\n def estimate_partial_energies(self, features):\n \"\"\"Returns the partial energies in each bond. Estimating the partial\n energies is possible because each feature describes only one of the\n bonding interactions.\n\n Note:\n The fit() method must be called before calling this method.\n\n Args:\n features (array-like) Pairwise bonding features that are ordered\n according to their pairwise interactions. In addition, the\n number of features for each bonding pair must be the same.\n\n Returns:\n (DataFrame) The estimated partial energies for each sample.\n \"\"\"\n\n # applies the standard scaling to the features\n A, B, C = np.split(\n self.best_estimator_.steps[0][1].transform(features), 3, axis=1)\n\n # collects the coefficients of the linear model (last pipeline step)\n a, b, c = np.split(\n self.best_estimator_.named_steps['regressor'].coef_, 3)\n\n # computes the partial energies, which sum to the total energy\n partials = DataFrame()\n partials['partial_energy bcc_tet1'] = np.sum(A * a, axis=1)\n partials['partial_energy bcc_tet2'] = np.sum(B * b, axis=1)\n partials['partial_energy tet1_tet2'] = np.sum(C * c, axis=1)\n return partials\n\n\nclass GSRandomForest(GridSearchCV):\n \"\"\"A tree-based model with a cross-validation loop for optimizing model\n hyperparameters using the r2 scoring metric.\n\n This model also has a method implemented for performing feature selection.\n\n Pipeline:\n 1. StandardScaler\n 2. 
RandomForestRegressor\n \"\"\"\n def __init__(self, gs_params):\n \"\"\"\n Args:\n gs_params (dict) search parameters for hyperparameter optimization.\n \"\"\"\n\n # constructs pipline steps\n scale = StandardScaler()\n regressor = RandomForestRegressor()\n pipe = Pipeline(steps=[('scale', scale), ('regressor', regressor)])\n\n # constructs the gridsearch estimator\n GridSearchCV.__init__(pipe, gs_params, refit=True, cv=5, scoring='r2')\n\n def select_x_percent(features):\n \"\"\"Returns the top x% of features ranked by their importance\n\n Note:\n The fit() method must be called before calling this method.\n\n Args:\n features(array-like) Feature array passed during the call to fit().\n\n Returns:\n (array-like) Either a DataFrame or a numpy array of features that\n were most predictive of the target property during training.\n \"\"\"\n pass\n","repo_name":"dyllamt/bonding_models","sub_path":"bonding_models/models/scikitlearn.py","file_name":"scikitlearn.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"16393843187","text":"#!/usr/bin/env python3\nimport os\nfrom re import sub, subn\nimport sys\nimport csv\nimport glob\nimport yaml\nimport json\nimport time\nimport resource\nimport argparse\nimport subprocess\nimport xml.etree.ElementTree as ET\n\nfrom zipfile import ZipFile\n\nfrom threading import Lock\nfrom concurrent.futures import ThreadPoolExecutor\n\nMAPPING = {\n \"black\": 90,\n \"red\": 91,\n \"green\": 92,\n \"yellow\": 93,\n \"blue\": 94,\n \"purple\": 95,\n \"cyan\": 96,\n \"white\": 97\n}\n\nBOLD = \"\\033[1m\"\nPREFIX = \"\\033[\"\nSUFFIX = \"\\033[0m\"\n\nVRAM_LIMIT = 15 * 1024 * 1024 * 1024\n\ndef progress(msg, curr, total, prev=0):\n status = round((curr / total) * 100)\n color = MAPPING.get(\"cyan\")\n prog_str = f\"{BOLD}{PREFIX}{color}m{status:3}%{SUFFIX}\"\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\" \" * prev)\n sys.stdout.write(\"\\r\")\n sys.stdout.write(f\"[{prog_str}] {msg}\")\n sys.stdout.flush()\n return len(msg) + 7\n\n\ndef warn(msg, prefix=None):\n if prefix:\n sys.stdout.write(prefix)\n color = MAPPING.get(\"purple\")\n warn_str = f\"{BOLD}{PREFIX}{color}mWARN{SUFFIX}\"\n sys.stdout.write(f\"[{warn_str}] {msg}\\n\")\n sys.stdout.flush()\n\n\ndef info(msg, prefix=None):\n if prefix:\n sys.stdout.write(prefix)\n color = MAPPING.get(\"green\")\n warn_str = f\"{BOLD}{PREFIX}{color}mINFO{SUFFIX}\"\n sys.stdout.write(f\"[{warn_str}] {msg}\\n\")\n sys.stdout.flush()\n\n\ndef indent(msg, prefix=None):\n if prefix:\n sys.stdout.write(prefix)\n color = MAPPING.get(\"white\")\n ident_str = f\"{BOLD}{PREFIX}{color}m....{SUFFIX}\"\n sys.stdout.write(f\"[{ident_str}] {msg}\\n\")\n sys.stdout.flush()\n\nclass RowLengthDiffersException(Exception):\n def __init__(self, len1, len2):\n self.len1 = len1\n self.len2 = len2\n self.message = f'Expected row length of \\'{len1}\\' but got \\'{len2}\\''\n super().__init__(self.message)\n\n def __str__(self):\n return self.message\n\nclass CSVTableGenerator:\n def __init__(self, file='result.csv', header=[], memory=False):\n self.file = file\n self.header = header\n self.memory = memory\n self.rsize = len(header)\n self.table = []\n\n with open(self.file, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(self.header)\n\n def clear_table(self):\n if self.memory:\n self.table.clear()\n\n def add_row(self, row):\n if len(row) != self.rsize:\n raise RowLengthDiffersException(self.rsize, len(row))\n if self.memory:\n 
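# in-memory mode only buffers the row here; nothing reaches disk until\n            # commit() flushes the accumulated table in a single write\n            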
self.table.append(row)\n else:\n with open(self.file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n def commit(self):\n if self.memory:\n with open(self.file, 'a') as f:\n writer = csv.writer(f)\n writer.writerows(self.table)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n prog=\"run.py\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\"-j\", \"--jobs\", dest=\"jobs\", action=\"store\", type=int,\n default=1, help=\"number of jobs to run in parallel\")\n parser.add_argument(\"-c\", \"--conf\", dest=\"conf\", action=\"store\",\n default=\"share/wasp-c.xml\")\n parser.add_argument(\"--results\", dest=\"results\", action=\"store\",\n default=\"results\")\n parser.add_argument(\"--backend\", dest=\"backend\", action=\"store\",\n default=\"share/backend/wasp-ce.json\")\n parser.add_argument(\"--property\", dest=\"property\", action=\"store\",\n default=\"sv-benchmarks/c/properties/coverage-error-call.prp\")\n parser.add_argument(\"--validate\", dest=\"validate\", action=\"store\",\n default=None)\n return parser\n\n\ndef parse(argv):\n return get_parser().parse_args(argv)\n\n\ndef parse_report(f):\n try:\n with open(f, \"r\") as fd:\n return json.load(fd)\n except:\n return { \"specification\" : \"Timeout\", \"solver_time\" : 0.0,\n \"paths_explored\" : 0 }\n\ndef parse_yaml(f):\n with open(f, \"r\") as fd:\n return yaml.load(fd, Loader=yaml.SafeLoader)\n\ndef parse_list(f):\n with open(f, \"r\") as fd:\n data = fd.readlines()\n return list(map(lambda l: l.strip(),\n filter(lambda l: not l.startswith(\"#\"), data)))\n\n\ndef parse_tasks(conf):\n tasks = {}\n root = ET.parse(conf).getroot()\n for task in root.findall(\"tasks\"):\n name = task.attrib[\"name\"]\n tasks[name] = set()\n for i in task.findall(\"includesfile\"):\n tasks_sets = parse_list(i.text)\n for tasks_set_file in tasks_sets:\n if not tasks_set_file:\n continue\n tasks_set = glob.glob(\n os.path.join(os.path.dirname(i.text), tasks_set_file)\n )\n tasks[name] = tasks[name].union(set(tasks_set))\n for i in task.findall(\"excludesfile\"):\n tasks_sets = parse_list(i.text)\n for tasks_set_file in tasks_sets:\n if not tasks_set_file:\n continue\n if tasks_set_file.startswith(\"sv-benchmarks\"):\n tasks_set = glob.glob(tasks_set_file)\n else:\n tasks_set = glob.glob(\n os.path.join(os.path.dirname(i.text), tasks_set_file)\n )\n tasks[name] = tasks[name].difference(set(tasks_set))\n return tasks\n\ndef limit_ram(limit):\n return lambda: resource.setrlimit(resource.RLIMIT_AS, (limit, limit))\n\ndef execute(benchmark, output_dir, backend, prop):\n result = {\n \"runtime\" : 0.0,\n \"answer\" : \"Timeout\",\n \"solver_time\" : 0.0,\n \"paths_explored\" : 0\n }\n start = time.time()\n cmd = [\n \"wasp-c\", benchmark,\n \"--output\", output_dir,\n \"-I\", \"../wasp-private/share/libc\"\n# \"--backend\", backend,\n# \"--test-comp\",\n# \"--property\", prop,\n# \"--arch\", \"32\",\n# \"--timeout\", \"900\"\n ]\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n preexec_fn=limit_ram(VRAM_LIMIT))\n try:\n _, _ = proc.communicate(timeout=900.0)\n report = parse_report(os.path.join(output_dir, \"report.json\"))\n result[\"answer\"] = str(report[\"specification\"])\n result[\"solver_time\"] = float(report[\"solver_time\"])\n result[\"paths_explored\"] = int(report[\"paths_explored\"])\n except subprocess.TimeoutExpired:\n result[\"answer\"] = \"Timeout\"\n proc.kill()\n proc.communicate()\n result[\"runtime\"] = time.time() - start\n return 
result\n\ndef run_benchmark(lock, conf, benchmark):\n global prev\n global curr\n\n size = conf[\"size\"]\n prop = conf[\"prop\"]\n backend = conf[\"backend\"]\n table = conf[\"table\"]\n lock.acquire()\n curr += 1\n prev = progress(f\"Running {benchmark}\", curr, size, prev = prev)\n lock.release()\n\n benchmark_conf = parse_yaml(benchmark)\n skip = True\n for prp in benchmark_conf[\"properties\"]:\n prop_name = os.path.basename(prop)\n prp_name = os.path.basename(prp[\"property_file\"])\n if prop_name == prp_name:\n skip = False\n break\n if skip:\n return None\n\n benchmark_file = os.path.join(os.path.dirname(benchmark),\n benchmark_conf[\"input_files\"])\n output_dir = os.path.join(\n \"wasp-out\",\n os.path.basename(os.path.dirname(benchmark_file)),\n os.path.basename(benchmark_file)\n )\n result = execute(benchmark_file, output_dir, backend, prop)\n lock.acquire()\n table.add_row([\n benchmark_file,\n result[\"answer\"],\n result[\"runtime\"],\n result[\"solver_time\"],\n result[\"paths_explored\"]\n ])\n lock.release()\n\n\ndef run_tasks(tasks, args):\n global prev\n global curr\n info(\"Starting Test-Comp Benchmarks...\")\n info(f\"property={args.property}, jobs={args.jobs}\")\n\n if not os.path.exists(args.results):\n os.makedirs(args.results)\n\n n_tasks = []\n for _, benchmarks in tasks.items():\n n_tasks += benchmarks\n with ThreadPoolExecutor(max_workers=args.jobs) as executor:\n info(f\"Analysing Test-Comp benchmarks.\", prefix=\"\\n\")\n table = CSVTableGenerator(\n file = os.path.join(args.results, f\"all.csv\"),\n header=[\"test\", \"answer\", \"t_backend\", \"t_solver\", \"paths\"]\n )\n lock = Lock()\n size, prev, curr = len(n_tasks), 0, 0\n conf = {\n \"prop\" : args.property,\n \"size\" : size,\n \"backend\" : args.backend,\n \"table\" : table,\n }\n results = executor.map(lambda b : run_benchmark(lock, conf, b), n_tasks)\n for _ in results:\n pass\n\n return 0\n\ndef validate(conf):\n (bench, args) = conf\n bench_conf = parse_yaml(bench)\n skip = True\n for prp in bench_conf[\"properties\"]:\n prop_name = os.path.basename(args.property)\n prp_name = os.path.basename(prp[\"property_file\"])\n if prop_name == prp_name:\n skip = False\n break\n if skip:\n return 1\n benchmark_file = os.path.join(os.path.dirname(bench),\n bench_conf[\"input_files\"])\n testsuite = os.path.join(\n args.validate,\n os.path.basename(os.path.dirname(benchmark_file)),\n os.path.basename(benchmark_file),\n \"test-suite\"\n )\n if not os.path.exists(testsuite):\n return 1\n # zip test-suite\n testcases = glob.glob(os.path.join(testsuite, \"*.xml\"))\n testsuite = os.path.join(testsuite, \"test-suite.zip\")\n with ZipFile(testsuite, \"w\") as zip_file:\n for testcase in testcases:\n zip_file.write(testcase)\n output_dir = os.path.join(\"val-out\", bench)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n subprocess.run(\n [\n \"/home/fmarques/test-suite-validator/bin/testcov\", benchmark_file,\n \"--no-plots\",\n \"--no-isolation\",\n \"--memlimit\", \"6GB\",\n \"--timelimit-per-run\", \"50\",\n \"--test-suite\", testsuite,\n \"--output\", output_dir\n ],\n check=True\n )\n aux_file = \"instrumented_\" + os.path.basename(benchmark_file) + \".gcov\"\n if os.path.exists(aux_file):\n os.remove(aux_file)\n return 0\n\ndef validate_tasks(tasks, args):\n info(\"Starting Test-Comp validation...\")\n info(f\"property={args.property}\")\n for cat, benchmarks in tasks.items():\n info(f\"Validating \\\"{cat}\\\"...\", prefix=\"\\n\")\n list(map(validate, [(bench, args) for bench in 
benchmarks]))\n    return 0\n\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv[1:]\n    args = parse(argv)\n    tasks = parse_tasks(args.conf)\n    if not args.validate:\n        return run_tasks(tasks, args)\n    return validate_tasks(tasks, args)\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","repo_name":"wasp-platform/Test-Comp","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"33795821718","text":"from django.http import HttpResponseRedirect\nfrom .models import produit, fournisseur\nfrom .forms import *\nfrom django.shortcuts import redirect, render\nfrom .forms import ProduitForm, FournisseurForm, UserRegistrationForm\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib import messages\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm\n\n\ndef index(request):\n    products = produit.objects.all()\n    return render(request, 'magasin/vitrine.html', {'list': products})\n\ndef AddProd(request):\n    if request.method == \"POST\":\n        form = ProduitForm(request.POST, request.FILES)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/magasin')\n    else:\n        form = ProduitForm()\n    produits = produit.objects.all()\n    return render(request, 'magasin/majProduits.html', {'produits': produits, 'form': form})\n\ndef produit_detail(request, product_id):\n    # Retrieve the product matching the given unique identifier\n    product = get_object_or_404(produit, id=product_id)\n    \n    return render(request, 'product_detail.html', {'product': product})\n\ndef edit_product(request, product_id):\n    # use a local name that does not shadow the produit model class\n    product = produit.objects.get(id=product_id)\n    form = ProduitForm(instance=product)\n    \n    if request.method == 'POST':\n        form = ProduitForm(request.POST, instance=product)\n        if form.is_valid():\n            form.save()\n            return redirect('produit_detail', product_id=product.id)\n    \n    return render(request, 'edit_produit.html', {'form': form, 'produit': product})\n\ndef delete_product(request, pk):\n    product = get_object_or_404(produit, pk=pk)\n    if request.method == 'POST':\n        product.delete()\n        return redirect('index')\n    return render(request, 'magasin/delete_product.html', {'product': product})\n\n\ndef nouveauFournisseur(request):\n    if request.method == \"POST\":\n        form = FournisseurForm(request.POST, request.FILES)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/magasin/affichefou')\n    else:\n        form = FournisseurForm()\n    fournisseurs = fournisseur.objects.all()\n    return render(request, 'magasin/testForm.html', {'fournisseurs': fournisseurs, 'form': form})\ndef affichefou(request):\n    fou = fournisseur.objects.all()\n    return render(request, 'magasin/vitrine2.html', {'fou': fou})\ndef register(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            password = form.cleaned_data.get('password1')\n            user = authenticate(username=username, password=password)\n            login(request, user)\n            messages.success(request, f'Hi {username}, your account was created successfully!')\n            return redirect('home')\n    else:\n        form = UserCreationForm()\n    return render(request,'registration/register.html',{'form' : 
form})","repo_name":"Chah1ne/Django-Project","sub_path":"djangoenv/mysite/magasin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"7514354839","text":"from datetime import datetime\n\nfrom calendarapp.models import Event\nfrom diploma_backend.forms import SearchForm\n\n\ndef all_events(request):\n events = Event.objects.get_all_events()\n running_events = Event.objects.get_running_events()\n search_form = SearchForm()\n page = request.GET.get('page', None)\n q = request.GET.get('q', None)\n if page is not None and q is not None:\n search_form = SearchForm(initial={\"page\": page, \"q\": q})\n event_list = []\n # for event in events:\n # if not event.start_time:\n # event_list.append({\n # \"title\": event.title,\n # \"end\": event.end_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n # \"url\": event.url,\n # })\n # elif event.url:\n # event_list.append({\n # \"title\": event.title,\n # \"start\": event.start_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n # \"end\": event.end_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n # \"url\": event.url,\n # })\n # else:\n # event_list.append(\n # {\n # \"title\": event.title,\n # \"start\": event.start_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n # \"end\": event.end_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n # }\n # )\n return {\"all_events\": event_list, \"events_obj\": events, 'running_events': running_events,\n \"search_form\": search_form}\n","repo_name":"Adilkhanweb/diploma","sub_path":"backend/diploma_backend/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16344890322","text":"from keras import Model, Input\nfrom keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, Resizing\n\n\nclass ImageRegressionModel(Model):\n def __init__(self, num_target=4, img_size: tuple = (128, 128)):\n \"\"\"\n Base Model for image regression\n\n :param num_target: Number of target features as an output 1D vector\n :param img_size: Height x Width of input image.\n \"\"\"\n super().__init__()\n self.preprocess_resize = Resizing(*img_size, crop_to_aspect_ratio=True)\n self.conv_1 = Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 1))\n self.pool_1 = MaxPooling2D((3, 3))\n self.conv_2 = Conv2D(16, 3, activation='relu')\n self.pool_2 = MaxPooling2D((3, 3))\n self.flatten_1 = Flatten()\n self.dense3 = Dense(num_target)\n self.num_target = num_target\n\n def call(self, inputs, training=None, mask=None):\n x = self.preprocess_resize(inputs)\n x = self.conv_1(x)\n x = self.pool_1(x)\n x = self.conv_2(x)\n x = self.pool_2(x)\n x = self.flatten_1(x)\n\n return self.dense3(x)\n\n\nif __name__ == \"__main__\":\n input = Input((128, 128, 1))\n imgress = ImageRegressionModel()\n imgress.build(input_shape=(None, 128, 128, 1))\n imgress.call(input)\n imgress.summary()\n\n import random\n from src.utils.loader import get_image_paths_from_dir, RegressionDataLoader\n\n num_train = 800\n\n img_data_dir = \"../../../dataset/20220209/images\"\n target_data_path = \"../../../dataset/20220209/images/targets.npy\"\n\n img_paths = get_image_paths_from_dir(img_data_dir)\n random.Random(1337).shuffle(img_paths)\n\n batch_size = 10\n train_img_paths = img_paths[:num_train]\n test_img_paths = img_paths[num_train:]\n\n gen_kwargs = dict(target_data_path=target_data_path, num_targets=4, batch_size=batch_size, 
img_size=(128, 128))\n\n    train_gen = RegressionDataLoader(input_img_paths=train_img_paths, **gen_kwargs)\n    test_gen = RegressionDataLoader(input_img_paths=test_img_paths, **gen_kwargs)\n\n    # train_sample\n","repo_name":"bearylogical/bayesian_mm","sub_path":"src/models/regression/cnn_regression.py","file_name":"cnn_regression.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43192317106","text":"from __future__ import absolute_import\nimport mock\nimport unittest\nimport os\n\nfrom yardstick.benchmark.scenarios.compute import memload\n\n\n@mock.patch('yardstick.benchmark.scenarios.compute.memload.ssh')\nclass MEMLoadTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.ctx = {\n            'host': {\n                'ip': '172.16.0.137',\n                'user': 'root',\n                'key_filename': \"mykey.key\"\n            }\n        }\n\n        self.result = {}\n\n    def test_memload_successful_setup(self, mock_ssh):\n        m = memload.MEMLoad({}, self.ctx)\n        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n\n        m.setup()\n        self.assertIsNotNone(m.client)\n        self.assertTrue(m.setup_done)\n\n    def test_execute_command_success(self, mock_ssh):\n        m = memload.MEMLoad({}, self.ctx)\n        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n        m.setup()\n\n        expected_result = 'abcdefg'\n        mock_ssh.SSH.from_node().execute.return_value = (0, expected_result, '')\n        result = m._execute_command(\"foo\")\n        self.assertEqual(result, expected_result)\n\n    def test_execute_command_failed(self, mock_ssh):\n        m = memload.MEMLoad({}, self.ctx)\n        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n        m.setup()\n\n        mock_ssh.SSH.from_node().execute.return_value = (127, '', 'Failed executing \\\n            command')\n        self.assertRaises(RuntimeError, m._execute_command,\n                          \"cat /proc/meminfo\")\n\n    def test_get_mem_usage_successful(self, mock_ssh):\n        options = {\n            \"interval\": 1,\n            \"count\": 1\n        }\n        args = {\"options\": options}\n        m = memload.MEMLoad(args, self.ctx)\n        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n        m.setup()\n\n        output = self._read_file(\"memload_sample_output.txt\")\n        mock_ssh.SSH.from_node().execute.return_value = (0, output, '')\n        result = m._get_mem_usage()\n        expected_result = {\n            \"max\": {\n                'shared': 2844,\n                'buff/cache': 853528,\n                'total': 263753976,\n                'free': 187016644,\n                'used': 76737332\n            },\n            \"average\": {\n                'shared': 2844,\n                'buff/cache': 853528,\n                'total': 263753976,\n                'free': 187016644,\n                'used': 76737332\n            },\n            \"free\": {\n                \"memory0\": {\n                    \"used\": \"76737332\",\n                    \"buff/cache\": \"853528\",\n                    \"free\": \"187016644\",\n                    \"shared\": \"2844\",\n                    \"total\": \"263753976\",\n                    \"available\": \"67252400\"\n                }\n            }\n        }\n\n        self.assertEqual(result, expected_result)\n\n    def _read_file(self, filename):\n        curr_path = os.path.dirname(os.path.abspath(__file__))\n        output = os.path.join(curr_path, filename)\n        with open(output) as f:\n            sample_output = f.read()\n        return sample_output\n","repo_name":"opnfv/yardstick","sub_path":"yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py","file_name":"test_memload.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"}
+{"seq_id":"72469324302","text":"import requests\n\n\ndef get_zone_id(provider, IAM_TOKEN):\n    name = input('Enter the domain name: ')\n    if provider == 1:\n        print('Not implemented yet')\n        exit(404)\n    elif provider == 2:\n        url = 'https://dns.myhuaweicloud.com/v2/zones'\n        headers = {\n            'Content-Type': 'application/json;charset=utf8',\n            'X-Auth-Token': IAM_TOKEN\n        }\n        zones = requests.get(url, headers=headers)\n        # extract the id and name of every zone and print them\n        for zone in zones.json()['zones']:\n            print(zone['id'], zone['name'])","repo_name":"tony6960/DFS","sub_path":"zone_set.py","file_name":"zone_set.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20908400063","text":"from dataclasses import dataclass\nimport os\nimport sys\nfrom typing import Tuple\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom src.exception import CustomException\nfrom src.logger import logging\n\nfrom src.components.data_transformation import DataTransformation\nfrom src.components.model_trainer import ModelTrainer\n\n\n@dataclass\nclass DataIngestionConfig:\n    train_data_path: str = os.path.join(\"artifacts\", \"train.csv\")\n    test_data_path: str = os.path.join(\"artifacts\", \"test.csv\")\n    raw_data_path: str = os.path.join(\"artifacts\", \"data.csv\")\n\n\nclass DataIngestion:\n    def __init__(self):\n        self.ingestion_config = DataIngestionConfig()\n\n    def initiate_data_ingestion(self) -> Tuple[str, str]:\n        \"\"\"\n        Read raw data - from DB, file, API, etc.\n        \"\"\"\n        logging.info(\"Starting data ingestion\")\n\n        try:\n            # raw string avoids accidental escape interpretation in the Windows path\n            df = pd.read_csv(r\"notebooks\\data\\stud.csv\")\n            logging.info(\"Read the data from the source into DF\")\n\n            os.makedirs(\n                os.path.dirname(self.ingestion_config.train_data_path), exist_ok=True\n            )\n            logging.info(\"Create the datasets directory if not present\")\n\n            df.to_csv(self.ingestion_config.raw_data_path, index=False, header=True)\n\n            logging.info(\"Train-test split initiated\")\n            train_set, test_set = train_test_split(df, test_size=0.2, random_state=42)\n\n            train_set.to_csv(\n                self.ingestion_config.train_data_path, index=False, header=True\n            )\n            test_set.to_csv(\n                self.ingestion_config.test_data_path, index=False, header=True\n            )\n            logging.info(\"Finished data ingestion\")\n\n            return (\n                self.ingestion_config.train_data_path,\n                self.ingestion_config.test_data_path,\n            )\n\n        except Exception as e:\n            msg = f\"Error while ingesting the data: {e}\"\n            logging.error(msg)\n            raise CustomException(msg, sys)\n\n\nif __name__ == \"__main__\":\n    ingestion = DataIngestion()\n    train_path, test_path = ingestion.initiate_data_ingestion()\n\n    transformation = DataTransformation()\n    train_arr, test_arr, _ = transformation.initiate_data_transformation(\n        train_path, test_path\n    )\n\n    print(train_arr, \"\\n\", test_arr)\n\n    trainer = ModelTrainer()\n    model_score = trainer.initiate_model_training(train_arr, test_arr)\n    print(f\"Training finished, model score: {model_score}\")\n","repo_name":"shatandv/e2e-ml-project","sub_path":"src/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"71114792463","text":"# based on https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/skeleton_website etc\n\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'equiptypes'\n\nurlpatterns = [\n path('', views.public_index, name='index'),\n path('', views.public_index, name='eqty'),\n path('update_details', views.update_details, name='update_details')\n]\n","repo_name":"hillwithsmallfields/makers","sub_path":"apps/equiptypes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"37827574641","text":"'''Users admin config site'''\n\n#Django\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\n\n#Models\nfrom django.contrib.auth.models import User\nfrom users.models import Profile\n\n\n@admin.register(Profile)\n\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ('id','user', 'address', 'postalCode', 'city', 'country', 'phoneNumber', 'cc_number', 'cc_expiry', 'cc_code')\n list_display_links = ['id', 'user']\n list_editable = ['address', 'postalCode', 'country', 'city', 'phoneNumber', 'cc_number', 'cc_expiry', 'cc_code']\n search_fields = ['user__email', 'user__is_staff', 'created_at', 'modified_at']\n list_filter = ['user__is_active', 'user__is_staff', 'created_at', 'modified_at']\n\n\nclass ProfileInline(admin.StackedInline):\n \n model = Profile\n can_delete = False\n verbose_name_plural = 'profiles'\n \nclass UserAdmin(BaseUserAdmin):\n inlines = [ProfileInline]\n list_display = ['username', 'email', 'first_name', 'last_name', 'is_active', 'is_staff']\n list_editable = ['is_active', 'is_staff']\n \n \nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\n\n\n\n","repo_name":"Greenhouse-Garden/Django-API","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23417556680","text":"BASE_VAL = 10\nBASE_EXP = 1.5\n## Function to ask user input for magnitude\ndef get_magnitude(num):\n magnitude = float(input(\"Enter the magnitude of earthquake \"+ str(num) +\":\"))\n while( magnitude < 1):\n magnitude = float(input(\"Enter the magnitude of earthquake\" + str(num) +\":\"))\n return magnitude\n\n## Compares the magnitudes\ndef compare_magnitude(magnitude1, magnitude2):\n difference = BASE_VAL **( BASE_EXP * (magnitude1 - magnitude2) )\n return difference\n\n## Will run function again if wanted\ndef get_run_again() :\n rerun = int(input(\"Try again? 
Type 1 for Yes: \") )\n if (rerun == 1):\n return True\n else:\n return False\n\n## Main Function\ndef main():\n ## Sets retry to true\n retry = True\n ## when retry is true loop will continue\n while(retry):\n magnitude1 = get_magnitude(1)\n magnitude2 = get_magnitude(2)\n ## Compares the difference between magnitude1 and magnitude2\n compare = compare_magnitude(magnitude1, magnitude2)\n \n compare2 = (1 / compare_magnitude(magnitude1, magnitude2) )\n if( magnitude1 > magnitude2 ):\n print(\"An earthquake of magnitude\",magnitude1,\"is\", \"{0:.1f}\".format(compare),\"times more powerful than an earthquake with of magnitude\",magnitude2)\n elif (magnitude2 > magnitude1):\n print(\"An earthquake of magnitude\",magnitude2,\"is\", \"{0:.1f}\".format(compare2),\"times more powerful than an earthquake with of magnitude\",magnitude1)\n retry = get_run_again()\n if( retry == False):\n print(\"Bye\")\nmain()\n\n","repo_name":"christiannlam/Python-Projects","sub_path":"EarthquakeMag.py","file_name":"EarthquakeMag.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40361813022","text":"\"\"\"Contains game loop functions\"\"\"\n\nimport botutils\nimport asyncio\nimport math\nimport traceback\nimport json\nimport discord\nimport datetime\nimport configparser\nfrom botc import ChoppingBlock\nfrom discord.ext import tasks\n\nConfig = configparser.ConfigParser()\nConfig.read(\"preferences.INI\")\n\n# Lengths\nBASE_NIGHT = int(Config[\"botc\"][\"BASE_NIGHT\"])\nNIGHT_MULTIPLER = int(Config[\"botc\"][\"NIGHT_MULTIPLER\"])\nBASE_DAWN = int(Config[\"botc\"][\"BASE_DAWN\"])\nDAWN_MULTIPLIER = int(Config[\"botc\"][\"DAWN_MULTIPLIER\"])\nVOTE_TIMEOUT = int(Config[\"botc\"][\"VOTE_TIMEOUT\"])\nDELETE_VOTE_AFTER = int(Config[\"botc\"][\"DELETE_VOTE_AFTER\"])\nDEBATE_TIME = int(Config[\"botc\"][\"DEBATE_TIME\"])\nINCREMENT = int(Config[\"botc\"][\"INCREMENT\"])\n\n# Colors\nCARD_LYNCH = Config[\"colors\"][\"CARD_LYNCH\"]\nCARD_LYNCH = int(CARD_LYNCH, 16)\nCARD_NO_LYNCH = Config[\"colors\"][\"CARD_NO_LYNCH\"]\nCARD_NO_LYNCH = int(CARD_NO_LYNCH, 16)\n\n# Config\nConfig.read(\"config.INI\")\n\nPREFIX = Config[\"settings\"][\"PREFIX\"]\n\nwith open('botc/game_text.json') as json_file: \n documentation = json.load(json_file)\n approved_seal = documentation[\"images\"][\"approved_seal\"]\n denied_seal = documentation[\"images\"][\"denied_seal\"]\n ghost_vote_url = documentation[\"images\"][\"ghost_vote\"]\n blank_token_url = documentation[\"images\"][\"blank_token\"]\n alive_lynch = documentation[\"images\"][\"alive_lynch\"]\n alive_no_lynch = documentation[\"images\"][\"alive_no_lynch\"]\n dead_lynch = documentation[\"images\"][\"dead_lynch\"]\n dead_no_lynch = documentation[\"images\"][\"dead_no_lynch\"]\n call_for_vote = documentation[\"gameplay\"][\"call_for_vote\"]\n votes_stats = documentation[\"gameplay\"][\"votes_stats\"]\n votes_to_exe = documentation[\"gameplay\"][\"votes_to_exe\"]\n votes_to_tie = documentation[\"gameplay\"][\"votes_to_tie\"]\n votes_current = documentation[\"gameplay\"][\"votes_current\"]\n voted_yes = documentation[\"gameplay\"][\"voted_yes\"]\n voted_no = documentation[\"gameplay\"][\"voted_no\"]\n verdict_chopping = documentation[\"gameplay\"][\"verdict_chopping\"]\n verdict_safe = documentation[\"gameplay\"][\"verdict_safe\"]\n nomination_intro = documentation[\"gameplay\"][\"nomination_intro\"]\n vote_summary = documentation[\"gameplay\"][\"vote_summary\"]\n nomination_short = 
documentation[\"gameplay\"][\"nomination_short\"]\n nominations_open = documentation[\"gameplay\"][\"nominations_open\"]\n nomination_countdown = documentation[\"gameplay\"][\"nomination_countdown\"]\n day_over_soon = documentation[\"gameplay\"][\"day_over_soon\"]\n no_execution = documentation[\"gameplay\"][\"no_execution\"]\n execution = documentation[\"gameplay\"][\"execution\"]\n copyrights_str = documentation[\"misc\"][\"copyrights\"]\n\nwith open('botutils/bot_text.json') as json_file: \n language = json.load(json_file)\n error_str = language[\"system\"][\"error\"]\n\nglobal botc_game_obj\n\n\n@tasks.loop(count = 1)\nasync def nomination_loop(game, nominator, nominated):\n \"\"\"One round of nomination. Iterate through all players with available \n votes and register votes using reactions.\n\n A vote results in an execution if the number of votes equals or exceeds \n half the number of alive players.\n \"\"\"\n\n intro_msg = nomination_intro.format(\n botutils.BotEmoji.gallows,\n botutils.make_alive_ping() + \" \" + botutils.make_dead_ping(),\n nominator.user.mention, \n nominated.user.mention,\n DEBATE_TIME\n )\n await botutils.send_lobby(intro_msg)\n\n import globvars\n\n # Debate time\n debate_timer.start()\n await asyncio.sleep(DEBATE_TIME)\n\n # Counts\n nb_total_players = len(game.sitting_order)\n nb_alive_players = len([player for player in game.sitting_order if player.is_apparently_alive()])\n nb_available_votes = len([player for player in game.sitting_order if player.has_vote()])\n nb_required_votes = math.ceil(nb_alive_players / 2)\n nb_current_votes = 0\n\n # The starting index is one after the nominated player\n find_nominated = lambda p: p.user.id == nominated.user.id\n nominated_idx = next(i for i, v in enumerate(game.sitting_order) if find_nominated(v))\n start_idx = nominated_idx + 1\n end_idx = start_idx + len(game.sitting_order)\n\n for i in range(start_idx, end_idx):\n\n idx = i % len(game.sitting_order)\n player = game.sitting_order[idx]\n \n if player.has_vote():\n\n link = ghost_vote_url if player.is_apparently_dead() else blank_token_url\n\n # Construct the message\n author_str = f\"{player.user.name}#{player.user.discriminator}, \"\n msg = call_for_vote.format(nominated.game_nametag)\n msg += \"\\n\\n\"\n\n # General vote stats\n # 10 players total. 10 players alive. 10 available voters.\n msg += votes_stats.format(\n total = nb_total_players,\n emoji_total = botutils.BotEmoji.people,\n alive = nb_alive_players,\n emoji_alive = botutils.BotEmoji.alive,\n votes = nb_available_votes,\n emoji_votes = botutils.BotEmoji.votes\n )\n msg += \"\\n\"\n\n # Goal vote stats\n # 【 5 :approved: votes to execute. 】 or 【 5 :approved: votes to tie. 】\n # Someone is already on the chopping block.\n if game.chopping_block:\n msg += votes_to_tie.format(\n votes = game.chopping_block.nb_votes,\n emoji = botutils.BotEmoji.approved\n )\n\n # No one is on the chopping block yet\n else:\n msg += votes_to_exe.format(\n votes = nb_required_votes,\n emoji = botutils.BotEmoji.approved\n )\n \n msg += \"\\n\"\n\n # Current vote stats\n # 【 0 :approved: votes currently. 
】\n msg += votes_current.format(\n votes = nb_current_votes,\n emoji = botutils.BotEmoji.approved\n )\n\n # Create the embed and associated assets\n embed = discord.Embed(description = msg)\n embed.set_author(name = author_str, icon_url=player.user.avatar_url)\n embed.set_thumbnail(url = link)\n\n # Send the message and add reactions\n message = await botutils.send_lobby(message = player.user.mention, embed = embed)\n await message.add_reaction(botutils.BotEmoji.approved)\n await message.add_reaction(botutils.BotEmoji.denied)\n\n def check(reaction, user):\n \"\"\"Reaction must meet these criteria:\n - Must be from the user in question\n - Must be one of the two voting emojis\n - Must be on the same voting call message\n \"\"\"\n return user.id == player.user.id and \\\n str(reaction.emoji) in (botutils.BotEmoji.approved, botutils.BotEmoji.denied) and \\\n reaction.message.id == message.id\n \n try:\n reaction, user = await globvars.client.wait_for('reaction_add', timeout=VOTE_TIMEOUT, check=check)\n assert user.id == player.user.id, f\"{user} reacted instead\"\n \n # The player did not vote. It counts as a \"No\" (hand down)\n except asyncio.TimeoutError:\n author_str = f\"{player.user.name}#{player.user.discriminator}, \"\n msg = voted_no.format(\n botutils.BotEmoji.denied,\n nominated.game_nametag\n )\n new_embed = discord.Embed(\n description = msg,\n color = CARD_NO_LYNCH\n )\n new_embed.set_author(name = author_str, icon_url=player.user.avatar_url)\n if player.is_apparently_alive():\n new_embed.set_thumbnail(url = alive_no_lynch)\n else:\n new_embed.set_thumbnail(url = dead_no_lynch)\n await message.edit(embed = new_embed, delete_after = DELETE_VOTE_AFTER)\n await message.clear_reactions()\n continue\n\n # The player has voted\n else:\n\n # Hand up (lynch)\n if str(reaction.emoji) == botutils.BotEmoji.approved:\n author_str = f\"{player.user.name}#{player.user.discriminator}, \"\n msg = voted_yes.format(\n botutils.BotEmoji.approved,\n nominated.game_nametag\n )\n nb_current_votes += 1\n player.spend_vote()\n new_embed = discord.Embed(\n description = msg,\n color = CARD_LYNCH\n )\n new_embed.set_author(name = author_str, icon_url=player.user.avatar_url)\n if player.is_apparently_alive():\n new_embed.set_thumbnail(url = alive_lynch)\n else:\n new_embed.set_thumbnail(url = dead_lynch)\n \n # Hand down (no lynch)\n elif str(reaction.emoji) == botutils.BotEmoji.denied:\n author_str = f\"{player.user.name}#{player.user.discriminator}, \"\n msg = voted_no.format(\n botutils.BotEmoji.denied,\n nominated.game_nametag\n )\n new_embed = discord.Embed(\n description = msg,\n color = CARD_NO_LYNCH\n )\n new_embed.set_author(name = author_str, icon_url=player.user.avatar_url)\n if player.is_apparently_alive():\n new_embed.set_thumbnail(url = alive_no_lynch)\n else:\n new_embed.set_thumbnail(url = dead_no_lynch)\n \n await message.edit(embed = new_embed, delete_after = DELETE_VOTE_AFTER)\n await message.clear_reactions()\n \n # ----- The summmary embed message -----\n\n msg = nomination_short.format(\n nominated.game_nametag,\n nominator.game_nametag\n )\n msg += \"\\n\"\n\n # General vote stats\n msg += votes_stats.format(\n total = nb_total_players,\n emoji_total = botutils.BotEmoji.people,\n alive = nb_alive_players,\n emoji_alive = botutils.BotEmoji.alive,\n votes = nb_available_votes,\n emoji_votes = botutils.BotEmoji.votes\n )\n msg += \"\\n\"\n\n # Goal vote stats\n if game.chopping_block:\n msg += votes_to_tie.format(\n votes = game.chopping_block.nb_votes,\n emoji = 
botutils.BotEmoji.approved\n )\n else:\n msg += votes_to_exe.format(\n votes = nb_required_votes,\n emoji = botutils.BotEmoji.approved\n )\n msg += \"\\n\"\n\n # Current vote stats\n msg += votes_current.format(\n votes = nb_current_votes,\n emoji = botutils.BotEmoji.approved\n )\n msg += \"\\n\"\n msg += \"\\n\"\n\n # The vote count has reached execution threshold. \n if nb_current_votes >= nb_required_votes:\n # Someone is on the chopping block\n if game.chopping_block:\n # Tie: no one is lynched\n if nb_current_votes == game.chopping_block.nb_votes:\n globvars.master_state.game.chopping_block = ChoppingBlock(None, nb_current_votes)\n msg += verdict_safe.format(nominated.game_nametag)\n thumbnail_url = denied_seal\n # This player will replace the person on the chopping block.\n elif nb_current_votes > game.chopping_block.nb_votes:\n globvars.master_state.game.chopping_block = ChoppingBlock(nominated, nb_current_votes)\n msg += verdict_chopping.format(nominated.game_nametag)\n thumbnail_url = approved_seal\n # The player on the chopping block remains there.\n else:\n msg += verdict_safe.format(nominated.game_nametag)\n thumbnail_url = denied_seal\n # No one is on the chopping block currently. \n # The player is now on the chopping block awaiting death.\n else:\n globvars.master_state.game.chopping_block = ChoppingBlock(nominated, nb_current_votes)\n msg += verdict_chopping.format(nominated.game_nametag)\n thumbnail_url = approved_seal\n\n # The execution did not pass. The player is safe.\n else:\n msg += verdict_safe.format(nominated.game_nametag)\n thumbnail_url = denied_seal\n \n summary_embed = discord.Embed(description = msg)\n summary_embed.set_author(\n name = vote_summary\n )\n summary_embed.set_thumbnail(url = thumbnail_url)\n summary_embed.set_footer(text = copyrights_str)\n summary_embed.timestamp = datetime.datetime.utcnow()\n await botutils.send_lobby(message = None, embed = summary_embed)\n\n\nasync def night_loop(game):\n \"\"\"Night loop\n ----- Night : \n 30 seconds min\n 90 seconds max\n At intervals of 15 seconds when all actions are submitted (45, 60, 75)\n \"\"\"\n # Transition to night fall\n await game.make_nightfall()\n # Start night\n if not game._chrono.is_night_1():\n # Night 1 is alraedy handled by the opening dm\n await before_night(game)\n # Base night length\n await asyncio.sleep(BASE_NIGHT)\n # Increment night by small blocks of time if not all players have finished actions\n for _ in range(NIGHT_MULTIPLER):\n if game.has_received_all_expected_night_actions():\n break\n await asyncio.sleep(INCREMENT)\n # End night 1\n if game._chrono.is_night_1():\n await after_night_1(game)\n # End a regular night\n else:\n await after_night(game)\n\n\nasync def dawn_loop(game):\n \"\"\"Dawn loop\n ----- Dawn : \n 15 seconds min\n 30 seconds max\n At intervals of 15 seconds (15, 30)\n \"\"\"\n # Start dawn\n await game.make_dawn()\n # Query for dawn actions\n for player in game.sitting_order:\n await player.role.ego_self.send_regular_dawn_start_dm(player)\n # Base dawn length\n await asyncio.sleep(BASE_DAWN)\n # Increment (dawn)\n for _ in range(DAWN_MULTIPLIER):\n if game.has_received_all_expected_dawn_actions():\n break\n await asyncio.sleep(INCREMENT)\n await after_dawn(game)\n\n\ndef calculate_base_day_duration(game):\n \"\"\"Calculate the base day length.\"\"\"\n base_day_length = math.sqrt(2 * game.nb_alive_players)\n base_day_length = math.ceil(base_day_length) + 1\n base_day_length = base_day_length * 60\n return base_day_length\n\n\n@tasks.loop(count = 1)\nasync 
def base_day_loop(duration):\n \"\"\"The base day length during which it's not possible to nominate\"\"\"\n await asyncio.sleep(duration)\n\n\n@tasks.loop(seconds = DEBATE_TIME, count=2)\nasync def debate_timer():\n \"\"\"Debate phase timer, for the time command\"\"\"\n pass\n\n\nasync def day_loop(game):\n \"\"\"Day loop\"\"\"\n\n import botc.switches\n import globvars\n\n # Start day\n await game.make_daybreak()\n # Base day length\n base_day_length = calculate_base_day_duration(game)\n base_day_loop.start(base_day_length)\n\n for _ in range(base_day_length):\n # The master switch has been turned on. Proceed to the next phase.\n if botc.switches.master_proceed_to_night:\n base_day_loop.cancel()\n return\n # The master switch has been turned on. Go to nominations.\n if botc.switches.master_proceed_to_nomination:\n base_day_loop.cancel()\n break\n await asyncio.sleep(1)\n\n # Nominations are open\n msg = botutils.BotEmoji.clocktower + \" \" + nominations_open.format(PREFIX)\n await botutils.send_lobby(msg)\n\n timers = [90, 60, 45, 30, 20, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]\n\n for timer in timers:\n\n globvars.master_state.game.nomination_iteration_date = (\n datetime.datetime.now(), \n timer\n )\n\n msg = botutils.BotEmoji.clocktower + \" \" + nomination_countdown.format(timer)\n await botutils.send_lobby(msg)\n\n countdown = timer\n count = 0\n\n while not nomination_loop.is_running():\n \n # The master switch has been turned on. Proceed to the next phase.\n if botc.switches.master_proceed_to_night:\n return\n\n count += 1\n await asyncio.sleep(1)\n\n # Give a time remaining reminder\n remaining_time = countdown - count\n if remaining_time == 10:\n msg = botutils.BotEmoji.hourglass + \" \" + day_over_soon\n await botutils.send_lobby(msg)\n\n # Time has run out\n if count >= countdown:\n if game.chopping_block:\n player_about_to_die = game.chopping_block.player_about_to_die\n if player_about_to_die:\n await player_about_to_die.role.true_self.on_being_executed(player_about_to_die)\n msg = botutils.BotEmoji.guillotine + \" \" + execution.format(\n game.chopping_block.player_about_to_die.game_nametag, \n game.chopping_block.nb_votes\n )\n else:\n msg = botutils.BotEmoji.clocktower + \" \" + no_execution\n await botutils.send_lobby(msg)\n else:\n msg = botutils.BotEmoji.clocktower + \" \" + no_execution\n await botutils.send_lobby(msg)\n return\n\n while nomination_loop.is_running():\n\n # The master switch has been turned on. Proceed to the next phase.\n if botc.switches.master_proceed_to_night:\n return\n\n await asyncio.sleep(1)\n\n\nasync def before_night(game):\n \"\"\"Run before a regular (not the first) night starts. Distribute regular night dm.\"\"\"\n for player in game.sitting_order:\n await player.role.ego_self.send_regular_night_start_dm(player.user)\n\n\nasync def after_night_1(game):\n \"\"\"Run after night 1 ends. Handle the night 1 end.\"\"\"\n # Send n1 end messages\n await game.compute_night_ability_interactions()\n for player in game.sitting_order:\n await player.role.ego_self.send_n1_end_message(player.user)\n\n\nasync def after_night(game):\n \"\"\"Run after a regular (not the first) night ends. Handle the regular night end.\"\"\"\n await game.compute_night_ability_interactions()\n for player in game.sitting_order:\n await player.role.ego_self.send_regular_night_end_dm(player.user)\n \n\nasync def after_dawn(game):\n \"\"\"Run after the dawn phase. 
Handle the dawn phase end.\"\"\"\n await game.compute_dawn_ability_interactions()\n\n\n@tasks.loop(count = 1)\nasync def master_game_loop(game_obj):\n \"\"\"Master game loop\n\n Cycling works like this:\n Night start\n Night end\n Dawn start\n Dawn end\n Day start\n Day end\n etc.\n\n ----- Night : \n 30 seconds min\n 90 seconds max\n At intervals of 15 seconds when all actions are submitted (45, 60, 75)\n\n ----- Dawn : \n 15 seconds min\n 30 seconds max\n At intervals of 15 seconds (15, 30)\n\n ----- Day: \n 2 * sqrt(total_players) minutes until nomination\n Time until each nomination: 30, 20, 15, and 10 for all subsequent nominations.\n\n ----- Nomination:\n 30 seconds for accusations & defence\n 7 seconds for each vote (fastforwording)\n \"\"\"\n global botc_game_obj\n botc_game_obj = game_obj\n while True:\n # Night\n await night_loop(game_obj)\n # Wear off status\n for player in game_obj.sitting_order:\n for status in player.status_effects:\n status.wear_off()\n # Dawn\n await dawn_loop(game_obj)\n # Wear off status\n for player in game_obj.sitting_order:\n for status in player.status_effects:\n status.wear_off()\n # Day\n await day_loop(game_obj)\n # Check the win con after day\n for player in game_obj.sitting_order:\n player.role.true_self.check_wincon_after_day(player)\n # Wear off status\n for player in game_obj.sitting_order:\n for status in player.status_effects:\n status.wear_off()\n \n\n@master_game_loop.after_loop\nasync def after_master_game_loop():\n global botc_game_obj\n await botc_game_obj.end_game()\n\n\n@master_game_loop.error\nasync def master_loop_error(error):\n \"\"\"Handler of exceptions in master game loop\"\"\"\n\n try:\n raise error\n except Exception:\n await botutils.send_lobby(error_str)\n await botutils.log(botutils.Level.error, traceback.format_exc())\n finally:\n master_game_loop.cancel()\n","repo_name":"Xinverse/Blood-on-the-Clocktower-Storyteller-Discord-Bot","sub_path":"botc/gameloops.py","file_name":"gameloops.py","file_ext":"py","file_size_in_byte":20941,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"42784214434","text":"from tkinter import *\nimport random\nfrom PIL import Image as img\n\nfrom tkinter import filedialog\n\ndef EPressed(): #function\n #main window\n button1 = Button(root, text = 'Input file', command = Input)\n button1.pack(pady=20, padx = 20)\n\ndef DPressed(): #function\n #main window\n button1 = Button(root, text = 'Encrypted file', command = Output)\n button1.pack(pady=20, padx = 20)\n\n\ndef Input():\n\n #function\n\n list=[]\n list_bcd=[]\n lib_ch=[]\n lib_final=[]\n count=0\n lib_final2=''\n lib_final_list=[]\n rand_list=['00','01','10','11']\n root.filename = filedialog.askopenfilename(initialdir = \"C:/Users/SUMANTH/Desktop/\",title = \"Select file\")\n #print(root.filename)\n file_obj = open(root.filename,\"r\")\n z = file_obj.read()\n s = [ord(c) for c in z]\n #print(s)\n\n \n #s=input(\"Enter the input:\")\n def ascii_conv(s):\n for c in s:\n #integer=ord(c)\n integerstr=str(c).zfill(3)\n #print(integerstr)\n list.extend(integerstr)\n #print(list)\n return list\n ascii_conv(s)\n #print(list)\n #print('over')\n \n \n for li in list:\n if li=='0':\n list_bcd.append('0000')\n elif li=='1':\n list_bcd.append('0001')\n elif li=='2':\n list_bcd.append('0010')\n elif li=='3':\n list_bcd.append('0011')\n elif li=='4':\n list_bcd.append('0100')\n elif li=='5':\n list_bcd.append('0101')\n elif li=='6':\n list_bcd.append('0110')\n elif li=='7':\n 
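# '7' maps to the BCD nibble 0111\n            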
list_bcd.append('0111')\n elif li=='8':\n list_bcd.append('1000')\n elif li=='9':\n list_bcd.append('1001')\n\n\n\n \n \n\n \n\n \n def add_bits(list_bcd): \n for lib in list_bcd:\n lib=random.choice(rand_list)+lib\n lib_ch.append(lib)\n \n\n \n \n\n ascii_conv(s)\n add_bits(list_bcd)\n \n #print(lib_ch) \n \n\n \n\n length=int((len(lib_ch))/3)\n #print(length)\n\n while(count<=length-1):\n \n\n lib_final.append(lib_ch[:3])\n #print(lib_final)\n #print(\"\\n\")\n lib_ch=lib_ch[3:]\n #print(lib_ch)\n count=count+1\n \n\n \n\n #print(lib_final)\n #print(lib_ch)\n\n \n\n for i in range(0,length):\n for j in range(0,3):\n lib_final2=lib_final2+lib_final[i][j]\n #print(lib_final2)\n lib_final_list.append(lib_final2)\n lib_final2=''\n #print(lib_final_list)\n\n\n s=[]\n final_list=[]\n\n for i in lib_final_list:\n while i:\n s.append(i[:2])\n i=i[2:]\n final_list.append(s)\n s=[]\n #print(final_list)\n\n\n #main window\n button1 = Button(root, text = 'Pattern to encrypt', command = lambda: Pattern_to_encrypt(final_list))\n button1.pack(pady=20, padx = 20)\n\n \n\n\n \n\ndef Pattern_to_encrypt(final_list):\n \n root.filename = filedialog.askopenfilename(initialdir = \"C:/Users/SUMANTH/Desktop/\",title = \"Select file\")\n #print(root.filename)\n im = img.open(root.filename)\n width, height = im.size\n img_string = im.tobytes()\n with open('outfile.txt', 'w+') as fout:\n #print(fout)\n for char in img_string:\n #print(char,type(char))\n fout.write( format(char,'02x')) \n\n \n inputFile = open('outfile.txt', 'r').read() #Open test.txt file in read mode\n #print inputFile\n count_of_0=0 \n count_of_1=0 \n count_of_2=0 \n count_of_3=0 \n count_of_4=0 \n count_of_5=0\n count_of_6=0 \n count_of_7=0 \n count_of_8=0 \n count_of_9=0 \n count_of_a=0 \n count_of_b=0 \n count_of_c=0 \n count_of_d=0 \n count_of_e=0 \n count_of_f=0 \n for i in inputFile:\n if(i=='0'):\n count_of_0=count_of_0+1\t\n elif(i=='1'):\n count_of_1=count_of_1+1\n elif(i=='2'):\n count_of_2=count_of_2+1\n elif(i=='3'):\n count_of_3=count_of_3+1\n elif(i=='4'):\n count_of_4=count_of_4+1\n elif(i=='5'):\n count_of_5=count_of_5+1\n elif(i=='6'):\n count_of_6=count_of_6+1\n elif(i=='7'):\n count_of_7=count_of_7+1\n elif(i=='8'):\n count_of_8=count_of_8+1\n elif(i=='9'):\n count_of_9=count_of_9+1\n elif(i=='a'):\n count_of_a=count_of_a+1\n elif(i=='b'):\n count_of_b=count_of_b+1\n elif(i=='c'):\n count_of_c=count_of_c+1\n elif(i=='d'):\n count_of_d=count_of_d+1\n elif(i=='e'):\n count_of_e=count_of_e+1\n elif(i=='f'):\n count_of_f=count_of_f+1\n \n \n '''print(\"Number of '0' are:\",count_of_0)\t\t\n print(\"Number of '1' are:\",count_of_1)\t\t\n print(\"Number of '2' are:\",count_of_2)\t\n print(\"Number of '3' are:\",count_of_3)\t\n print(\"Number of '4' are:\",count_of_4)\n print(\"Number of '5' are:\",count_of_5)\n print(\"Number of '6' are:\",count_of_6)\n print(\"Number of '7' are:\",count_of_7)\n print(\"Number of '8' are:\",count_of_8)\n print(\"Number of '9' are:\",count_of_9)\n print(\"Number of 'a' are:\",count_of_a)\n print(\"Number of 'b' are:\",count_of_b)\n print(\"Number of 'c' are:\",count_of_c)\n print(\"Number of 'd' are:\",count_of_d)\n print(\"Number of 'e' are:\",count_of_e)\n print(\"Number of 'f' are:\",count_of_f)'''\n\n a=[count_of_0,count_of_1,count_of_2,count_of_3,count_of_4,count_of_5,count_of_6,count_of_7,count_of_8,count_of_9,count_of_a,count_of_b,count_of_c,count_of_d,count_of_e,count_of_f]\n index=[]\n #print(a)\n\n x=0\n for i in a:\n while i:\n x = i%10\n if x not in index:\n index.append(x)\n break\n 
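# this digit is already used as a shuffle index, so drop it and test the next digit\n            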
else:\n i=int(i/10)\n #print(index)\n\n \n \n \n #print(list)\n #print(index)\n \n #index=[8, 1, 4, 7, 5, 2, 3, 6,9,0]\n index.remove(9)\n\n final_list2=[]\n main_list=[]\n length=len(final_list)\n #print(length)\n\n for i in range(0,length):\n new_list=[final_list[i][j] for j in index]\n final_list2=final_list2+new_list\n #print(final_list2)\n\n leng=len(final_list2) \n for i in range(0,leng):\n for j in range(0,2):\n main_list=main_list+[final_list2[i][j]]\n #print(main_list)\n\n leng=int(len(main_list))\n\n super_final=[]\n\n\n\n\n for i in range(0,leng,3):\n super_final=super_final+[main_list[i]+main_list[i+1]+main_list[i+2]]\n\n \n #print(super_final)\n code=''\n for i in range(0,len(super_final)):\n code=code+str((int(super_final[i],2)))\n\n #print(code)\n\n with open('code.txt', 'w+') as fout:\n for char in code:\n #print(char,type(char))\n fout.write(char)\n\n print(\"Encryption done\")\n\n root.destroy()\n \n\n\n \ndef Output():\n list_bcd=[]\n main_list=[]\n mainn_list=[]\n last_list=[]\n llist=[]\n data=''\n ori_data=''\n #input=input(\"Enter the value\")\n \n root.filename = filedialog.askopenfilename(initialdir = \"C:/Users/SUMANTH/Desktop/\",title = \"Select file\")\n #print(root.filename)\n input_file = open(root.filename,\"r\")\n #print(input)\n for i in input_file.read():\n if i=='0':\n list_bcd.append('000')\n elif i=='1':\n list_bcd.append('001')\n elif i=='2':\n list_bcd.append('010')\n elif i=='3':\n list_bcd.append('011')\n elif i=='4':\n list_bcd.append('100')\n elif i=='5':\n list_bcd.append('101')\n elif i=='6':\n list_bcd.append('110')\n elif i=='7':\n list_bcd.append('111')\n #print(list_bcd)\n\n leng=len(list_bcd)\n for i in range(0,leng):\n for j in range(0,3):\n main_list=main_list+[list_bcd[i][j]]\n #print(main_list)\n \n \n lengt=len(main_list) \n for i in range(0,lengt-1,2):\n temp=main_list[i]+main_list[i+1]\n last_list.append(temp)\n \n #print(last_list)\n \n #index=[8, 1, 4, 7, 5, 2, 3, 6,0]\n \n #print(length3)\n #print(index)\n \n \n\n \n #print(super_list)\n\n #main window\n button1 = Button(root, text = 'Pattern to decrypt', command = lambda: Pattern_to_decrypt(last_list,leng))\n button1.pack(pady=20, padx = 20)\n\n\ndef Pattern_to_decrypt(last_list,leng):\n\n length3=len(last_list)\n super_list=[0]*length3\n \n root.filename = filedialog.askopenfilename(initialdir = \"C:/Users/SUMANTH/Desktop/\",title = \"Select file\")\n #print(root.filename)\n #file_obj = open(root.filename,\"r\")\n #z = file_obj.read()\n im = img.open(root.filename)\n width, height = im.size\n img_string = im.tobytes()\n with open('outfile.txt', 'w+') as fout:\n #print(fout)\n for char in img_string:\n #print(char,type(char))\n fout.write( format(char,'02x')) \n\n \n inputFile = open('outfile.txt', 'r').read() #Open test.txt file in read mode\n #print inputFile\n count_of_0=0 \n count_of_1=0 \n count_of_2=0 \n count_of_3=0 \n count_of_4=0 \n count_of_5=0\n count_of_6=0 \n count_of_7=0 \n count_of_8=0 \n count_of_9=0 \n count_of_a=0 \n count_of_b=0 \n count_of_c=0 \n count_of_d=0 \n count_of_e=0 \n count_of_f=0 \n for i in inputFile:\n if(i=='0'):\n count_of_0=count_of_0+1\t\n elif(i=='1'):\n count_of_1=count_of_1+1\n elif(i=='2'):\n count_of_2=count_of_2+1\n elif(i=='3'):\n count_of_3=count_of_3+1\n elif(i=='4'):\n count_of_4=count_of_4+1\n elif(i=='5'):\n count_of_5=count_of_5+1\n elif(i=='6'):\n count_of_6=count_of_6+1\n elif(i=='7'):\n count_of_7=count_of_7+1\n elif(i=='8'):\n count_of_8=count_of_8+1\n elif(i=='9'):\n count_of_9=count_of_9+1\n elif(i=='a'):\n 
count_of_a=count_of_a+1\n elif(i=='b'):\n count_of_b=count_of_b+1\n elif(i=='c'):\n count_of_c=count_of_c+1\n elif(i=='d'):\n count_of_d=count_of_d+1\n elif(i=='e'):\n count_of_e=count_of_e+1\n elif(i=='f'):\n count_of_f=count_of_f+1\n \n \n '''print(\"Number of '0' are:\",count_of_0)\t\t\n print(\"Number of '1' are:\",count_of_1)\t\t\n print(\"Number of '2' are:\",count_of_2)\t\n print(\"Number of '3' are:\",count_of_3)\t\n print(\"Number of '4' are:\",count_of_4)\n print(\"Number of '5' are:\",count_of_5)\n print(\"Number of '6' are:\",count_of_6)\n print(\"Number of '7' are:\",count_of_7)\n print(\"Number of '8' are:\",count_of_8)\n print(\"Number of '9' are:\",count_of_9)\n print(\"Number of 'a' are:\",count_of_a)\n print(\"Number of 'b' are:\",count_of_b)\n print(\"Number of 'c' are:\",count_of_c)\n print(\"Number of 'd' are:\",count_of_d)\n print(\"Number of 'e' are:\",count_of_e)\n print(\"Number of 'f' are:\",count_of_f)'''\n\n a=[count_of_0,count_of_1,count_of_2,count_of_3,count_of_4,count_of_5,count_of_6,count_of_7,count_of_8,count_of_9,count_of_a,count_of_b,count_of_c,count_of_d,count_of_e,count_of_f]\n index=[]\n #print(a)\n\n x=0\n for i in a:\n while i:\n x = i%10\n if x not in index:\n index.append(x)\n break\n else:\n i=int(i/10)\n #print(index)\n\n \n \n \n #print(list)\n #print(index)\n \n #index=[8, 1, 4, 7, 5, 2, 3, 6,9,0]\n index.remove(9)\n\n\n def shuffle(last_list,super_list,index):\n \n k=0\n for i in index:\n \n super_list[i]=last_list[k]\n \n k=k+1\n\n return last_list,super_list,index\n\n times=int(length3/9)\n for i in range(0,times):\n shuffle(last_list,super_list,index)\n \n \n last_list=last_list[9:]\n index=[x +9 for x in index]\n \n\n \n\n '''print(\"FROMHERE\") \n print(last_list)\n print(super_list)\n print(index)'''\n\n length2=len(super_list)\n #print(length2)\n for i in range(0,length2):\n if i%3==0:\n super_list[i]='none'\n\n #print(super_list)\n\n super_list=[i for i in super_list if i not in('none')]\n #print(super_list)\n \n \n \n \n \n mainn_list=[]\n llist=[]\n data=''\n ori_data=''\n \n for i in range(0,leng):\n for j in range(0,2):\n mainn_list=mainn_list+[super_list[i][j]]\n #print(mainn_list)\n\n length4=len(mainn_list) \n for i in range(0,length4-1,4):\n temp=mainn_list[i]+mainn_list[i+1]+mainn_list[i+2]+mainn_list[i+3]\n llist.append(temp)\n\n #print(llist)\n\n for i in llist:\n if i=='0000':\n data=data+'0'\n elif i=='0001':\n data=data+'1'\n elif i=='0010':\n data=data+'2'\n elif i=='0011':\n data=data+'3'\n elif i=='0100':\n data=data+'4'\n elif i=='0101':\n data=data+'5'\n elif i=='0110':\n data=data+'6'\n elif i=='0111':\n data=data+'7'\n elif i=='1000':\n data=data+'8'\n elif i=='1001':\n data=data+'9'\n '''elif i=='1010':\n data=data+'a'\n elif i=='1011':\n data=data+'b'\n elif i=='1100':\n data=data+'c'\n elif i=='1101':\n data=data+'d'\n elif i=='1110':\n data=data+'e'\n elif i=='1111':\n data=data+'f'''\n\n #print(data)\n\n \n\n\n \n data_len=len(data)\n \n #print(data_len)\n\n for i in range(0,data_len-2,3):\n \n tempor=data[i]+data[i+1]+data[i+2]\n #print(tempor)\n ori_data=ori_data+chr(int(tempor))\n #print(ori_data)\n \n \n\n with open('original.txt', 'w+') as fout:\n #print(fout)\n for char in ori_data:\n #print(char,type(char))\n fout.write(char)\n\n print(\"Decryption done\")\n\n root.destroy()\n\n \nroot = Tk()\nroot.title('PROJECT RCMG')\nroot.geometry('1100x350+500+300')\nroot.configure(background='grey')\nbutton1 = Button(root, text = 'Encrypt', command = EPressed)#,width=100, bg ='yellow', fg='red', 
activebackground='violet', activeforeground='black')\nbutton1.pack(pady=20, padx = 20)\nbutton2 = Button(root, text = 'Decrypt', command = DPressed)\nbutton2.pack(pady=20, padx = 20)\n\n\nroot.mainloop()\n","repo_name":"mnsumanth/data-encrytion-without-encryption","sub_path":"RCGM/Project RCGM.py","file_name":"Project RCGM.py","file_ext":"py","file_size_in_byte":15480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72827159183","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view\nfrom django.views.decorators.csrf import csrf_protect\nfrom elasticsearch_dsl.connections import connections\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\nfrom elasticsearch_dsl import Q\nimport json\nfrom django import template\nfrom random import randint\nimport re\nimport itertools\nimport unicodedata\n\nregister = template.Library()\n\n# Create your views here.\n\nclient = Elasticsearch([settings.ELASTIC_HOST])\n\n@api_view(['GET'])\ndef index(request):\n\treturn render(request, 'index.html')\n\n\n@api_view(['POST'])\ndef get_search(request):\n\t#print(' request .. ', request.POST)\n\trestrictive = True\n\tcheck_keywords_in_description = True\n\tcheck_keywords_in_bio = True\n\tnot_current_position = False\n\tif '0' in request.POST.get('restrictive'):\n\t\trestrictive = False\n\n\t\n\tif not 'descCheckK' in request.POST:\n\t\tcheck_keywords_in_description = False\n\tif not 'bioCheckK' in request.POST:\n\t\tcheck_keywords_in_bio = False\n\n\tif 'notCurrentPosition' in request.POST:\n\t\tnot_current_position = True\n\n\n\trequest_obj = {\n\t'not_current_position': not_current_position,\n\t'check_keywords_in_description': check_keywords_in_description,\n\t'check_keywords_in_bio': check_keywords_in_bio,\n\t'year_recency': request.POST['yearRecency'],\n\t'restrictive': restrictive,\n\t'locations': request.POST.get('locationsInput'), \n\t'companies': request.POST.get('companiesInput'), \n\t'keywords': request.POST.get('keywordsInput'), \n\t'titles': request.POST.get('titlesInput') }\n\tresult = process_search_request(request_obj)\n\n\treturn render(request, 'show.html', result)\n\n\ndef process_search_request(request_obj):\n\t#print('REQUEST OBJJJ ... 
', request_obj)\n\tcompanies = request_obj['companies'].replace('\"','')\n\ttitles = request_obj['titles'].replace('\"','')\n\tkeywords = request_obj['keywords'].replace('\"','')\t\n\t#locations = request_obj['locations'].replace('\"','')\n\tlocations = 'locations'\n\n\tif companies == '' and titles == '' and keywords == '':\n\t\treturn make_default_request();\n\telse:\n\t\trestrictive = True\n\t\tif request_obj['restrictive'] == True:\n\t\t\treturn make_restrictive_request(companies, titles, keywords, locations, request_obj)\n\t\telse:\n\t\t\treturn make_non_restrictive_request(companies, titles, keywords, locations, request_obj)\n\t\n\ndef make_default_request():\t\n\ts = Search(index=settings.ELASTIC_INDEX).using(client).query()\n\tprint(s.to_dict())\t\n\treturn execute_es_request(s)\n\ndef make_non_restrictive_request(companies, titles, keywords, locations, request_obj):\n\tmust_list = []\n\t# if (get_companies_query_string(companies) != None):\n\t# \tmust_list.append(get_companies_query_string(companies))\n\tif companies != '':\n\t\tcompanies = get_refined_titles(companies.lower())\n\t\tshould_list_companies = []\n\t\tfor company in companies:\n\t\t\tshould_list_companies.append(get_companies_match_phrase(company))\n\t\tmust_list.append(Q('bool', should=should_list_companies))\n\tif (get_titles_query_string(titles) != None):\n\t\tmust_list.append(get_titles_query_string(titles))\n\t# if (request_obj['check_keywords_in_description'] == True):\n\t# \tif (get_description_query_string(keywords) != None):\n\t# \t\tmust_list.append(get_description_query_string(keywords))\n\t\n\t# if (request_obj['check_keywords_in_bio'] == True):\n\t# \tif (get_bio_query_string(keywords) != None):\n\t# \t\tmust_list.append(get_bio_query_string(keywords))\n\n\t# if (get_locations_query_string(locations) != None):\n\t# \tmust_list.append(get_locations_query_string(locations))\n\n\tif (request_obj['check_keywords_in_description'] == True) and (request_obj['check_keywords_in_bio'] == True):\n\t\tif (get_description_bio_query_string(keywords) != None):\n\t\t\tmust_list.append(get_description_bio_query_string(keywords))\n\telif (request_obj['check_keywords_in_description'] == True) and (request_obj['check_keywords_in_bio'] == False):\n\t\tif (get_description_query_string(keywords) != None):\n\t\t\tmust_list.append(get_description_query_string(keywords))\n\telif (request_obj['check_keywords_in_description'] == False) and (request_obj['check_keywords_in_bio'] == True):\n\t\tif (get_bio_query_string(keywords) != None):\n\t\t\tmust_list.append(get_bio_query_string(keywords))\n\n\tif (request_obj['year_recency'] != ''):\n\t\tmust_list.append(Q('range', positions__enddateyear={'gte': int(request_obj['year_recency'])}))\n\n\tif (request_obj['not_current_position'] == True):\t\t\n\t\tmust_list.append(Q('bool', must_not=Q('match', positions__position_type='Current')))\n\n\n\tq = Q('bool',\n\t\tmust=must_list)\n\t\n\ts = Search(index=settings.ELASTIC_INDEX).using(client).query('nested', path=\"positions\", query=q, inner_hits={\"highlight\": {\n\t\t \"pre_tags\" : [\"\"],\n\t\t \"post_tags\" : [\"\"],\n \"fields\": {\n \"positions.companyname\": {},\n \"positions.description\": {},\n \"positions.title\": {}\n }\n }})[0:100]\t\n\n\tprint(s.to_dict())\t\n\treturn execute_es_request(s)\n\n\ndef make_restrictive_request(companies, titles, keywords, locations, request_obj):\n\tmust_list = []\n\n\tif companies != '':\n\t\tcompanies = get_refined_titles(companies.lower())\n\t\tshould_list_companies = []\n\t\tfor company in 
companies:\n\t\t\tshould_list_companies.append(get_companies_match_phrase(company))\n\t\tmust_list.append(Q('bool', should=should_list_companies))\t\t\n\n\tif titles != '':\n\t\ttitles = get_refined_titles(titles.lower())\n\t\tshould_list_titles = []\n\t\tfor title in titles:\t\t\t\n\t\t\tshould_list_titles.append(get_titles_match_phrase(title))\n\t\tmust_list.append(Q('bool', should=should_list_titles))\n\n\tif (request_obj['check_keywords_in_description'] == True) and (request_obj['check_keywords_in_bio'] == True):\n\t\tif keywords != '':\n\t\t\tkeywords_description_bio = get_refined_titles(keywords.lower())\n\t\t\tshould_list_description_bio = []\n\t\t\tfor keyword in keywords_description_bio:\n\t\t\t\tshould_list_description_bio.append(get_description_match_phrase(keyword))\n\t\t\t\tshould_list_description_bio.append(get_bio_match_phrase(keyword))\n\t\t\tmust_list.append(Q('bool', should=should_list_description_bio))\n\telif (request_obj['check_keywords_in_description'] == True) and (request_obj['check_keywords_in_bio'] == False):\n\t\tif keywords != '':\n\t\t\tkeywords_description = get_refined_titles(keywords.lower())\t\n\t\t\tshould_list_description = []\n\t\t\tfor keyword in keywords_description:\n\t\t\t\tshould_list_description.append(get_description_match_phrase(keyword))\n\t\t\tmust_list.append(Q('bool', should=should_list_description))\n\telif (request_obj['check_keywords_in_description'] == False) and (request_obj['check_keywords_in_bio'] == True):\n\t\tif keywords != '':\n\t\t\tkeywords_bio = get_refined_titles(keywords.lower())\n\t\t\tshould_list_bio = []\n\t\t\tfor keyword in keywords_bio:\n\t\t\t\tshould_list_bio.append(get_bio_match_phrase(keyword))\n\t\t\tmust_list.append(Q('bool', should=should_list_bio))\n\n\t# if (request_obj['check_keywords_in_description'] == True):\n\t# \tif keywords != '':\n\t# \t\tkeywords_description = get_refined_titles(keywords.lower())\t\n\t# \t\tshould_list_description = []\n\t# \t\tfor keyword in keywords_description:\n\t# \t\t\tshould_list_description.append(get_description_match_phrase(keyword))\n\t# \t\tmust_list.append(Q('bool', should=should_list_description))\n\n\t# if (request_obj['check_keywords_in_bio'] == True):\n\t# \tif keywords != '':\n\t# \t\tkeywords_bio = get_refined_titles(keywords.lower())\n\t# \t\tshould_list_bio = []\n\t# \t\tfor keyword in keywords_bio:\n\t# \t\t\tshould_list_bio.append(get_bio_match_phrase(keyword))\n\t# \t\tmust_list.append(Q('bool', should=should_list_bio))\n\n\tif (request_obj['year_recency'] != ''):\n\t\tmust_list.append(Q('range', positions__enddateyear={'gte': int(request_obj['year_recency'])}))\n\n\tif (request_obj['not_current_position'] == True):\t\t\n\t\tmust_list.append(Q('bool', must_not=Q('match', positions__position_type='Current')))\n\n\t#print(' must list .. 
', must_list)\n\t\n\tq = Q('bool',\n\t\tmust=must_list)\n\n\ts = Search(index=settings.ELASTIC_INDEX).using(client).query('nested', path=\"positions\", query=q, inner_hits={\"highlight\": {\n\t \"pre_tags\" : [\"\"],\n\t \"post_tags\" : [\"\"],\n \"fields\": {\n \"positions.companyname\": {},\n \"positions.description\": {},\n \"positions.title\": {}\n }\n }})[0:100]\n\n\tprint(s.to_dict())\n\treturn execute_es_request(s)\n\n\ndef execute_es_request(es_request):\n\tsearch_result = es_request.execute()\n\tres = json.loads(json.dumps(search_result.to_dict()).replace('_source','source').\n\t\treplace('positions.description','description').\n\t\treplace('positions.companyname','companyname').\n\t\treplace('positions.title','title').\n\t\treplace('positions.startdatemonth','startdatemonth').\n\t\treplace('positions.position_type','position_type').\n\t\treplace('positions.startdateyear','startdateyear').\n\t\treplace('positions.companyurl','companyurl').\n\t\treplace('positions.enddatemonth','enddatemonth').\n\t\treplace('positions.enddateyear','enddateyear'))\n\t\n\texperts_list = []\n\tfor hit in res['hits']['hits']:\n\t\t# print(\" --------- -------- --------- -------- ---------\")\n\t\t# print(\"HIT ......... \", hit['source']['fullname'] + ' .... ' + hit['_id'])\n\t\t# print(\" --------- -------- --------- -------- ---------\")\n\t\tmatches = []\n\t\tif 'inner_hits' in hit:\n\t\t\tfor match in hit['inner_hits']['positions']['hits']['hits']:\t\t\t\n\t\t\t\tif 'highlight' in match:\n\t\t\t\t\tmatches.append({\n\t\t\t\t\t'id_random': str(randint(0, 999999999)),\n\t\t\t\t\t'title': ' '.join(match['highlight']['title'] if 'title' in match['highlight'] else ''),\n\t\t\t\t\t'companyname': '. '.join(match['highlight']['companyname'] if 'companyname' in match['highlight'] else 'N/A'),\n\t\t\t\t\t'description': '. 
'.join(match['highlight']['description']) if 'description' in match['highlight'] else 'No matching description'},\n\t\t\t\t\t)\n\t\texperts_list.append({ 'source': hit['source'], 'matches': matches })\n\treturn {'count': search_result.hits.total, 'experts': experts_list}\t\n\t\t\n\n\ndef get_bio_match_phrase(keyword):\n\tif keyword != '':\n\t\treturn Q('match_phrase', positions__short_bio=keyword)\n\telse:\n\t\treturn\n\ndef get_companies_match_phrase(company):\n\tif company != '':\n\t\treturn Q('match_phrase', positions__companyname=company)\n\telse:\n\t\treturn\n\ndef get_titles_match_phrase(title):\n\tif title != '':\n\t\treturn Q('match_phrase', positions__title=title)\n\telse:\n\t\treturn\n\ndef get_description_match_phrase(keyword):\n\tif keyword != '':\n\t\treturn Q('match_phrase', positions__description=keyword)\n\telse:\n\t\treturn\n\ndef get_bio_query_string(keywords):\n\tif keywords != '':\n\t\treturn Q('query_string', default_field='short_bio', query=keywords)\n\telse:\n\t\treturn\n\ndef get_companies_query_string(companies):\n\tif companies != '':\n\t\treturn Q('query_string', default_field='positions.companyname', query=companies)\n\telse:\n\t\treturn\n\ndef get_titles_query_string(titles):\n\tif titles != '':\n\t\treturn Q('query_string', default_field='positions.title', query=titles)\n\telse:\n\t\treturn\t\n\ndef get_description_query_string(keywords):\n\tif keywords != '':\n\t\treturn Q('query_string', default_field='positions.description', query=keywords)\n\telse:\n\t\treturn\n\ndef get_description_bio_query_string(keywords):\n\tif keywords != '':\n\t\treturn Q('query_string', fields=['positions.description', 'short_bio'], query=keywords)\n\telse:\n\t\treturn\t\n\ndef get_locations_query_string(locations):\n\tif locations != '':\n\t\treturn Q('query_string', default_field='positions.lip_exact_entry', query=locations)\n\telse:\n\t\treturn\n\ndef get_refined_titles(title):\t\n\ttitle = re.sub('[^[\\W_]-/.&\\' ]+', '', title)\n\ttitle = title.replace('(', '').replace(')', '')\n\ttitle = strip_accents(title)\n\ttitles = tsplit(title, (' or ', ' and '))\t\n\treturn titles\n\ndef strip_accents(value):\n    return ''.join(char for char in\n                   unicodedata.normalize('NFKD', value)\n                   if unicodedata.category(char) != 'Mn') \n\ndef tsplit(string, delimiters):\n    \"\"\"Behaves like str.split but supports multiple delimiters.\"\"\"\n    delimiters = tuple(delimiters)\n    stack = [string, ]\n\n    for delimiter in delimiters:\n        for i, substring in enumerate(stack):\n            substack = substring.split(delimiter)\n            stack.pop(i)\n            for j, _substring in enumerate(substack):\n                stack.insert(i + j, _substring)\n\n    return stack \n","repo_name":"kartikn27/elastic_poc","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21713347592","text":"#YOU HAVE TO USE COMMAND PROMPT TO VERIFY CODE!!!!!\n#commands you can use are listed at the end\n\n\n\nimport getopt\n#getopt: library for handling command-line arguments\nimport sys\n#sys: library for interacting with the system (for example, for exiting the program)\nfrom PIL import Image\n#PIL (Python Imaging Library): library for working with images\n\ndef usage():\n    print('Usage: decode.py [-v] [FILE]')\n    sys.exit(2)\n    #The function prints a message about how to use the program, then terminates with exit code 2\n\ndef main(argv): #-Gets the command-line arguments (excluding the script name) and starts processing them
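\n    # (example invocation, with a hypothetical file name: python decode.py -v picture.png)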
\n    if len(argv) == 0:\n        usage() #-If there are no command-line arguments, usage() is called\n    verbose = False\n    input = ''\n    for arg in argv:\n        if arg == '-v':\n            verbose = True\n        else:\n            input = arg #-Processes the arguments and decides whether verbose output mode (-v) should be enabled\n    if len(input) == 0:\n        usage()\n    image = Image.open(input)\n    width, height = image.size\n    #What main(argv) does:\n    #-Determines the input file name from the arguments.\n    #-Opens the image with the PIL library and reads its dimensions.\n    #-Then starts decoding the data hidden in the image\n    \n    i = 0\n    value = 0\n    bits = ''\n    text = ''\n    for y in range(height):\n        for x in range(width):\n            pixel = image.getpixel((x, y))\n            mean = (pixel[0] + pixel[1] + pixel[2]) / 3\n            bit = 1 if mean >= 128 else 0\n            bits += str(bit)\n            value = value | (bit << (7 - i))\n            i += 1\n            if i >= 8:\n                text += chr(value)\n                value = 0\n                i = 0\n        bits += '\\n' #Builds the bit string (bits) and packs a value for every 8 bits.\n    if verbose:\n        print('Input image: {}x{}'.format(width, height))\n        print('Bits:')\n        print(bits)\n        print('Result string:')\n        print(text) #Once 8 bits have been collected, the value is converted to an ASCII character and appended to the text string (text)\n\nif __name__ == '__main__':\n    main(sys.argv[1:]) #After processing all pixels, the resulting text is printed.\n\n#WHAT THIS CODE DOES!!!!!:\n#-The program walks over every pixel of the image, computes the average brightness (over the RGB components) and derives a bit from it (0 or 1) (0 = black, 1 = white)\n#-In other words, this code takes an image, extracts bits from the brightness of each pixel, packs them into bytes and decodes them into a text string\n#-The code inside this section runs only when the script is executed directly, not when it is imported as a module\n# W A R N I N G\n#This code was written for my own purposes, to decode a game file. Purely out of curiosity\n#The code works on my machine\n#THIS CODE IS NOT PART OF ANY RESEARCH, IT IS ONLY PRACTICE\n#I spent more than a month unraveling that file...\n\n# !!! C O M M A N D S !!! 
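\n# (a rough usage sketch: run the script on a PNG and pipe the output through base64 -d to recover the payload; the file names below are placeholders)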
\n\n#(path of the folder containing the \"py\" file (shown automatically)) nano ../Scripts/decode.py\n#(path of the folder containing the \"py\" file (shown automatically)) chmod +x ../Scripts/decode.py\n#(path of the folder containing the \"py\" file (shown automatically)) ../Scripts/decode.py\n#(path of the folder containing the \"py\" file (shown automatically)) ../Scripts/decode.py -v (name of the \"png\" file).png\n\n#~~ Assorted characters are displayed ~~\n\n#(path of the folder containing the \"py\" file (shown automatically)) ../Scripts/decode.py -v (name of the \"png\" file).png | less\n\n#~~ The digits 0 and 1 are displayed ~~\n\n#(path of the folder containing the \"py\" file (shown automatically)) ../Scripts/decode.py (name of the \"png\" file).png | base64 -d | less\n\n#~~ The decoded result is displayed ~~\n\n# T H E   E N D\n\n\n","repo_name":"MCDream3/Can_be_usefull","sub_path":"decode0101.py","file_name":"decode0101.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37895975959","text":"# coding=utf-8\n\"\"\"\nProblem description\n\nNote: the dictionary may contain duplicate words\nInput description:\nFirst input the number of words in the dictionary, then input those n words as the dictionary words.\nInput a word and look up the number of its brother words in the dictionary\nThen input a number n\n\nOutput description:\nBased on the input, output the number of brother words found\n\nExample 1\nInput\n3 abc bca cab abc 1\nOutput\n2\nbca\n\"\"\"\n\n# The problem statement is terribly worded; a trivial problem with more traps than requirements.\n# 1. The output is not one line but two: the count and the corresponding word.\n# 2. Check: if the brother-word list is empty, or the given number exceeds the list length, that part is not printed...\n# 3. A simple problem forcibly bumped up to hard difficulty. Hope you are happy.\n# Approach 1\nfrom collections import defaultdict\n\nwhile True:\n    try:\n        dd = defaultdict(list)\n        a = input().split()\n        # words: the dictionary words; lookup: the word to look up; num: the index of the brother word to report; brothers: the brother words found\n        words, lookup, num, brothers = a[1:1 + int(a[0])], a[-2], int(a[-1]), []\n        for i in words:\n            dd[\"\".join(sorted(i))].append(i)\n        for i in dd[\"\".join(sorted(lookup))]:\n            if i != lookup: brothers.append(i)\n        # The next two lines took half an hour of debugging.\n        print(len(brothers))\n        if brothers and num <= len(brothers):\n            print(sorted(brothers)[num - 1])\n    except:\n        break\n\n# Approach 2\nwhile True:\n    try:\n        s = input().split(' ')\n        N = int(s.pop(0))\n        n = int(s.pop())\n        if len(s) == N:\n            print(0)\n            break\n        if n > N:\n            print(0)\n            break\n        b = s.pop()\n        bob = sorted(b)\n\n        s = sorted(s)\n        ce, m = 0, ''\n        for j in s:\n            if sorted(j) == bob and j != b:\n                ce += 1\n                if ce == n:\n                    m = j\n        print(ce)\n        if m:\n            print(m)\n    except:\n        break\n\n# Approach 3\nwhile True:\n    try:\n        s = input().split(\" \")\n        sl = s[1:int(s[0]) + 1]\n        st = s[int(s[0]) + 1]\n        sn = int(s[int(s[0]) + 2])  # the requested 1-based index of the brother word\n        ans = []\n        for i in sl:\n            if i != st and sorted(list(i)) == sorted(list(st)):  # the candidate must differ from the original word yet be a brother word\n                ans.append(i)\n        ans.sort()  # sort the brother words\n        print(len(ans))  # print the number of brother words\n        if sn <= len(ans):  # look up the brother word with the requested index\n            print(ans[sn - 1])  # indices start at 1, lists start at 0\n    except:\n        break\n","repo_name":"susebing/HJ108","sub_path":"pass/易错/HJ27 查找兄弟单词.py","file_name":"HJ27 查找兄弟单词.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72679425742","text":"from collections import Counter\n\ndef anagram(s):\n    n = len(s)\n    if n % 2 != 0:\n        return -1\n    else:\n        count = 0\n        s1, s2 = Counter(s[:n//2]), Counter(s[n//2:])\n        for char in s2:\n            current = s2[char] - s1.get(char, 0)\n            if current > 0:\n                count += current\n        return count\n","repo_name":"Bhavit1008/e-connect","sub_path":"python/Anagram.py","file_name":"Anagram.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41164054417","text":"graph = {\r\n    \"Пекин\": {\"Дели\": 3784, \"Токио\": 2100, \"Сеул\": 
955},\r\n    \"Дели\": {\"Пекин\": 3784, \"Джакарта\": 5828, \"Тегеран\": 3369},\r\n    \"Токио\": {\"Пекин\": 2100, \"Джакарта\": 5786, \"Бангкок\": 4600},\r\n    \"Джакарта\": {\"Дели\": 5828, \"Токио\": 5786, \"Бангкок\": 1425},\r\n    \"Сеул\": {\"Пекин\": 955, \"Бангкок\": 3687},\r\n    \"Бангкок\": {\"Токио\": 4600, \"Джакарта\": 1425, \"Сеул\": 3687},\r\n    \"Тегеран\": {\"Дели\": 3369}\r\n}\r\n\r\ndistances = {}\r\nvisited = set()\r\n\r\n\r\ndef find_min_distance(graph):\r\n    min_distance = float(\"inf\")\r\n    min_vertex = None\r\n    for vertex in graph:\r\n        if vertex not in visited and distances[vertex] < min_distance:\r\n            min_distance = distances[vertex]\r\n            min_vertex = vertex\r\n    return min_vertex\r\n\r\n\r\ndef dijkstra(graph, start):\r\n    for vertex in graph:\r\n        distances[vertex] = float(\"inf\")\r\n    distances[start] = 0\r\n\r\n    while len(visited) < len(graph):\r\n        current_vertex = find_min_distance(graph)\r\n        visited.add(current_vertex)\r\n        for neighbor in graph[current_vertex]:\r\n            if neighbor not in visited:\r\n                distance = distances[current_vertex] + graph[current_vertex][neighbor]\r\n                if distance < distances[neighbor]:\r\n                    distances[neighbor] = distance\r\n\r\n\r\ndijkstra(graph, \"Пекин\")\r\n\r\n# for vertex in distances:\r\n#     print(f\"Distance from Пекин to {vertex}: {distances[vertex]} km.\")\r\n\r\nprint(f\"Distance from Пекин to {'Джакарта'}: {distances['Джакарта']} km.\")\r\n","repo_name":"al-shimchenko/ads","sub_path":"hw3/dijkstra_Asia.py","file_name":"dijkstra_Asia.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33406008256","text":"# -*- coding: utf-8 -*-\nfrom typing import Any\nimport logging\nfrom matos_aws_provider.lib import factory\nfrom matos_aws_provider.lib.base_provider import BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\nclass AwsSQLEventSubscription(BaseProvider):\n    \"\"\"AWS sql plugin\"\"\"\n\n    def __init__(\n        self,\n        resource: Any,\n        **kwargs,\n    ) -> None:\n        \"\"\"Constructor method\"\"\"\n        try:\n            super().__init__(**kwargs, client_type=\"rds\")\n            self.sts = self.client(\"sts\")\n            self.resource = resource\n\n        except Exception as ex:\n            logger.error(ex)\n\n    def get_inventory(self) -> Any:\n        \"\"\"\n        Service discovery\n        \"\"\"\n        response = self.conn.describe_event_subscriptions()\n        subscriptions = [\n            {**item, \"type\": \"sql_event_subscription\"}\n            for item in response.get(\"EventSubscriptionsList\", [])\n        ]\n        return subscriptions\n\n    def get_resources(self) -> Any:\n        \"\"\"\n        Fetches instance details.\n\n        Args:\n        return: dictionary object.\n        \"\"\"\n        aws_account_id = self.sts.get_caller_identity()[\"Account\"]\n        if isinstance(self.resource, dict):\n            self.resource[\"aws_account_id\"] = aws_account_id\n        elif isinstance(self.resource, list):\n            for item in self.resource:\n                item[\"aws_account_id\"] = aws_account_id\n        return self.resource\n\n\ndef register() -> None:\n    \"\"\"Register plugin\"\"\"\n    factory.register(\"sql_event_subscription\", AwsSQLEventSubscription)\n","repo_name":"cloudmatos/matos-aws-provider","sub_path":"src/matos_aws_provider/plugins/sql_event_subscription.py","file_name":"sql_event_subscription.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26693484557","text":"from utils.OakRunner import OakRunner\nfrom utils.draw import displayFPS\nimport depthai as dai\nimport cv2\n\n\n\ndef init(runner, device):\n    
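# keep a handle to the device's \"depth\" XLink queue so process() can poll the newest frame\n    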
runner.output_queues[\"depth\"] = device.getOutputQueue(name=\"depth\", maxSize=1, blocking=False)\n\n\ndef process(runner):\n    frame = runner.output_queues[\"depth\"].get().getFrame()\n    frame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX)\n    frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)\n    displayFPS(frame, runner.getFPS())\n    cv2.imshow(\"disparity\", frame)\n\n\nrunner = OakRunner()\npipeline = runner.getPipeline()\n\nrunner.setMonoDepth(sensor_resolution=dai.MonoCameraProperties.SensorResolution.THE_400_P)\nstereo_output_stream = pipeline.createXLinkOut()\nstereo_output_stream.setStreamName(\"depth\")\n\nstereo = runner.getStereo()\nstereo.disparity.link(stereo_output_stream.input)\n\nrunner.run(process=process, init=init)","repo_name":"Ikomia-dev/ikomia-oakd","sub_path":"VI-depth/mono/depth_map.py","file_name":"depth_map.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"40331849030","text":"# coding:utf-8\nfrom flask import render_template, request, redirect, url_for, flash, session\nfrom admin import admin\nfrom admin.forms.post import PostForm\nfrom admin.forms.search import SearchForm\nfrom admin.helper import content_file_handle, tags_handle, field_obj_set, to_dict\nfrom common.extends import db\nfrom common.models import Post, Classify, Style, Tag, Review\n\n@admin.route('/post', methods=['GET', 'POST'])\ndef post():\n    page = request.args.get('page',1,type=int)\n    form = SearchForm(request.form)\n    form.cid.choices = [(v.id, v.title) for v in db.session.query(Classify.id, Classify.title).all()]\n    form.sid.choices = [(v.id, v.name) for v in Style.query.all()]\n    if form.validate_on_submit():\n        keywords = form.keywords.data\n        posts = Post.query.filter_by(cid=form.cid.data, sid=form.sid.data).order_by(Post.id.desc())\n        if not form.keywords.data:\n            posts = posts.paginate(page=page, error_out=False)\n        else:\n            posts = posts.filter(Post.title.ilike('%'+keywords+'%')).paginate(page=page, error_out=False)\n    else:\n        posts = Post.query.filter(Post.sid>0).order_by(Post.id.desc()).paginate(page=page, error_out=False)\n    args = dict(endpoint='admin.post')\n    title = 'Post management'\n    data = dict(title=title, pagination=posts, args=args, form=form)\n    return render_template('admin/post.html', **data)\n\n@admin.route('/post/edit/<id>', methods=['GET', 'POST'])\ndef post_edit(id):\n    post = Post.query.get(id)\n    # join the post's tags into a comma-separated string\n    tags = ','.join([tag.name for tag in post.tags])\n    # convert the record into the form's default data\n    data = to_dict(PostForm, post)\n    data['tags'] = tags\n    form = PostForm(data=data)\n    form.cid.choices = [(v.id, v.title) for v in db.session.query(Classify.id, Classify.title).all()]\n    form.sid.choices = [(v.id, v.name) for v in Style.query.all()]\n    # did the form validate successfully\n    if form.validate_on_submit():\n        form.content.data =content_file_handle(form.content.data, old_content=post.content)\n        form.tags.data = tags_handle(form.tags.data,old=tags)\n        post = field_obj_set(post,form.data)\n        db.session.add(post)\n        db.session.commit()\n        db.session.close()\n        flash('Updated successfully!', category='ok')\n        return redirect(url_for('admin.post'))\n    title = 'Update post'\n\n    data = dict(title=title, form=form, tags=Tag.tags(), id=id)\n    return render_template('admin/post.form.html', **data)\n\n@admin.route('/post/add', methods=['GET', 'POST'])\ndef post_add():\n    form = PostForm()\n    form.cid.choices = [(v.id, v.title) for v in db.session.query(Classify.id, Classify.title).all()]\n    form.sid.choices = [(v.id, v.name) for v in Style.query.all()]\n    form.author.data = session.get('nikename')
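  # pre-fill the author field from the session (the 'nikename' key is kept exactly as the original app spells it)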
\n    # did the form validate successfully\n    if form.validate_on_submit():\n        form.tags.data = tags_handle(form.tags.data)\n        form.content.data = content_file_handle(form.content.data)\n        post = field_obj_set(Post(),form.data)\n        db.session.add(post)\n        db.session.commit()\n        flash('Added successfully!', category='ok')\n        return redirect(url_for('admin.post_add'))\n    title = 'New post'\n    data = dict(title=title, tags=Tag.tags(), form=form)\n    return render_template('admin/post.form.html', **data)\n\n@admin.route('/post/del')\ndef post_del():\n    all = request.args.get('all',0)\n    ids = [id for id in all.split(',') if id.isdigit()]\n    if not ids:\n        flash('No such data', category='err')\n        return redirect(url_for('admin.post'))\n    # fetch every post that is to be deleted\n    posts = db.session.query(Post).filter(Post.id.in_(ids)).all()\n    for post in posts:\n        # delete the images embedded in the post\n        content_file_handle('',old_content=post.content)\n        # delete tags that are no longer referenced\n        tags_handle('',old=','.join([tag.name for tag in post.tags]))\n        # delete the comments\n        Review.query.filter(Review.pid == post.id).delete()\n        db.session.delete(post)\n    db.session.commit()\n    db.session.close()\n    flash('Posts deleted', category='ok')\n    return redirect(url_for('admin.post'))","repo_name":"mina998/zblog","sub_path":"admin/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40754708965","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.tools.translate import _\nfrom openerp.addons.base_openprod.common import get_form_view\n\n\nclass calendar_event(models.Model):\n    _inherit = 'calendar.event'\n\n    \n    def get_default_values(self):\n        res = super(calendar_event, self).get_default_values()\n        if self.env.context.get('active_model', '') == 'sale.order' and self.env.context.get('active_id', ''):\n            res.update(self.env['sale.order'].browse(self.env.context['active_id']).get_sale_action_vals())\n        \n        return res\n    \n    \n    @api.model\n    def _link_object_get(self):\n        res = super(calendar_event, self)._link_object_get()\n        res.append(('sale', _('Sale')))\n        return res\n    \n    \n    @api.one\n    def _compute_linked_objects(self):\n        \"\"\"\n        Override so the selection function field is computed when a sale\n        order is set\n        \"\"\"\n        super(calendar_event, self)._compute_linked_objects()\n        if self.sale_id:\n            self.link_object = 'sale'\n    \n    \n    #===========================================================================\n    # COLUMNS\n    #===========================================================================\n    sale_id = fields.Many2one('sale.order', string='Sale', required=False, ondelete='restrict')\n\n    \n    def fields_to_check(self):\n        \"\"\"\n        Override of the list so this field is checked too\n        \"\"\"\n        fields_list = super(calendar_event, self).fields_to_check()\n        fields_list.append('sale_id')\n        return fields_list\n    \n    \n    @api.onchange('sale_id')\n    def _onchange_sale_id(self):\n        \"\"\"\n        Set the function field to the proper value so the attrs are handled\n        \"\"\"\n        if self.sale_id:\n            self.link_object = 'sale'\n        else:\n            self.link_object = 'none'\n    ","repo_name":"kazacube-mziouadi/ceci","sub_path":"OpenPROD/openprod-addons/sale/calendar_event.py","file_name":"calendar_event.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37362427910","text":"import tkinter\nfrom tkinter import filedialog, messagebox\n\nwindow = tkinter.Tk()
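\n# hide the root window so only the dialogs appear, and keep them above other windows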
\nwindow.wm_withdraw()\nwindow.wm_attributes(\"-topmost\", True)\n\ndef selectFile():\n    filename = ''\n    try:\n        filename = filedialog.askopenfilename(filetypes=[('Image Files', ('.png', '.jpg', '.jpeg', '.gif'))])\n        if not filename:\n            raise Exception('No file was selected')\n    except Exception as e:\n        messagebox.showerror('Error', str(e))\n    print(f'{filename}')\n    return filename","repo_name":"MatiasBorquez/Vision_Sistem","sub_path":"mTk.py","file_name":"mTk.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4631309596","text":"import matplotlib.pyplot as plt\nimport time\n\ndef main():\n    f = open('testCase1.txt', 'r')\n    aux = [int(x) for x in f.readline().split()]\n    fig, ax = plt.subplots()\n    ax.set_xlim((0, aux[0]))\n    ax.set_ylim((0, aux[1]))\n    nCircles = int(f.readline())\n    cList = [] #list of circles, given by their radii.\n    for i in range(nCircles):\n        radius = int(f.readline())\n        cList.append(radius)\n    cList.sort() #O(nlgn)\n    #start = time.time()\n    insertCircles(cList, fig, ax, aux[0], aux[1])#Based on test results, it appears to behave with O(n) complexity.\n    #end = time.time()\n    #print(end-start)\n    \ndef insertCircles(cList, fig, ax, xlim, ylim):\n    '''\n    Insertion starts at the bottom-left corner of the rectangle and adds\n    circles from the smallest radius to the largest. If the circle to be added\n    does not fit, the algorithm draws an imaginary line at a height equal to\n    the diameter of the largest circle inserted in that strip.\n    '''\n    currentx = 0\n    currentyLine = 0\n    for c in cList:\n        diam = 2*c\n        if currentx+diam <= xlim and currentyLine+diam <= ylim:\n            circle = plt.Circle((currentx+c, currentyLine+c), c, color='r')\n            currentx += diam\n        elif currentx+diam > xlim:\n            currentx = 0\n            currentyLine += diam\n            if currentyLine+diam > ylim:\n                continue\n            circle = plt.Circle((currentx+c, currentyLine+c), c, color='r')\n            currentx += diam\n        else:\n            continue #fits horizontally but not vertically: no circle was created this pass\n        ax.add_artist(circle)\n    \n    fig.savefig('circles.png')\n    #plt.show() \n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"wolima/python","sub_path":"exercicios/trab3.py","file_name":"trab3.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32123131111","text":"import sys\n\nif sys.version_info < (2, 7):\n    import unittest2 as unittest\nelse:\n    import unittest\ntry:\n    from unittest import mock\nexcept ImportError:\n    import mock\n\n\nclass TestCheckSatpy(unittest.TestCase):\n    \"\"\"Test the 'check_satpy' function.\"\"\"\n\n    def test_basic_check_satpy(self):\n        \"\"\"Test 'check_satpy' basic functionality.\"\"\"\n        from satpy.config import check_satpy\n        check_satpy()\n\n    def test_specific_check_satpy(self):\n        \"\"\"Test 'check_satpy' with specific features provided.\"\"\"\n        from satpy.config import check_satpy\n        with mock.patch('satpy.config.print') as print_mock:\n            check_satpy(readers=['viirs_sdr'], extras=('cartopy', '__fake'))\n        checked_fake = False\n        for call in print_mock.mock_calls:\n            if len(call[1]) > 0 and '__fake' in call[1][0]:\n                self.assertNotIn('ok', call[1][1])\n                checked_fake = True\n        self.assertTrue(checked_fake, \"Did not find __fake module \"\n                        \"mentioned in checks\")\n\n\nclass TestBuiltinAreas(unittest.TestCase):\n    \"\"\"Test that the builtin areas are all valid.\"\"\"\n\n    def test_areas_pyproj(self):\n        \"\"\"Test all areas have valid projections with pyproj.\"\"\"\n        import pyproj\n        from 
pyresample import parse_area_file\n from satpy.resample import get_area_file\n\n all_areas = parse_area_file(get_area_file())\n for area_obj in all_areas:\n if getattr(area_obj, 'optimize_projection', False):\n # the PROJ.4 is known to not be valid on this DynamicAreaDef\n continue\n proj_dict = area_obj.proj_dict\n _ = pyproj.Proj(proj_dict)\n\n def test_areas_rasterio(self):\n \"\"\"Test all areas have valid projections with rasterio.\"\"\"\n try:\n from rasterio.crs import CRS\n except ImportError:\n return unittest.skip(\"Missing rasterio dependency\")\n if not hasattr(CRS, 'from_dict'):\n return unittest.skip(\"RasterIO 1.0+ required\")\n\n from pyresample import parse_area_file\n from satpy.resample import get_area_file\n all_areas = parse_area_file(get_area_file())\n for area_obj in all_areas:\n if getattr(area_obj, 'optimize_projection', False):\n # the PROJ.4 is known to not be valid on this DynamicAreaDef\n continue\n proj_dict = area_obj.proj_dict\n _ = CRS.from_dict(proj_dict)\n\n\ndef suite():\n \"\"\"The test suite for test_config.\"\"\"\n loader = unittest.TestLoader()\n my_suite = unittest.TestSuite()\n my_suite.addTest(loader.loadTestsFromTestCase(TestCheckSatpy))\n my_suite.addTest(loader.loadTestsFromTestCase(TestBuiltinAreas))\n\n return my_suite\n","repo_name":"lastproxy/satpy","sub_path":"satpy/tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"40535605704","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scipy.stats import halfcauchy\n\nN = 1000\nmu = np.random.normal(loc=0, scale=5, size=N)\ntau = halfcauchy.rvs(loc=0, scale=5, size=N)\ntheta = np.random.normal(loc=mu, scale=tau, size=N)\n\nmask_tau = (np.log(tau) > -2) & (np.log(tau) < 3)\n\nplt.scatter(np.log(tau[mask_tau]), theta[mask_tau], color='gray', alpha=0.6)\n\nplt.show()","repo_name":"pierresegonne/VINF","sub_path":"code/studies/target_distributions/eight_schools.py","file_name":"eight_schools.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"73431952462","text":"import torch\nfrom torch import nn, Tensor\n\nfrom style_gan.models.other import EqualConv2d, ConvBlock, EqualLinear\nfrom style_gan.models.builder import ModuleBuilder, Identity, NamedModule\n\n\nclass Out(nn.Module):\n\n def forward(self, out):\n out_std = torch.sqrt(out.var(0, unbiased=False) + 1e-8)\n mean_std = out_std.mean()\n mean_std = mean_std.expand(out.size(0), 1, 4, 4)\n out = torch.cat((out, mean_std), 1)\n return out\n\n\nclass AlphaMixWithProgression(nn.Module):\n\n def __init__(self, progression: nn.Module, alpha: float):\n super().__init__()\n self.progr = progression\n self.alpha = alpha\n\n def forward(self, x1: Tensor, x2: Tensor):\n x1_pr = self.progr(x1)\n return (1 - self.alpha) * x2 + self.alpha * x1_pr\n\n\nclass ToLinear(nn.Module):\n def forward(self, input: Tensor):\n return input.view(input.shape[0], -1)\n\n\nclass DiscriminatorBuilder:\n\n def make_from_rgb(self, out_channel):\n if self.from_rgb_activate:\n return nn.Sequential(EqualConv2d(4, out_channel, 1), nn.LeakyReLU(0.2))\n\n else:\n return EqualConv2d(4, out_channel, 1)\n\n def __init__(self, alpha, fused=True, from_rgb_activate=False):\n super(DiscriminatorBuilder, self).__init__()\n self.from_rgb_activate = from_rgb_activate\n\n builder3 = ModuleBuilder()\n\n builder3.add_module_seq(\n [1, 2, 3, 
4, 5, 6, 7, 8, 9],\n lambda i: (\"input_%d\" % i,\n Identity(),\n [\"rgb%d\" % i],\n [\"rgb%d\" % i])\n )\n\n builder3.add_module_seq(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: (\"down_pool2x_%d\" % i,\n nn.AvgPool2d(2),\n [\"rgb%d\" % i],\n [\"rgb%d\" % (i - 1)])\n )\n\n builder3.add_edge_seq(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: ([f\"input_{i}\"], f\"down_pool2x_{i}\")\n )\n\n builder3.add_modules(\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: (\"from_rgb_%d\" % i,\n [\"rgb%d\" % i],\n [\"out%d\" % i])\n )(\n self.make_from_rgb(256),\n self.make_from_rgb(256),\n self.make_from_rgb(256),\n self.make_from_rgb(256),\n self.make_from_rgb(256),\n self.make_from_rgb(128),\n self.make_from_rgb(64),\n self.make_from_rgb(32),\n self.make_from_rgb(16),\n )\n\n builder3.add_edge_seq(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: ([f\"down_pool2x_{i}\"], f\"from_rgb_{i - 1}\")\n )\n\n builder3.add_edge_seq(\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: ([f\"input_{i}\"], f\"from_rgb_{i}\")\n )\n\n builder3.add_module(\n \"progression_1\",\n nn.Sequential(Out(), ConvBlock(257, 256, 3, 1, 4, 0), ToLinear(), EqualLinear(256, 1)),\n [\"out_pr1\"], [\"out_pr0\"]\n )\n builder3.add_modules(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: (f\"progression_{i}\",\n [f\"out_pr{i}\"],\n [f\"out_pr{i-1}\"])\n )(\n ConvBlock(256, 256, 3, 1, downsample=True),\n ConvBlock(256, 256, 3, 1, downsample=True),\n ConvBlock(256, 256, 3, 1, downsample=True),\n ConvBlock(256, 256, 3, 1, downsample=True),\n ConvBlock(128, 256, 3, 1, downsample=True, fused=fused),\n ConvBlock(64, 128, 3, 1, downsample=True, fused=fused),\n ConvBlock(32, 64, 3, 1, downsample=True, fused=fused),\n ConvBlock(16, 32, 3, 1, downsample=True, fused=fused),\n )\n\n builder3.add_module_seq(\n [1, 2, 3, 4, 5, 6, 7, 8],\n lambda i: (f\"alpha_mix_pr_{i}\",\n AlphaMixWithProgression(builder3.nodes[f\"progression_{i+1}\"].module, alpha=alpha),\n [f\"out{i+1}\", f\"out{i}\"],\n [f\"out_pr{i}\"])\n )\n\n builder3.add_edge_seq(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: ([f\"from_rgb_{i}\", f\"from_rgb_{i-1}\"], f\"alpha_mix_pr_{i-1}\")\n )\n\n builder3.add_edge_seq(\n [1, 2, 3, 4, 5, 6, 7, 8],\n lambda i: ([f\"alpha_mix_pr_{i}\"], f\"progression_{i}\")\n )\n\n builder3.add_edge_seq(\n [2, 3, 4, 5, 6, 7, 8, 9],\n lambda i: ([f\"progression_{i}\"], f\"progression_{i-1}\")\n )\n\n self.builder = builder3\n self.builder.cuda()\n\n def build(self, step) -> NamedModule:\n\n return self.builder.build(\n [\"input_\" + str(step + 1)],\n \"progression_1\"\n )\n\n\n\n\n\n\n\n\n\n","repo_name":"YaroslavBespalov/gans-pytorch","sub_path":"models/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6682206022","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom Parameters import *\nfrom MuscleTendonUnit import *\nfrom scipy.integrate import odeint\nimport matplotlib.animation as animation\n\n\ndef bb_momentArm(angle):\n\telbow_angles = angle*180/np.pi\n\tmoment_arms_bb = coeff_bb_ma[0]*elbow_angles**3 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[1]*elbow_angles**2 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[2]*elbow_angles**1 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[3]*elbow_angles**0\n\n\treturn moment_arms_bb\ndef bb_muscleLength(angle):\n\telbow_angles = angle*180/np.pi\n\tmuscleLengths_bb = 378.06+ coeff_bb_ml[0]*elbow_angles**4 + \\\n\t\t\t\t\t\t\tcoeff_bb_ml[1]*elbow_angles**3 + \\\n\t\t\t\t\t\t\tcoeff_bb_ml[2]*elbow_angles**2 + 
\\\n\t\t\t\t\t\t\tcoeff_bb_ml[3]*elbow_angles**1\n\n\treturn muscleLengths_bb\n\ndef TwoLinkArm(x,t):\n\t#print(\"x\",x)\n\t####\n\n\t# 12 dimensional vector\n\t#x[0] - shoulder angle\n\t#x[1] - shoulder velocity\n\t#x[2] - elbow angle\n\t#x[3] - elbow velocity\n\t#x[4] - lm_ad\n\t#x[5] - Vm_ad\n\t#x[6] - lm_pd\n\t#x[7] - Vm_pd\n\t#x[8] - lm_bb\n\t#x[9] - Vm_bb\n\t#x[10] - lm_tb\n\t#x[11] - Vm-tb\n\n\t####\n\tm1 = 2.1\n\tm2 = 1.0\n\tl1 = 0.3\n\tl2 = 0.5\n\tlc1 = l1/2.\n\tlc2 = l2/2.\n\n\tI1 = m1*l1**2\n\n\tI2 = m2*l2**2\n\n\ttheta1 = x[0]\n\n\ttheta2 = x[2]\n\t\n\tdtheta1 = x[1]\n\t\n\tdtheta2 = x[3]\n\t\n\tqdot = np.array([[dtheta1],[dtheta2]])\n\tq = np.array([[theta1],[theta2]])\n\n\n\tL_m = x[4]\n\tV_m = x[5]\n\ta = 0.2\n\tfl = np.exp(-((L_m/0.160) - 1)**2/0.45)\n\tF_a,fv,fl = MTU_unit.MuscleDynamics(a,L_m,V_m,fl)\n\t#print(\"Fa\",F_a)\n\tF_p = MTU_unit.PassiveMuscleForce(L_m)\n\t#print(\"lm\",L_m)\n\t#print(\"Fp\",F_p)\n\tF_m = F_a + F_p\n\tema = bb_momentArm(x[2])\n\t#print(\"fm\",F_m)\n\t#print(\"x2\",x[2])\n\t#print(ema)\n\tF_lever = ema*F_m*0.001\n\t#print(F_lever)\n\t#dl_mt = -1*ema*x[2]\n\n\n\n\n\n\n\n\tTorques = np.array([[0.0],[F_lever]])\n\tg = -9.81\n\tHq = np.array([[I1 + I2 + m2*l1**2 + 2*m2*l1*lc2*np.cos(theta2),I2+m2*l1*lc2*np.cos(theta2)],[I2+m2*l1*lc2*np.cos(theta2),I2]])\n\tCq = np.array([[-2*m2*l1*lc2*np.sin(theta2)*dtheta2,-m2*l1*lc2*np.sin(theta2)*dtheta2],[m2*l1*lc2*np.sin(theta2)*dtheta1,0]])\n\tGq = np.array([[(m1*lc1 + m2*l1)*g*np.sin(theta1) + m2*g*l2*np.sin(theta1+theta2)],[m2*g*l2*np.sin(theta1+theta2)]])\n\tdamping = np.array([[2.10,0],[0,2.10]])\n\tacc = np.dot(np.linalg.inv(Hq),(Torques+-np.dot(Cq,qdot) + Gq - np.dot(damping,qdot)))\n\n\tdx = np.zeros(6,)\n\n\n\t\n\tL_t = MTU_unit.TendonDynamics(F_m)\n\tx_new = x[2] + x[3]*0.0005\n\t#print(\"x new\",x_new)\n\tNew_Lmtu = bb_muscleLength(x_new)\n\n\tLm_new = New_Lmtu*0.001 - L_t\n\t#print(\"lm new\",Lm_new)\n\tdx[0] = x[1]\n\tdx[2] = x[3]\n\tdx[1] = acc[0]\n\tdx[3] = acc[1]\n\n\tdx[4] = (Lm_new - x[4])/0.0005\n\t#print(dx[4])\n\tdx[5] = (dx[4] - x[5])/0.0005\n\t#while True:\n\t#\tbreak\n\t#d = 2*r\n\treturn dx\n\t#v += acc*MTU_unit.dt \n\n\t#x += v*MTU_unit.dt\ndef init():\n \"\"\"initialize animation\"\"\"\n line.set_data([], [])\n time_text.set_text('')\n return line, time_text\n\n\ndef animate(i):\n \"\"\"perform animation step\"\"\"\n #global arm, dt\n #arm.step(dt)\n global pos\n #print(pos[i,:])\n x = np.cumsum([0,0.3*np.sin(pos[i,0]),0.5*np.sin(pos[i,2])])\n y = np.cumsum([0,-0.3*np.cos(pos[i,0]),-0.5*np.cos(pos[i,2])])\n #print(x)\n #print(y)\n line.set_data(*(x,y))\n time_text.set_text('time = %.2f' % 0.1)\n return line, time_text\n\n\n\nif __name__ == '__main__':\n\t\n\n\tMTU_unit = MTU(L0=0.160,F_max=1000,)\n\n\telbow_angles = np.arange(0,140,1)\n\tmoment_arms_bb = np.zeros(elbow_angles.shape[0])\n\n\tfor i in range(elbow_angles.shape[0]):\n\t\tmoment_arms_bb[i] = coeff_bb_ma[0]*elbow_angles[i]**3 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[1]*elbow_angles[i]**2 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[2]*elbow_angles[i]**1 + \\\n\t\t\t\t\t\t\tcoeff_bb_ma[3]*elbow_angles[i]**0 \t\n\t\n\tmuscle_lengths_bb = np.zeros(elbow_angles.shape[0])\n\tfor i in range(elbow_angles.shape[0]):\n\t\tmuscle_lengths_bb[i] = 378.06+ coeff_bb_ml[0]*elbow_angles[i]**4 + \\\n\t\t\t\t\t\t\tcoeff_bb_ml[1]*elbow_angles[i]**3 + \\\n\t\t\t\t\t\t\tcoeff_bb_ml[2]*elbow_angles[i]**2 + \\\n\t\t\t\t\t\t\tcoeff_bb_ml[3]*elbow_angles[i]**1 \n\n\n\t\n\tshoulder_angles = np.arange(0,120,1)\n\tmuscle_lengths_ad = 
np.zeros(shoulder_angles.shape[0])\n\tmuscle_lengths_pd = np.zeros(shoulder_angles.shape[0])\n\t#for i in range(shoulder_angles.shape[0]):\n\tmuscle_lengths_ad = cst_ad + slope_ad*shoulder_angles\n\tmuscle_lengths_pd = cst_pd + slope_pd*shoulder_angles\n\n\t\n\n\n\n\t#plt.plot(elbow_angles,moment_arms_bb)\n\t\n\t#plt.figure()\n\t#plt.plot(elbow_angles,muscle_lengths_bb)\n\t\n\t#plt.figure()\n\t#plt.plot(shoulder_angles,muscle_lengths_ad)\n\t#plt.plot(shoulder_angles,muscle_lengths_pd)\n\t#plt.legend(['Anterior Deltoid','Posterior Deltoid'])\n\t#plt.title(\"Anterior Deltoid\")\n\n\t#plt.show()\n\t## ACTUAL INTEGRATION\n\tdt = 0.0005\n\tt = np.arange(0, 5.0, dt)\n\t#print(\"i\",t0)\n\tstate = np.array([np.pi/4,0,0,0,0.160,0])\n\tglobal poss\n\tpos = odeint(TwoLinkArm, state, t)\n\t#print(state[0])\n\t#pos.append(y[0])\n\t#t0+=dt\n\n\t#plt.plot(y[:,0],'r')\n\t#plt.plot(y[:,2],'b')\n\t#plt.legend(['shoulder','elbow'])\n\t#plt.show()\n\n\tplt.plot(pos[:,2])\n\tplt.plot(pos[:,0])\n\tplt.legend(['theta2','theta1'])\n\tplt.figure()\n\tplt.plot(pos[:,4])\n\tplt.show()\n\n\n\tfig = plt.figure(figsize=(4,4))\n\tax = fig.add_subplot(111, aspect='equal', autoscale_on=False,\n xlim=(-1, 1), ylim=(-1, 1))\n\tax.grid()\n\tline, = ax.plot([], [], 'o-', lw=4, mew=5)\n\ttime_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)\n\n\n\tani = animation.FuncAnimation(fig, animate, frames=None,\n interval=1, blit=True, \n init_func=init)\n\n\tani.save('2linkarm_withMuscleDynamics.mp4', fps=60, \n extra_args=['-vcodec', 'libx264'])\n\n\tplt.show()\n\n\n\n\t","repo_name":"VisakK/MuscleDynamics","sub_path":"TwoLinkDynamics.py","file_name":"TwoLinkDynamics.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11413326919","text":"import logging\n\nfrom django.core.management.base import BaseCommand\n\nfrom distro_tracker.core.tasks import run_task\n\nlogger = logging.getLogger('distro_tracker.tasks')\n\n\nclass Command(BaseCommand):\n \"\"\"\n A management command which starts a number of Distro Tracker tasks.\n A task is a subclass of :class:`distro_tracker.core.tasks.BaseTask`.\n \"\"\"\n help = \"Start all the Distro Tracker tasks given by name.\" # noqa\n\n def add_arguments(self, parser):\n parser.add_argument('tasks', nargs='+', help='Tasks to be run')\n parser.add_argument(\n '--force-update',\n action='store_true',\n dest='force_update',\n default=False,\n help=(\n 'Force the update. 
'\n                'This clears any caches and makes a full update.'\n            )\n        )\n        parser.add_argument(\n            '--fake-update',\n            action='store_true',\n            dest='fake_update',\n            default=False,\n            help=(\n                'Instruct the task not to do anything except record that '\n                'everything has been done.'\n            )\n        )\n\n    def handle(self, *args, **kwargs):\n        params = {}\n        if kwargs['force_update']:\n            params['force_update'] = True\n        if kwargs['fake_update']:\n            params['fake_update'] = True\n        for task_name in kwargs['tasks']:\n            if isinstance(task_name, bytes):\n                task_name = task_name.decode('utf-8')\n            logger.info(\"./manage.py tracker_run_task %s\", task_name)\n            if not run_task(task_name, **params):\n                self.stderr.write('Task {} failed to run.\\n'.format(task_name))\n","repo_name":"rhertzog/distro-tracker","sub_path":"distro_tracker/core/management/commands/tracker_run_task.py","file_name":"tracker_run_task.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"38511049835","text":"import argparse\n\nimport pandas as pd\nimport yaml\nfrom box import Box\nfrom pyjmx import createJMX\n\n\ndef get_args():\n    ap = argparse.ArgumentParser()\n    ap.add_argument(\n        \"-c\",\n        \"--conf\",\n        type=str,\n        required=True,\n        metavar=\"CONF\",\n        help=\"config.yaml with the settings such as the target URL\",\n    )\n    ap.add_argument(\n        \"-l\",\n        \"--load\",\n        type=str,\n        required=True,\n        metavar=\"LOAD\",\n        help=\"load.csv describing the load variation over time\",\n    )\n    ap.add_argument(\n        \"-o\",\n        \"--output\",\n        type=str,\n        default=\"test.jmx\",\n        metavar=\"OUTPUT\",\n        help=\"name of the output file to write (default: test.jmx)\",\n    )\n    return ap.parse_args()\n\n\ndef read_conf(conf_file):\n    config = None\n    with open(conf_file, \"r\") as f:\n        config = Box(yaml.safe_load(f))\n    return config\n\n\ndef main():\n    args = get_args()\n    conf = read_conf(args.conf)\n    load = pd.read_csv(args.load)\n    createJMX(conf.domain, conf.port, conf.path, conf.threads, load, args.output)\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"no1986/pyjmeter","sub_path":"app/app/script/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14337598931","text":"from .. 
import pzdzi, geometry\nimport PIL\nfrom PIL import ImageFont\n\nclass LazyFont(object):\n def __init__(self, name, size):\n self.name = name\n self.size = size\n self.font = None\n\n def get(self):\n if self.font is None:\n self.font = ImageFont.truetype(self.name, self.size)\n return self.font\n\nif tuple(map(int,PIL.__version__.split('.'))) >= (8,0,0):\n def text_size(draw, text, font):\n left, top, right, bottom = draw.textbbox((0, 0), text, font)\n return left, top, right - left, bottom - top\nelse:\n def text_size(draw, text, font):\n w, h = draw.textsize(text, font)\n return 0, 0, w, h\n\ndef render_text(draw, x, y, text, color, font):\n dx, dy, w, h = text_size(draw, text, font)\n draw.text((x - (w >> 1) - dx, y - (h >> 1) - dy), text, color, font)\n\ndef draw_square(draw, x, y, color):\n h = pzdzi.IsoDZI.HALF_SQR_HEIGHT\n w = pzdzi.IsoDZI.HALF_SQR_WIDTH\n draw.polygon([x, y - h, x + w, y, x, y + h, x - w, y], fill=color)\n\nSUFFIX = [\n 'store',\n 'storage',\n 'kitchen',\n 'bathroom',\n 'room',\n 'rooms',\n 'factory',\n 'occupied',\n 'dining',\n 'warehouse',\n 'restaurant',\n 'clothes',\n 'station',\n 'game',\n 'stand',\n 'shipping',\n 'cooking',\n 'office',\n 'print',\n 'bottling',\n]\ndef break_long_text(text):\n for s in SUFFIX:\n if text.endswith(s):\n return text[:len(text) - len(s)] + '\\n' + s\n l = len(text) // 2\n return text[:l] + '\\n' + text[l:]\n\ndef render_long_text(draw, x, y, text, color, font):\n dx, dy, w, h = text_size(draw, text, font)\n if w >= pzdzi.IsoDZI.SQR_WIDTH:\n text = break_long_text(text)\n dx, dy, w, h = text_size(draw, text, font)\n draw.text((x - dx - w // 2, y - dy - h // 2), text, color, font, align='center')\n\n_PAD_Y = 5\n_PAD_X = 10\ndef render_edge(draw, x, y, color, width, border_flags):\n edges = geometry.get_edge_segments(border_flags, x, y,\n pzdzi.IsoDZI.HALF_SQR_WIDTH, pzdzi.IsoDZI.HALF_SQR_HEIGHT,\n _PAD_X, _PAD_Y)\n for edge in edges:\n draw.line(edge, fill=color, width=width)\n","repo_name":"cff29546/pzmap2dzi","sub_path":"pzmap2dzi/render_impl/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"47"} +{"seq_id":"37307721524","text":"my_dict = {'Name': None, 'Price': None, 'Quantity': None, 'Unit': None}\nmy_list = []\n\nwhile True:\n for el in my_dict.keys():\n ent = input(f'Enter {el}: ')\n my_dict.update({el: ent})\n my_list.append((len(my_list) + 1, my_dict.copy()))\n\n if input('Print end for stop: ') == 'end':\n break\nprint(my_list)\n\nfor key in my_dict.keys():\n tm_list = []\n for el in my_list:\n val = el[1].get(key)\n tm_list.append(val)\n my_dict.update({key: tm_list})\nprint(my_dict)\n","repo_name":"Srmrlt/GeekBrains-HomeWork-Python","sub_path":"lesson02/task06.py","file_name":"task06.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2188837949","text":"#\n# Backtester Report\n# Report class for the Backtester\n#\n\nimport numpy as np\nimport pandas as pd\nfrom typing import (Any, Optional, Sequence)\n\nimport nfpy.IO as IO\nimport nfpy.Math as Math\nimport nfpy.Tools.Utilities as Ut\nimport nfpy.Trading as Trd\n\nfrom .BaseReport import BaseReport\n\n# Remove a style property for Pandas version 0.x\nif int(pd.__version__.split('.')[0]) < 1:\n PD_STYLE_PROP = {}\nelse:\n PD_STYLE_PROP = {'na_rep': \"-\"}\n\n\nclass ReportBacktester(BaseReport):\n DEFAULT_P = {\n \"start_amount\": 10000.,\n 
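# default backtest setup used when no report parameters are supplied; values from the database override these\n        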
\"strategy\": \"MACDHistReversalStrategy\",\n \"strategy_params\": {\n 'w_fast': 100,\n 'w_slow': 200,\n 'w_macd': 50\n },\n \"sizer\": \"ConstantSplitSizer\",\n \"sizer_params\": {'buy': .2, 'sell': .3}\n }\n\n def _init_input(self, type_: Optional[str] = None) -> None:\n \"\"\" Prepare and validate the the input parameters for the model. This\n includes verifying the parameters are correct for the models in the\n report. Takes the default parameters if any, applies the values from\n the database and the asset-specific overlays if any.\n The function must ensure the parameters from the database stored in\n the self._p symbol are NOT altered for later usage by making copies\n if required.\n \"\"\"\n pass\n\n def _one_off_calculations(self) -> None:\n \"\"\" Perform all non-uid dependent calculations for efficiency. \"\"\"\n pass\n\n def _calculate(self) -> Any:\n \"\"\" Calculate the required models.\n MUST ensure that the model parameters passed in are not\n modified so that the database parameters in self._p are not\n changed from one asset to the next.\n \"\"\"\n bk = Trd.Backtester(self._uids, self._p['start_amount'], False)\n symbol = '.'.join(['nfpy.Trading.Strategies', self._p['strategy']])\n bk.strategy = Ut.import_symbol(symbol)\n bk.parameters = self._p['strategy_params']\n\n symbol = '.'.join(['nfpy.Trading.Sizers', self._p['sizer']])\n class_ = Ut.import_symbol(symbol)\n bk.sizer = class_(**self._p['sizer_params'])\n try:\n bk.run()\n except (RuntimeError, IndexError) as ex:\n print(ex)\n\n return self._render_results(\n bk.results,\n bk.strategy,\n bk.parameters,\n self._p['sizer_params']\n )\n\n def _render_results(self, res_dict: Any, strategy: Trd.TyStrategy,\n strategy_p: dict, sizer_p: Sequence) -> Any:\n outputs = Ut.AttributizedDict()\n\n # Aggregated measures\n avg_return = .0\n avg_buy = 0\n avg_sell = 0\n\n for uid, bt_res in res_dict.items():\n asset = self._af.get(uid)\n labels = ((uid,), ('BT',), ('results',))\n fig_full, fig_rel = self._get_image_paths(labels)\n res = Ut.AttributizedDict()\n\n # Relative path in results object\n res.img_results = fig_rel[0]\n\n # General results\n res.initial_value = bt_res.initial\n res.final_value = bt_res.final_value\n res.buy = bt_res.num_buy\n res.sell = bt_res.num_sell\n res.tot_return = bt_res.total_return * 100.\n\n # Aggregated results\n avg_return += bt_res.total_return\n avg_buy += bt_res.num_buy\n avg_sell += bt_res.num_sell\n\n # Prepare data for plotting\n dates = asset.prices.index.values\n prices = asset.prices.values\n sig_dates = np.array([v[0] for v in bt_res.trades])\n signals = np.array([v[1] for v in bt_res.trades])\n sell_dates = np.array(\n [\n v[0]\n for v in bt_res.trades\n if v[1] == -1\n ]\n )\n returns = np.array(\n [\n v[7]\n for v in bt_res.trades\n if v[1] == -1\n ]\n )\n\n _idx = np.searchsorted(dates, sig_dates)\n _shares = np.zeros(dates.shape[0], dtype=int)\n np.put(\n _shares, _idx,\n [v[1] * v[3] for v in bt_res.trades]\n )\n\n _cash = np.zeros(dates.shape[0])\n _cash[0] = bt_res.initial\n np.put(\n _cash, _idx,\n [-v[1] * v[4] for v in bt_res.trades]\n )\n _cash = _cash.cumsum()\n _shares = _shares.cumsum()\n\n _shares_val = prices * _shares\n _total_val = _shares_val + _cash\n _perf = Math.comp_ret(asset.returns.values, is_log=False)\n\n # Plotting\n pl = IO.Plotter(4, 1, figsize=(15, 12.8)) \\\n .lplot(0, dates, _total_val, label='tot. 
value', color='C0') \\\n .lplot(0, dates, _shares_val / _total_val, label='eq%',\n color='C1', linewidth=.75, secondary_y=True) \\\n .lplot(0, dates, _cash / _total_val, label='cash%',\n color='C2', linewidth=.75, secondary_y=True) \\\n .lplot(1, dates, _total_val, label='total value') \\\n .lplot(1, dates, _shares_val, label='equity value') \\\n .lplot(1, dates, _cash, label='cash value') \\\n .lplot(3, dates, _total_val / bt_res.initial,\n label='ptf perf.', color='C0') \\\n .lplot(3, dates, _perf, label='price perf.', color='C5') \\\n .line(2, 'xh', .0)\n\n pos_ret_mask = returns >= .0\n if np.sum(pos_ret_mask) > 0:\n pl.stem(2, sell_dates[pos_ret_mask], returns[pos_ret_mask],\n linefmt='C1--', markerfmt='C1o', label='gain')\n if np.sum(~pos_ret_mask) > 0:\n pl.stem(2, sell_dates[~pos_ret_mask], returns[~pos_ret_mask],\n linefmt='C2--', markerfmt='C2o', label='loss')\n\n for i in range(sig_dates.shape[0]):\n color = 'C2' if signals[i] == -1 else 'C1'\n pl.line(0, 'xv', sig_dates[i], color=color, linewidth=.6) \\\n .line(1, 'xv', sig_dates[i], color=color, linewidth=.6) \\\n .line(3, 'xv', sig_dates[i], color=color, linewidth=.6)\n\n pl.plot() \\\n .save(fig_full[0]) \\\n .close(True)\n\n # Render dataframes\n df = pd.DataFrame(\n bt_res.trades,\n columns=('date', 'signal', 'price', 'shares',\n 'd_cash', 'base cost', 'P&L', 'R')\n )\n df['date'] = df['date'].dt.strftime('%Y-%m-%d')\n res.trades_table = df.style.format(\n formatter={\n 'price': '{:,.2f}'.format,\n 'shares': '{:,.0f}'.format,\n 'd_cash': '{:,.2f}'.format,\n 'base cost': '{:,.2f}'.format,\n 'P&L': '{:,.2f}'.format,\n 'R': '{:,.1%}'.format\n },\n **PD_STYLE_PROP) \\\n .set_table_attributes('class=\"dataframe\"') \\\n .render()\n\n outputs[uid] = res\n\n # Add aggregated results to outputs\n tests_performed = len(res_dict)\n aggregated_res = Ut.AttributizedDict()\n\n aggregated_res.name = strategy.NAME\n aggregated_res.description = strategy.DESCRIPTION\n aggregated_res.parameters = strategy_p\n aggregated_res.sizer = sizer_p\n\n aggregated_res.tests_performed = tests_performed\n aggregated_res.avg_return = avg_return / tests_performed * 100.\n aggregated_res.avg_buy = avg_buy / tests_performed\n aggregated_res.avg_sell = avg_sell / tests_performed\n outputs['__aggregated_results__'] = aggregated_res\n\n return outputs\n","repo_name":"bign86/nfpy","sub_path":"nfpy/Reporting/Reports/ReportBacktester.py","file_name":"ReportBacktester.py","file_ext":"py","file_size_in_byte":7967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30608400182","text":"import logging\nimport os\nimport sys\n\nimport pymysql\n\ntry:\n from beeprint import pp as ppt\nexcept Exception as e:\n try:\n from prettyprinter import cpprint as ppt\n except Exception as e:\n from pprint import pprint as ppt\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# CONSTANT\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# LOGGER\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nlogdirname = \"logs\"\nnot os.path.exists(logdirname) and os.makedirs(logdirname)\nlogfilename = os.path.join(logdirname, os.path.splitext(__file__)[0])\nlogger = logging.getLogger(logfilename + \".log\")\nfhandler = logging.FileHandler(logfilename + \".log\")\nshandler = logging.StreamHandler()\nfmt = \" - \".join(\n [\"%(asctime)s\", \"%(pathname)s:%(module)s:%(funcName)s\", \"%(lineno)s\", 
\"%(message)s\"]\n)\nformatter = logging.Formatter(fmt)\nfhandler.setFormatter(formatter)\nshandler.setFormatter(formatter)\nlogger.addHandler(fhandler)\nlogger.addHandler(shandler)\nlogger.setLevel(logging.INFO)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CONST\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# LOGIC\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ================================================================\n# PURPOSE:\n# ================================================================\nclass MySQL(object):\n \"\"\"docstring for MySQL.\"\"\"\n\n def __init__(self):\n super(MySQL, self).__init__()\n self.host = \"172.17.0.2\"\n self.port = 3306\n self.user = \"root\"\n self.password = \"yangrui\"\n self.database = \"test\"\n\n self.db = None\n self.cursor = None\n\n def __enter__(self):\n self.db = pymysql.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n database=self.database,\n )\n self.cursor = self.db.cursor()\n\n if self.db:\n logger.info(\"login successful\")\n logger.info(\n \"mysql://{}:{}@{}:{}/{}\".format(\n self.host, self.port, self.user, self.password, self.database\n )\n )\n\n def __exit__(self, *args):\n self.cursor.close()\n self.db.close()\n\n self.cursor = None\n self.db = None\n\n if not self.db:\n logger.info(\"logout successful.\")\n\n # ----------------------------------------------------------------\n # METHOD:\n # PARAMETERS:\n # RESULT:\n # ----------------------------------------------------------------\n def execute_sql(self, sql):\n res = None\n for each in [each.strip(\"\\n\") + \";\" for each in sql.split(\";\") if each.strip(\"\\n\")]:\n logger.info(\"SQL: {}\".format(each))\n res = self.cursor.execute(each)\n self.db.commit()\n return res\n\n # ----------------------------------------------------------------\n # METHOD:\n # PARAMETERS:\n # RESULT:\n # ----------------------------------------------------------------\n def execute_sql_multi(self, sqls):\n for sql in sqls:\n self.execute_sql(sql)\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# FUNCTION:\n# PARAMETERS:\n# RESULT:\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n# **=--=*=--=*=--=*=--=*=--=*=--=*=--=*=--=*=--=*=--=*=--=*=--=**\nsql_initial = [\n \"\"\"\ndrop table if exists student;\ncreate table student(sid varchar(6), sname varchar(10), sage datetime, ssex varchar(10));\ninsert into student values('01' , 'ZL' , '1990-01-01' , 'M');\ninsert into student values('02' , 'QD' , '1990-12-21' , 'M');\ninsert into student values('03' , 'SF' , '1990-05-20' , 'M');\ninsert into student values('04' , 'LY' , '1990-08-06' , 'M');\ninsert into student values('05' , 'ZM' , '1991-12-01' , 'F');\ninsert into student values('06' , 'WL' , '1992-03-01' , 'F');\ninsert into student values('07' , 'ZZ' , '1989-07-01' , 'F');\ninsert into student values('08' , 'WJ' , '1990-01-20' , 'F')\n\"\"\",\n \"\"\"\ndrop table if exists sc;\ncreate table sc(sid varchar(10), cid varchar(10), score decimal(18,1));\ninsert into sc values('01' , '01' , 80);\ninsert into sc values('01' , '02' , 90);\ninsert into sc values('01' , '03' , 99);\ninsert into sc values('02' , '01' , 70);\ninsert into sc values('02' , '02' , 60);\ninsert into sc values('02' , '03' , 80);\ninsert into sc values('03' , '01' , 80);\ninsert into sc values('03' , '02' , 80);\ninsert into sc values('03' , '03' , 
80);\ninsert into sc values('04' , '01' , 50);\ninsert into sc values('04' , '02' , 30);\ninsert into sc values('04' , '03' , 20);\ninsert into sc values('05' , '01' , 76);\ninsert into sc values('05' , '02' , 87);\ninsert into sc values('06' , '01' , 31);\ninsert into sc values('06' , '03' , 34);\ninsert into sc values('07' , '02' , 89);\ninsert into sc values('07' , '03' , 98)\n\"\"\",\n \"\"\"\ndrop table if exists course;\ncreate table course(cid varchar(10),cname varchar(10),tid varchar(10));\ninsert into course values('01' , 'Chinese' , '02');\ninsert into course values('02' , 'Math' , '01');\ninsert into course values('03' , 'English' , '03')\n\"\"\",\n \"\"\"\ndrop table if exists teacher;\ncreate table teacher(tid varchar(10),tname varchar(10));\ninsert into teacher values('01' , 'Z3');\ninsert into teacher values('02' , 'L4');\ninsert into teacher values('03' , 'W5')\n\"\"\",\n]\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# MAIN\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\ndef main(args):\n obj = MySQL()\n with obj:\n # obj.execute_sql_multi(sql_initial)\n with open(\"sqls/01.sql\", \"r\") as f:\n sql = \" \".join([each.strip(\"\\n\") for each in f.readlines()])\n # print(\" \".join(sql.split(\"\\n\")))\n # print(sql)\n obj.execute_sql(sql)\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"UlricYang/bootcamp-python","sub_path":"jobHunting/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18874612192","text":"import json\nfrom flask import Blueprint, jsonify, request\nfrom models.Product.ProductStatus import ProductStatus\nfrom utils.db import db\nfrom utils.ma import ma\n\nproduct_status = Blueprint('product_status', __name__)\n\nclass StatusProductSchema(ma.Schema):\n class Meta:\n fields = ('id','name')\n \nstatus_schema = StatusProductSchema()\nmany_status_schema = StatusProductSchema(many=True)\n\n@product_status.route('/')\ndef list_product_status():\n try:\n status = ProductStatus.query.all()\n return many_status_schema.jsonify(status)\n except Exception as ex:\n return jsonify(messages=str(ex), context=3), 500\n \n@product_status.route('/', methods=['POST'])\ndef create_product_status():\n try:\n new_product_status = ProductStatus(request.json['name'])\n db.session.add(new_product_status)\n db.session.commit()\n return jsonify(messages='Elemento creado', context=0), 200\n except Exception as ex:\n return jsonify(messages=str(ex), context=3), 500\n \n@product_status.route('/', methods=['DELETE'])\ndef delete_product_status(id):\n try:\n product_status=ProductStatus.query.get(id)\n if product_status == None:\n return jsonify(messages='No existe un estado el usuario con este ID', context=2), 404\n db.session.delete(product_status)\n db.session.commit()\n return jsonify(messages='Elemento eliminado', context=0), 200\n except Exception as ex:\n return jsonify(messages=str(ex), context=3), 500\n \n@product_status.route('/', methods=['PUT'])\ndef update_product_status(id):\n try:\n product_status=ProductStatus.query.get(id)\n if product_status == None:\n return jsonify(messages='No existe un estado el usuario con este ID', context=2), 404\n product_status.name = request.json['name']\n db.session.commit()\n return jsonify(messages='Elemento actualizado', context=0), 200\n except Exception as ex:\n return jsonify(messages=str(ex), context=3), 500\n 
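\n# NOTE: delete_product_status and update_product_status above, and\n# get_product_status below, all take an id argument, but their route\n# decorators declare no URL parameter for Flask to bind, so such requests\n# would fail with a missing-argument error. A rule in the form\n# @product_status.route('/<int:id>', methods=['DELETE']) is presumably what\n# was intended here.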
\n@product_status.route('/')\ndef get_product_status(id):\n try:\n product_status=ProductStatus.query.get(id)\n if product_status == None:\n return jsonify(messages='No existe un estado el usuario con este ID', context=2), 404\n return status_schema.jsonify(product_status)\n except Exception as ex:\n return jsonify(messages=str(ex), context=3), 500","repo_name":"santiagocano23181/fruitexco-backend","sub_path":"routes/Product/ProductStatus.py","file_name":"ProductStatus.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6059107731","text":"import json\nimport torch\nimport torch.nn as nn\nembedding = nn.Embedding(64000, 128)\nf = open(\"sample_data3.json\", \"r\")\ndata = json.load(f)\nf.close()\n\n\nclass SpatialEncoding(nn.Module):\n def __init__(self):\n super(SpatialEncoding, self).__init__()\n self.projector = nn.Linear(128, 128)\n\n def propagate(self, t, x, shifted_edge_indices, edge_weights):\n # n = embedding(torch.tensor(node_features[t]))\n n_hat = x.clone()\n updated = list()\n for i in range(len(shifted_edge_indices[t][0])):\n if i not in updated:\n n_hat[shifted_edge_indices[t][1][i]] = x[shifted_edge_indices[t][1][i]] + x[shifted_edge_indices[t][0][i]] * edge_weights[t][i]\n updated.append(shifted_edge_indices[t][1][i])\n else:\n n_hat[shifted_edge_indices[t][1][i]] = n_hat[shifted_edge_indices[t][1][i]] + x[shifted_edge_indices[t][0][i]] * edge_weights[t][i]\n\n del updated\n return self.projector(n_hat)\n\n def forward(self, node_features, shifted_edge_indices, edge_weights):\n node_embedding = []\n for timestep in range(len(shifted_edge_indices)):\n x = embedding(torch.tensor(node_features[timestep]))\n x = self.propagate(timestep, x, shifted_edge_indices, edge_weights)\n x = self.propagate(timestep, x, shifted_edge_indices, edge_weights)\n node_embedding.append(self.propagate(timestep, x, shifted_edge_indices, edge_weights))\n return node_embedding\n\n\nclass TemporalEncoding(nn.Module):\n def __init__(self, example, input_size, hidden_size, num_heads):\n super(TemporalEncoding, self).__init__()\n self.example = example\n self.num_heads = num_heads\n self.hidden_size = hidden_size\n self.head_size = hidden_size // num_heads\n self.nodes = set(example['edge_indices'][0][0]).union(set(example['edge_indices'][0][1]))\n self.spatial_encoding = SpatialEncoding()\n self.query = nn.Linear(input_size, self.num_heads * self.head_size)\n self.key = nn.Linear(input_size, self.num_heads * self.head_size)\n self.value = nn.Linear(input_size, self.num_heads * self.head_size)\n self.projector = nn.Linear(self.num_heads * self.head_size, 2)\n\n def concat_features(self, edge_indices, shifted_edge_indices, node_embedding):\n temporal_input = [[[] for timestep in range(len(node_embedding))] for i in range(len(self.nodes))]\n for node in self.nodes:\n for timestep in range(len(node_embedding)):\n if node in edge_indices[timestep][0]:\n index = edge_indices[timestep][0].index(node)\n n = shifted_edge_indices[timestep][0][index]\n temporal_input[node][timestep] = node_embedding[timestep][n]\n elif node in edge_indices[timestep][1]:\n index = edge_indices[timestep][1].index(node)\n n = shifted_edge_indices[timestep][1][index]\n temporal_input[node][timestep] = node_embedding[timestep][n]\n else:\n break\n\n for node in self.nodes:\n t = 0\n while t < len(temporal_input[node]):\n if temporal_input[node][t] == []:\n del temporal_input[node][t]\n else:\n t += 1\n return temporal_input\n\n def 
split_to_attention_head(self, tensors):\n new_shape = tensors.shape[: 2] + (self.num_heads, self.head_size) # spliting features for each head\n tensors = tensors.view(new_shape)\n return tensors.transpose(1, 2)\n\n def compute_attention(self, node_features, edge_indices, shifted_edge_indices, edge_weights):\n node_embedding = self.spatial_encoding(node_features, shifted_edge_indices, edge_weights)\n temporal_inputs = self.concat_features(edge_indices, shifted_edge_indices, node_embedding)\n\n node_embedding = list()\n for node in self.nodes:\n t = torch.cat((temporal_inputs[node][0], temporal_inputs[node][1]))\n for timestep in range(2, len(temporal_inputs[node])):\n t = torch.cat((t, temporal_inputs[node][timestep]))\n t = t.reshape(1, -1, 128)\n\n query = self.split_to_attention_head(self.query(t))\n key = self.split_to_attention_head(self.key(t))\n value = self.split_to_attention_head(self.value(t))\n\n scores = query.matmul(key.transpose(-1, -2)) / torch.sqrt(torch.tensor(self.head_size))\n attention_scores = scores.softmax(dim=-1).matmul(value)\n attention_scores = attention_scores.reshape(1, t.shape[1], -1)\n node_embedding.append(attention_scores)\n\n return node_embedding\n\n def forward(self):\n output = self.compute_attention(self.example['node_features'],\n self.example['edge_indices'],\n self.example['shifted_edge_indices'],\n self.example['edge_weights'])\n agg = torch.cat((output[0][0], output[1][0]))\n for node in range(2, len(output)):\n agg = torch.cat((agg, output[node][0]))\n agg = agg.transpose(1, 0).mean(dim=-1).reshape(-1)\n t = self.projector(agg)\n return t\n\nmodel = TemporalEncoding(data[0], 128, 128, 12)\noutput = model()\nprint(output.softmax(dim=-1).argmax(dim=-1))\n","repo_name":"AIhmed/TGNN_session_classfication","sub_path":"message_passing.py","file_name":"message_passing.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29707234129","text":"def see(x,lst):\n if min(lst)>=85:\n print(x)\n\ndef avg_total(x,lst):\n avg=sum(lst)/3\n total=sum(lst)\n print(\"{}的平均分为:{:.2f},总分为:{:.2f}\".format(x,avg,total))\n return(avg,total)\n\ndef sortt(dict):\n lsVK=[(sum(v),k) for k,v in dict.items()]\n lsVK.sort()\n return lsVK\n\ndict={'01':[67,88,45],'02':[97,68,85],'03':[97,98,95],'04':[67,48,45],'05':[82,58,75],'06':[96,49,65]}\nfor xuehao in dict:\n see(xuehao,dict[xuehao])\n\nfor xuehao in dict:\n avg_total(xuehao,dict[xuehao])\n\nlsVK=sortt(dict)\ndict_new = {}\nfor item in lsVK:\n dict_new[item[1]] = item[0]\nprint(dict_new) \n","repo_name":"bloodgather/learning_files","sub_path":"python/python基础/书后部分习题参考答案/书后6-5.py","file_name":"书后6-5.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"20230573859","text":"import requests\n\nfrom BiddingInfoSpider.spiders.base_spider import BaseSpider\nfrom BiddingInfoSpider.items import BiddinginfospiderItem\nimport scrapy\nfrom urllib.parse import urljoin\nfrom scrapy import FormRequest\n\n\nclass YiDongDianZi(BaseSpider):\n name = 'YiDongDianZi'\n allowed_domains = ['bidding.sinopec.com']\n start_urls = ['https://b2b.10086.cn/b2b/main/preIndex.html']\n website_name = '中国移动电子采购与招标投标系统'\n tmpl_url = 'https://b2b.10086.cn/b2b/main/showBiao!showZhaobiaoResult.html'\n endPageNum = 2\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Connection\": 
\"keep-alive\",\n \"Content-Length\": \"122\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Cookie\": \"saplb_*=(J2EE204289720)204289752; JSESSIONID=0n4SKCHPKByVhhMLhVGvmOaZM3lZbgHYNi0M_SAPvqbC7Pbnqkvq23R4dnMX-j7J\",\n \"Host\": \"b2b.10086.cn\",\n \"Origin\": \"https://b2b.10086.cn\",\n \"Referer\": \"https://b2b.10086.cn/b2b/main/showBiao!preShowBiao.html?noticeType=list1\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n def __init__(self, *a, **kw):\n super(YiDongDianZi, self).__init__(*a, **kw)\n if not self.biddingInfo_update:\n self.endPageNum = 5\n\n def parse(self, response):\n for i in range(1, self.endPageNum):\n form_data = {\n \"page.currentPage\": str(i),\n \"page.perPageSize\": \"20\",\n \"noticeBean.companyName\": \"\",\n \"noticeBean.title\": \"\",\n \"noticeBean.startDate\": \"\",\n \"noticeBean.endDate\": \"\",\n\n }\n response = requests.post(self.tmpl_url, headers=self.headers, data=form_data)\n res = scrapy.Selector(text=response.text)\n li = res.xpath('//table[@class=\"jtgs_table\"]//tr')\n article_tmp_url = 'https://b2b.10086.cn/b2b/main/viewNoticeContent.html?noticeBean.id={0}'\n for l in li[1:]:\n item = BiddinginfospiderItem()\n a = l.xpath(\".//a\")\n id = l.xpath('@onclick').get()[14:-2]\n href = article_tmp_url.format(id)\n\n title = a.xpath('.//text()').get()\n item.update(\n title=title,\n href=href,\n )\n yield item\n","repo_name":"LeeeetMe/BiddingInfo","sub_path":"BiddingInfoSpider/spiders/国企/YiDongDianZi.py","file_name":"YiDongDianZi.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22709521777","text":"from util.args import *\nfrom util.data import get_dataloaders\nfrom util.net import get_network, freeze\nfrom util.visualize import gen_vis\nfrom util.analyse import *\nfrom util.save import *\nfrom prototree.train import train_epoch, train_epoch_kontschieder\nfrom prototree.test import eval_accuracy, eval_fidelity\nfrom prototree.prune import prune\nfrom prototree.project import project_with_class_constraints\nfrom prototree.upsample import upsample_prototypes\n\nimport torch\nfrom copy import deepcopy\n\n# Use onyl deterministic algorithms\ntorch.use_deterministic_algorithms(True)\n\n\ndef create_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser('Train a ProtoTree')\n add_prototree_init_args(parser)\n add_general_args(parser)\n add_training_args(parser)\n return parser\n\n\ndef run_tree(args: argparse.Namespace = None):\n args = get_args(create_parser()) if args is None else args\n\n resume = False\n if (os.path.exists(args.root_dir) and os.path.exists(args.root_dir+'/metadata')\n and load_args(args.root_dir+'/metadata') == args and os.path.exists(args.root_dir+'/checkpoints/latest')) \\\n or args.tree_dir != '':\n # Directory already exists and contains the same arguments => resume computation\n # Alternatively, checkpoint can be explicitely specified\n resume = True\n\n if os.path.exists(args.root_dir) and not resume and not args.force:\n raise ValueError(f'Output directory {args.root_dir} already exists. 
To overwrite, use --force option.')\n\n # Create a logger\n log = Log(args.root_dir, mode='a' if resume else 'w')\n print(\"Log dir: \", args.root_dir, flush=True)\n # Log the run arguments\n save_args(args, log.metadata_dir)\n device = args.device\n\n # Log which device was actually used\n log.log_message('Device used: ' + device)\n\n # Obtain the dataset and dataloaders\n trainloader, projectloader, testloader, classes, num_channels = get_dataloaders(\n dataset=args.dataset,\n projection_mode=args.projection_mode,\n batch_size=args.batch_size,\n device=args.device,\n )\n\n if not resume:\n # Create a convolutional network based on arguments and add 1x1 conv layer\n features_net, add_on_layers = get_network(\n net=args.net,\n init_mode=args.init_mode,\n num_features=args.num_features,\n )\n\n # Create a ProtoTree\n tree = ProtoTree(\n num_classes=len(classes),\n depth=args.depth,\n num_features=args.num_features,\n features_net=features_net,\n add_on_layers=add_on_layers,\n derivative_free=not args.disable_derivative_free_leaf_optim,\n kontschieder_normalization=args.kontschieder_normalization,\n kontschieder_train=args.kontschieder_train,\n log_probabilities=args.log_probabilities,\n focal_distance=args.focal_distance,\n H1=args.H1,\n W1=args.W1,\n )\n tree = tree.to(device)\n # Determine which optimizer should be used to update the tree parameters\n optimizer, params_to_freeze, params_to_train = get_optimizer(tree, args)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=args.milestones,\n gamma=args.gamma)\n log.log_message(\n \"Max depth %s, so %s internal nodes and %s leaves\" % (args.depth, tree.num_branches, tree.num_leaves))\n analyse_output_shape(tree, trainloader, log, device)\n\n leaf_labels = dict()\n best_train_acc = 0.\n best_test_acc = 0.\n\n save_checkpoint(\n f'{log.checkpoint_dir}/tree_init', tree, optimizer, scheduler, 0,\n best_train_acc, best_test_acc, leaf_labels, args)\n epoch = 1\n else:\n # Either latest checkpoint or the one pointed by args\n directory_path = log.checkpoint_dir+'/latest' if not args.tree_dir else args.tree_dir\n print('Resuming computation from ' + directory_path)\n tree, (optimizer, params_to_freeze, params_to_train), scheduler, stats = \\\n load_checkpoint(directory_path)\n tree.to(device)\n best_train_acc, best_test_acc, leaf_labels, epoch = stats\n # Go to the next epoch\n epoch += 1\n\n # Create a csv log for storing the test accuracy, mean train accuracy and mean loss for each epoch\n logged_values = ('test_acc', 'mean_total_loss', 'mean_train_acc')\n log.create_log('log_epoch_overview', 'epoch', *logged_values)\n\n if epoch < args.epochs+1:\n '''\n TRAIN AND EVALUATE TREE\n '''\n for epoch in range(epoch, args.epochs + 1):\n log.log_message(\"\\nEpoch %s\" % str(epoch))\n # Freeze (part of) network for some epochs if indicated in args\n freeze(epoch, params_to_freeze, params_to_train, args.freeze_epochs, log)\n log_learning_rates(optimizer, args, log)\n\n # Train tree\n if tree._kontschieder_train:\n train_info = train_epoch_kontschieder(\n tree, trainloader, optimizer, epoch,\n args.disable_derivative_free_leaf_optim, device)\n else:\n train_info = train_epoch(\n tree, trainloader, optimizer, epoch,\n args.disable_derivative_free_leaf_optim, device)\n # Update scheduler and leaf labels before saving checkpoints\n scheduler.step()\n leaf_labels = analyse_leafs(tree, epoch, len(classes), leaf_labels, args.pruning_threshold_leaves, log)\n\n # Update best train accuracy (if necessary)\n best_train_acc = 
save_best_train_tree(\n tree, optimizer, scheduler, epoch,\n train_info['train_accuracy'], best_train_acc, best_test_acc, leaf_labels, args, log)\n save_tree(\n tree, optimizer, scheduler, epoch,\n best_train_acc, best_test_acc, leaf_labels, args, log)\n\n # Evaluate tree\n if args.epochs <= 150 or epoch % 10 == 0 or epoch == args.epochs:\n eval_info = eval_accuracy(tree, testloader, f'Epoch {epoch}: ', device, log)\n original_test_acc = eval_info['test_accuracy']\n best_test_acc = save_best_test_tree(\n tree, optimizer, scheduler, epoch,\n best_train_acc, original_test_acc, best_test_acc, leaf_labels, args, log)\n stats = (original_test_acc, train_info['loss'], train_info['train_accuracy'])\n log.log_values('log_epoch_overview', epoch, *stats)\n else:\n stats = (\"n.a.\", train_info['loss'], train_info['train_accuracy'])\n log.log_values('log_epoch_overview', epoch, *stats)\n\n else: # tree was loaded and not trained, so evaluate only\n '''\n EVALUATE TREE\n '''\n # Readjust epoch index\n epoch = args.epochs\n original_test_acc = None\n if not args.skip_eval_after_training:\n eval_info = eval_accuracy(tree, testloader, f'Epoch {epoch}: ', device, log)\n original_test_acc = eval_info['test_accuracy']\n best_test_acc = save_best_test_tree(\n tree, optimizer, scheduler, epoch,\n best_train_acc, original_test_acc, best_test_acc, leaf_labels, args, log)\n stats = (original_test_acc, \"n.a.\", \"n.a.\")\n log.log_values('log_epoch_overview', epoch, *stats)\n\n '''\n EVALUATE AND ANALYSE TRAINED TREE\n '''\n log.log_message(\"Training Finished. Best training accuracy was %s, best test accuracy was %s\\n\"\n % (str(best_train_acc), str(best_test_acc)))\n trained_tree = deepcopy(tree)\n leaf_labels = analyse_leafs(tree, epoch+1, len(classes), leaf_labels, args.pruning_threshold_leaves, log)\n analyse_leaf_distributions(tree, log)\n\n '''\n PRUNE\n '''\n prune(tree, args.pruning_threshold_leaves, log)\n save_checkpoint(f'{log.checkpoint_dir}/pruned',\n tree, optimizer, scheduler, epoch, best_train_acc, best_test_acc, leaf_labels, args)\n pruned_tree = deepcopy(tree)\n # Analyse and evaluate pruned tree\n leaf_labels = analyse_leafs(tree, epoch+2, len(classes), leaf_labels, args.pruning_threshold_leaves, log)\n analyse_leaf_distributions(tree, log)\n pruned_test_acc = None\n if not args.skip_eval_after_training:\n eval_info = eval_accuracy(tree, testloader, \"Pruned tree\", device, log)\n pruned_test_acc = eval_info['test_accuracy']\n\n '''\n PROJECT\n '''\n proj_dir = os.path.join(args.root_dir, args.proj_dir)\n os.makedirs(proj_dir, exist_ok=True)\n project_info, tree = project_with_class_constraints(tree, projectloader, device, log)\n save_checkpoint(f'{proj_dir}/model/',\n tree, optimizer, scheduler, epoch, best_train_acc, best_test_acc, leaf_labels, args)\n pruned_projected_tree = deepcopy(tree)\n # Analyse and evaluate pruned tree with projected prototypes\n average_distance_nearest_image(project_info, tree, log)\n analyse_leafs(tree, epoch+3, len(classes), leaf_labels, args.pruning_threshold_leaves, log)\n analyse_leaf_distributions(tree, log)\n pruned_projected_test_acc = eval_info_samplemax = eval_info_greedy = fidelity_info = None\n if not args.skip_eval_after_training:\n eval_info = eval_accuracy(tree, testloader, \"Pruned and projected\", device, log)\n pruned_projected_test_acc = eval_info['test_accuracy']\n eval_info_samplemax = eval_accuracy(tree, testloader, \"Pruned and projected\", device, log, 'sample_max')\n get_avg_path_length(tree, eval_info_samplemax, log)\n eval_info_greedy = 
eval_accuracy(tree, testloader, \"Pruned and projected\", device, log, 'greedy')\n get_avg_path_length(tree, eval_info_greedy, log)\n fidelity_info = eval_fidelity(tree, testloader, device, log)\n\n # Upsample prototype for visualization\n upsample_prototypes(\n tree=tree,\n project_info=project_info,\n project_loader=projectloader,\n output_dir=os.path.join(proj_dir, \"upsampling\"),\n threshold=args.upsample_threshold,\n log=log,\n mode=args.upsample_mode,\n grads_x_input=args.grads_x_input,\n )\n # Save projection file\n torch.save(project_info, os.path.join(proj_dir, 'projection.pth'))\n # visualize tree\n gen_vis(tree, classes, proj_dir)\n\n return trained_tree.to('cpu'), pruned_tree.to('cpu'), pruned_projected_tree.to('cpu'), \\\n original_test_acc, pruned_test_acc, pruned_projected_test_acc, \\\n project_info, eval_info_samplemax, eval_info_greedy, fidelity_info\n\n\nif __name__ == '__main__':\n run_tree()\n","repo_name":"romain-xu-darme/prototype_sanity_checks","sub_path":"prototree/main_tree.py","file_name":"main_tree.py","file_ext":"py","file_size_in_byte":10792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"29042413992","text":"class Solution(object):\n def canPartition(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # calculate the total sum of all the terms in nums\n total_sum = sum(nums)\n \n # if total sum is odd, then its impossible\n if total_sum % 2 == 1: \n return False\n \n target = total_sum / 2\n \n # dp array holds booleans which dictate whether a certain value(index) is reachable\n dp = [False] * (target + 1)\n # 0 is reachable\n dp[0] = True\n\n # check each number and modify dp to show whats reachable after you add that number\n for num in nums: \n # traverse backwards to avoid double counting numbers\n for i in range(target, num - 1, -1): \n dp[i] = dp[i] or dp[i - num]\n \n return dp[target]\n","repo_name":"warrenjx/leetcode_problems","sub_path":"Medium/0416.Partition_Equal_Subset_Sum.py","file_name":"0416.Partition_Equal_Subset_Sum.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74110666382","text":"from src.product.goods.regular_goods import *\n\nfrom src.product.offers.soup_bread_offer import SoupBreadOffer\nfrom src.product.offers.apples_offer import ApplesOffer\n\nAVAILABLE_OFFERS = [SoupBreadOffer(), ApplesOffer()]\n\n\nclass AvailableAssortment:\n \"\"\"Class which stores available assortment for our shop\n in form of mapping between the goods name and actual object\"\"\"\n\n NAME2GOOD_MAPPING: dict = {\n \"Apples\": Apples(),\n \"Bread\": Bread(),\n \"Milk\": Milk(),\n \"Soup\": Soup(),\n }\n","repo_name":"desireoftheother/product_store_assignment","sub_path":"src/product/available_assortment.py","file_name":"available_assortment.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12875244925","text":"import sys\n\n\ndef divide(num,den):\n try:\n res=num/den\n return res\n except ZeroDivisionError:\n print(\"Divide fun\")\n raise TypeError\n\ntry:\n num = int(input(\"Enter the numerator\"))\n den = int(input(\"Enter the numerator\"))\n\n result=divide(num,den)\n print(\"REs\",result)\n sys.exit(-1)\n\n\nexcept ValueError:\n print(\"value\")\n\nfinally:\n print(\"finally 
block\")\n\nprint(\"EOF\")\n","repo_name":"RaghavAwasthi/Lab","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32654871480","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nimport json\nimport re\n\ndef get_txt_from_url(request):\n try:\n url = request.args.get('url')\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.76',\n 'Referer': 'https://search.yahoo.co.jp/realtime/'\n }\n \n markdown_output_list = [] # ページごとの内容を格納する配列\n current_url = url\n\n while current_url:\n page = requests.get(current_url, headers=headers, verify=False)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # ここで各ページからテキストを抽出(詳細は後述)\n markdown_output_list.append(extract_content(soup))\n \n # 次のページのリンクを見つける\n next_page_tag = None\n for a_tag in soup.find_all('a', href=True):\n if re.search(r'次.*ページ|next.*page', a_tag.get_text(), re.IGNORECASE):\n next_page_tag = a_tag\n break\n \n if next_page_tag:\n current_url = next_page_tag.get('href')\n current_url = urljoin(url, current_url) # 相対パスを絶対URLに変換\n else:\n current_url = None\n\n output_json = json.dumps({\"url\": url, \"content\": markdown_output_list}, ensure_ascii=False)\n \n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n \n return (output_json, 200, headers)\n\n except requests.exceptions.RequestException as e:\n print(f\"Requests exception occurred: {e}, Exception Type: {e.__class__.__name__}\")\n error_json = json.dumps({\"error\": str(e)})\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return (error_json, 500, headers)\n except Exception as e:\n print(f\"General exception occurred: {e}, Exception Type: {e.__class__.__name__}\")\n error_json = json.dumps({\"error\": str(e)})\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return (error_json, 500, headers)\n\ndef extract_content(soup):\n # タイトルを抽出\n title = soup.title.string if soup.title else \"Untitled\"\n markdown_output = f\"# {title}\\n\"\n\n outputted_texts = set()\n\n # 主要なテキストを抽出\n for tag in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'pre', 'li', 'table', 'strong', 'ol']):\n text = tag.get_text().strip()\n\n # 親が
liタグであればスキップ\n        if tag.find_parent(\"li\"):\n            continue\n\n        if text in outputted_texts:\n            continue\n\n        outputted_texts.add(text)\n        \n        if tag.name == 'h1':\n            markdown_output += f\"# {tag.get_text().strip()}\\n\"\n        elif tag.name == 'h2':\n            markdown_output += f\"## {tag.get_text().strip()}\\n\"\n        elif tag.name.startswith('h'):\n            markdown_output += f\"### {tag.get_text().strip()}\\n\"\n        elif tag.name == 'pre':\n            pre_text = tag.get_text()\n            pre_text = pre_text.replace('\\\\n', '\\n')\n            markdown_output += f\"```\\n{pre_text}\\n```\\n\"\n        elif tag.name == 'ol':\n            for i, li_tag in enumerate(tag.find_all('li'), 1):\n                markdown_output += f\"{i}. {li_tag.get_text().strip()}\\n\"\n        elif tag.name == 'li':\n            # 親がol
      タグであればスキップ(既に処理済み)\n if tag.find_parent(\"ol\"):\n continue\n markdown_output += f\"- {tag.get_text().strip()}\\n\"\n elif tag.name == 'table':\n rows = tag.find_all('tr')\n for row in rows:\n cells = row.find_all(['td', 'th'])\n cell_texts = [cell.get_text().strip() for cell in cells]\n markdown_output += \"|\".join(cell_texts) + \"\\n\"\n if cells and all(cell.name == 'th' for cell in cells):\n markdown_output += \"|\".join([\"---\" for _ in cells]) + \"\\n\"\n elif tag.name == 'strong' or tag.name == 'b':\n markdown_output += f\"**{tag.get_text().strip()}**\\n\"\n else:\n markdown_output += f\"\\n{tag.get_text().strip()}\\n\\n\"\n\n return markdown_output\n","repo_name":"kloir-z/get_txt_from_url","sub_path":"get_txt_from_url.py","file_name":"get_txt_from_url.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15402282105","text":"\"\"\"\nPopulating Next Right Pointers In Each Node II\n\nGiven a binary tree\n\nstruct Node {\n int val;\n Node *left;\n Node *right;\n Node *next;\n}\nPopulate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.\n\nInitially, all next pointers are set to NULL.\n\n \n\nExample:\n\n\n\nInput: {\"$id\":\"1\",\"left\":{\"$id\":\"2\",\"left\":{\"$id\":\"3\",\"left\":null,\"next\":null,\"right\":null,\"val\":4},\"next\":null,\"right\":{\"$id\":\"4\",\"left\":null,\"next\":null,\"right\":null,\"val\":5},\"val\":2},\"next\":null,\"right\":{\"$id\":\"5\",\"left\":null,\"next\":null,\"right\":{\"$id\":\"6\",\"left\":null,\"next\":null,\"right\":null,\"val\":7},\"val\":3},\"val\":1}\n\nOutput: {\"$id\":\"1\",\"left\":{\"$id\":\"2\",\"left\":{\"$id\":\"3\",\"left\":null,\"next\":{\"$id\":\"4\",\"left\":null,\"next\":{\"$id\":\"5\",\"left\":null,\"next\":null,\"right\":null,\"val\":7},\"right\":null,\"val\":5},\"right\":null,\"val\":4},\"next\":{\"$id\":\"6\",\"left\":null,\"next\":null,\"right\":{\"$ref\":\"5\"},\"val\":3},\"right\":{\"$ref\":\"4\"},\"val\":2},\"next\":null,\"right\":{\"$ref\":\"6\"},\"val\":1}\n\nExplanation: Given the above binary tree (Figure A), your function should populate each next pointer to point to its next right node, just like in Figure B.\n \n\nNote:\n\nYou may only use constant extra space.\nRecursive approach is fine, implicit stack space does not count as extra space for this problem.\n\n\"\"\"\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, left, right, next):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\"\"\"\nThe algorithm is a BFS or level order traversal. We go through the tree level by level. node is the pointer in the parent level, tail is the tail pointer in the child level.\nThe parent level can be view as a singly linked list or queue, which we can traversal easily with a pointer.\nConnect the tail with every one of the possible nodes in child level, update it only if the connected node is not nil.\nDo this one level by one level. 
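(Note: the code below implements this as a plain level-order traversal with two collections.deque queues, which costs O(level-width) extra space rather than the constant extra space the problem statement asks for, and it relies on an import of collections that the snippet does not show.) 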
The whole thing is quite straightforward.\n\nTime: O(N)\n\"\"\"\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root:\n return root\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n temp = collections.deque([node])\n while len(queue):\n node.next = queue[0]\n node = queue.popleft()\n temp.append(node)\n \n while temp:\n node = temp.popleft()\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return root\n\n","repo_name":"Bennyhwanggggg/Algorithm-and-Data-Structures-and-Coding-Challenges","sub_path":"Challenges/populatingNextRightPointerInEachNodeII.py","file_name":"populatingNextRightPointerInEachNodeII.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"21412116015","text":"from django.db import models\r\nimport re2 as re\r\n\r\n# Create your models here.\r\n\r\nINITIAL_RELATION_NAME = 'attend\\n'\r\nINITIAL_NETWORK_ID = 'UP000262375'\r\n\r\nclass Relation(models.Model):\r\n id = models.BigAutoField(primary_key=True)\r\n name = models.CharField(max_length=100)\r\n def __str__(self):\r\n return str(self.name) + ' (' + str(self.id) + ')'\r\n\r\nclass Friend(models.Model):\r\n id = models.BigAutoField(primary_key=True)\r\n network_id = models.CharField(max_length=11, unique=True)\r\n relations = models.ManyToManyField(Relation)\r\n def __str__(self):\r\n return str(self.id) + ' (' + str(self.network_id)[:-1] + ')'\r\n\r\nclass Person(models.Model):\r\n id = models.BigAutoField(primary_key=True)\r\n network_id = models.CharField(max_length=11, unique=True)\r\n name = models.CharField(max_length=100)\r\n friends = models.ManyToManyField(Friend)\r\n relations = models.ManyToManyField(Relation)\r\n\r\n def __str__(self):\r\n return str(self.name) + ' (' + str(self.network_id) + ')'\r\n\r\ndef init_relations_data():\r\n print('init relations data...')\r\n with open('data_files/verbs.txt', 'r') as file:\r\n verbs = file.readlines()\r\n with open('data_files/relations.txt', 'r') as file:\r\n relations = file.readlines()\r\n if len(verbs) == len(relations):\r\n Relation.objects.bulk_create([Relation(\r\n **{'id' : relations[i], 'name' : verbs[i]})\r\n for i in range(len(relations))\r\n ])\r\n else:\r\n print('ERROR: verbs.txt has not the same count of elements like relations.txt')\r\n\r\ndef init_friends_data():\r\n print('init friends data...')\r\n with open('data_files/networkIds.txt', 'r') as file:\r\n networkIds = file.readlines()\r\n Friend.objects.bulk_create([Friend(\r\n **{'id' : int('98' + networkId[2:]), 'network_id' : networkId[:-1]})\r\n for networkId in networkIds\r\n ])\r\n\r\ndef init_friends_relations_association():\r\n print('init friends <-> relations associations...')\r\n with open('data_files/network_file_no1.net', 'r') as file:\r\n data = file.read().replace('\\n', ' | ')\r\n relations = set(re.findall(r'[0-9]+ \\|', data))\r\n friends_relations = []\r\n i = 0\r\n for relation in relations:\r\n elementAndHisRelations = (set(re.findall(r'UP[0-9]*[0-9]\\t' + relation[:-2], data)))\r\n for element in elementAndHisRelations:\r\n item = (int('98' + element[2:11]), int(element[12:]))\r\n friends_relations.append(item)\r\n i += 1\r\n if i == 70000:\r\n print(\"almost there...\")\r\n Friend.relations.through.objects.bulk_create([\r\n Friend.relations.through(friend_id = f_id, relation_id = r_id)\r\n for (f_id, r_id) in friends_relations\r\n ])\r\n\r\ndef init_persons_data():\r\n print('init persons 
data...')\r\n with open('data_files/networkIds.txt', 'r') as file:\r\n networkIds = file.readlines()\r\n with open('data_files/persons.txt', 'r') as file:\r\n persons = file.readlines()\r\n if len(networkIds) == len(persons):\r\n Person.objects.bulk_create([Person(\r\n **{'id' : int('98' + networkIds[i][2:]), \r\n 'network_id' : networkIds[i][:-1], 'name' : persons[i][:-1]})\r\n for i in range(len(networkIds))\r\n ])\r\n else:\r\n print('ERROR: persons.txt has not the same count of elements like networkIds.txt')\r\n\r\ndef init_persons_relations_association():\r\n print('init persons <-> relations associations...')\r\n with open('data_files/network_file_no1.net', 'r') as file:\r\n data = file.read().replace('\\n', ' | ')\r\n relations = set(re.findall(r'[0-9]+ \\|', data))\r\n persons_relations = []\r\n i = 0\r\n for relation in relations:\r\n elementAndHisRelations = (set(re.findall(r'(UP[0-9]*[0-9]\\t)(?:UP[0-9]*[0-9]\\t)(' + relation[:-2] + ')', data)))\r\n for element in elementAndHisRelations:\r\n item = (int('98' + element[0][2:11]), int(element[1]))\r\n persons_relations.append(item)\r\n i += 1\r\n if i == 70000:\r\n print(\"almost there...\")\r\n Person.relations.through.objects.bulk_create([\r\n Person.relations.through(person_id = p_id, relation_id = r_id)\r\n for (p_id, r_id) in persons_relations\r\n ])\r\n\r\n \r\ndef init_data():\r\n RELATION_DATA_EXIST = Relation.objects.filter(name = INITIAL_RELATION_NAME).exists()\r\n FRIENDS_DATA_EXIST = Friend.objects.filter(network_id = INITIAL_NETWORK_ID).exists()\r\n PERSONS_DATA_EXIST = Person.objects.filter(network_id = INITIAL_NETWORK_ID).exists()\r\n if not RELATION_DATA_EXIST:\r\n init_relations_data()\r\n if not FRIENDS_DATA_EXIST:\r\n init_friends_data()\r\n init_friends_relations_association()\r\n if not PERSONS_DATA_EXIST:\r\n init_persons_data()\r\n init_persons_relations_association()\r\n\r\ntry:\r\n init_data()\r\nexcept:\r\n print('init database...')\r\n","repo_name":"mattyl006/network-file-exercise","sub_path":"backend/network_backend/network_api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29508491542","text":"from functools import partial, reduce\nimport operator\nimport xlsxwriter\nimport yaml\nimport os\nfrom sys import float_info\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport torch\nfrom torchnet.meter import AverageValueMeter\nimport logging\nfrom math import sqrt\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport distiller\n\nmsglogger = logging.getLogger()\n\n__all__ = [\n \"SummaryActivationStatsCollector\",\n \"RecordsActivationStatsCollector\",\n \"QuantCalibrationStatsCollector\",\n \"ActivationHistogramsCollector\",\n \"collect_quant_stats\",\n \"collect_histograms\",\n \"collector_context\",\n \"collectors_context\",\n]\n\n\nclass ActivationStatsCollector(object):\n \"\"\"Collect model activation statistics information.\n\n ActivationStatsCollector is the base class for classes that collect activations statistics.\n You may collect statistics on different phases of the optimization process (training, validation, test).\n\n Statistics data are accessible via .value() or by accessing individual modules.\n\n The current implementation has a few caveats:\n * It is slow - therefore it is advisable to use this only when needed.\n * It can't access the activations of torch.Functions, only 
torch.Modules.\n\n    ActivationStatsCollector uses the forward hook of modules in order to access the\n    feature-maps. This is both slow and limits us to seeing only the outputs of torch.Modules.\n    We can remove some of the slowness by choosing to log only specific layers or use it only\n    during validation or test. This can be achieved using the `classes` argument.\n    \"\"\"\n\n    def __init__(self, model, stat_name, classes):\n        \"\"\"\n        Args:\n            model - the model we are monitoring.\n            stat_name - name for the statistics being collected.\n                You can access a module's activation statistics by referring to module.<stat_name>.\n                For example:\n                    print(module.sparsity)\n            classes - a list of class types for which we collect activation statistics.\n                Passing an empty list or None will collect statistics for all class types.\n        \"\"\"\n        super(ActivationStatsCollector, self).__init__()\n        self.model = model\n        self.stat_name = stat_name\n        self.classes = classes\n        self.fwd_hook_handles = []\n\n        # The layer names are mangled, because torch.Modules don't have names and we need to invent\n        # a unique, human-readable name per layer.\n        distiller.utils.assign_layer_fq_names(model)\n\n    def value(self):\n        \"\"\"Return a dictionary containing {layer_name: statistic}\"\"\"\n        activation_stats = OrderedDict()\n        self.model.apply(\n            partial(self._collect_activations_stats, activation_stats=activation_stats)\n        )\n        return activation_stats\n\n    def start(self):\n        \"\"\"Start collecting activation stats.\n\n        This will iteratively register the modules' forward-hooks, so that the collector\n        will be called from the forward traversal and get exposed to activation data.\n        \"\"\"\n        assert len(self.fwd_hook_handles) == 0\n        self.model.apply(self.start_module)\n\n    def start_module(self, module):\n        \"\"\"Iteratively register to the forward-pass callback of all eligible modules.\n\n        Eligible modules are currently filtered by their class type.\n        \"\"\"\n        is_leaf_node = len(list(module.children())) == 0\n        register_all_class_types = not self.classes\n        if is_leaf_node and (\n            register_all_class_types or (type(module) in self.classes)\n        ):\n            self.fwd_hook_handles.append(\n                module.register_forward_hook(self._activation_stats_cb)\n            )\n            self._start_counter(module)\n\n    def stop(self):\n        \"\"\"Stop collecting activation stats.\n\n        This will iteratively unregister the modules' forward-hooks.\n        \"\"\"\n        for handle in self.fwd_hook_handles:\n            handle.remove()\n        self.fwd_hook_handles = []\n\n    def reset(self):\n        \"\"\"Reset the statistics counters of this collector.\"\"\"\n        self.model.apply(self._reset_counter)\n        return self\n\n    def save(self, fname):\n        raise NotImplementedError\n\n    def _activation_stats_cb(self, module, input, output):\n        \"\"\"Handle new activations ('output' argument).\n\n        This is invoked from the forward-pass callback of module 'module'.\n        \"\"\"\n        raise NotImplementedError\n\n    def _start_counter(self, module):\n        \"\"\"Start a specific statistic counter - this is subclass-specific code\"\"\"\n        raise NotImplementedError\n\n    def _reset_counter(self, module):\n        \"\"\"Reset a specific statistic counter - this is subclass-specific code\"\"\"\n        raise NotImplementedError\n\n    def _collect_activations_stats(self, module, activation_stats, name=\"\"):\n        \"\"\"Handle new activations - this is subclass-specific code\"\"\"\n        raise NotImplementedError\n\n\nclass SummaryActivationStatsCollector(ActivationStatsCollector):\n    \"\"\"This class collects statistical summaries of activations.\n\n    This Collector computes the mean of some statistic of the activation. It is rather
It is rather\n light-weight and quicker than collecting a record per activation.\n The statistic function is configured in the constructor.\n \"\"\"\n\n def __init__(self, model, stat_name, summary_fn, classes=[torch.nn.ReLU]):\n super(SummaryActivationStatsCollector, self).__init__(model, stat_name, classes)\n self.summary_fn = summary_fn\n\n def _activation_stats_cb(self, module, input, output):\n \"\"\"Record the activation sparsity of 'module'\n\n This is a callback from the forward() of 'module'.\n \"\"\"\n try:\n getattr(module, self.stat_name).add(self.summary_fn(output.data))\n except RuntimeError as e:\n if \"The expanded size of the tensor\" in e.args[0]:\n raise ValueError(\n \"ActivationStatsCollector: a module ({} - {}) was encountered twice during model.apply().\\n\"\n \"This is an indication that your model is using the same module instance, \"\n \"in multiple nodes in the graph. This usually occurs with ReLU modules: \\n\"\n \"For example in TorchVision's ResNet model, self.relu = nn.ReLU(inplace=True) is \"\n \"instantiated once, but used multiple times. This is not permissible when using \"\n \"instances of ActivationStatsCollector.\".format(\n module.distiller_name, type(module)\n )\n )\n else:\n msglogger.info(\n \"Exception in _activation_stats_cb: {} {}\".format(\n module.distiller_name, type(module)\n )\n )\n raise\n\n def _start_counter(self, module):\n if not hasattr(module, self.stat_name):\n setattr(module, self.stat_name, AverageValueMeter())\n # Assign a name to this summary\n if hasattr(module, \"distiller_name\"):\n getattr(module, self.stat_name).name = \"_\".join(\n (self.stat_name, module.distiller_name)\n )\n else:\n getattr(module, self.stat_name).name = \"_\".join(\n (self.stat_name, module.__class__.__name__, str(id(module)))\n )\n\n def _reset_counter(self, module):\n if hasattr(module, self.stat_name):\n getattr(module, self.stat_name).reset()\n\n def _collect_activations_stats(self, module, activation_stats, name=\"\"):\n if hasattr(module, self.stat_name):\n mean = getattr(module, self.stat_name).mean\n if isinstance(mean, torch.Tensor):\n mean = mean.tolist()\n activation_stats[getattr(module, self.stat_name).name] = mean\n\n def save(self, fname):\n \"\"\"Save the records to an Excel workbook, with one worksheet per layer.\n \"\"\"\n fname = \".\".join([fname, \"xlsx\"])\n try:\n os.remove(fname)\n except OSError:\n pass\n\n records_dict = self.value()\n with xlsxwriter.Workbook(fname) as workbook:\n worksheet = workbook.add_worksheet(self.stat_name)\n col_names = []\n for col, (module_name, module_summary_data) in enumerate(\n records_dict.items()\n ):\n if not isinstance(module_summary_data, list):\n module_summary_data = [module_summary_data]\n worksheet.write_column(1, col, module_summary_data)\n col_names.append(module_name)\n worksheet.write_row(0, 0, col_names)\n return fname\n\n\nclass RecordsActivationStatsCollector(ActivationStatsCollector):\n \"\"\"This class collects activations statistical records.\n\n This Collector computes a hard-coded set of activations statistics and collects a\n record per activation. 
The activation records of the entire model (only filtered modules),\n can be saved to an Excel workbook.\n\n For obvious reasons, this is slower than SummaryActivationStatsCollector.\n \"\"\"\n\n def __init__(self, model, classes=[torch.nn.ReLU]):\n super(RecordsActivationStatsCollector, self).__init__(\n model, \"statistics_records\", classes\n )\n\n def _activation_stats_cb(self, module, input, output):\n \"\"\"Record the activation sparsity of 'module'\n\n This is a callback from the forward() of 'module'.\n \"\"\"\n\n def to_np(stats):\n if isinstance(stats, tuple):\n return stats[0].detach().cpu().numpy()\n else:\n return stats.detach().cpu().numpy()\n\n # We get a batch of activations, from which we collect statistics\n if not output.is_contiguous():\n output = output.contiguous()\n act = output.view(output.size(0), -1)\n batch_min_list = to_np(torch.min(act, dim=1)).tolist()\n batch_max_list = to_np(torch.max(act, dim=1)).tolist()\n batch_mean_list = to_np(torch.mean(act, dim=1)).tolist()\n # If activation contains only a single element, standard-deviation is meaningless (and std() returns NaN)\n # Return 0 instead\n if act.shape[0] == act.numel():\n batch_std_list = to_np(torch.zeros(act.shape[0])).tolist()\n else:\n batch_std_list = to_np(torch.std(act, dim=1)).tolist()\n batch_l2_list = to_np(torch.norm(act, p=2, dim=1)).tolist()\n\n module.statistics_records[\"min\"].extend(batch_min_list)\n module.statistics_records[\"max\"].extend(batch_max_list)\n module.statistics_records[\"mean\"].extend(batch_mean_list)\n module.statistics_records[\"std\"].extend(batch_std_list)\n module.statistics_records[\"l2\"].extend(batch_l2_list)\n module.statistics_records[\"shape\"] = distiller.size2str(output)\n\n @staticmethod\n def _create_records_dict():\n records = OrderedDict()\n for stat_name in [\"min\", \"max\", \"mean\", \"std\", \"l2\"]:\n records[stat_name] = []\n records[\"shape\"] = \"\"\n return records\n\n def save(self, fname):\n \"\"\"Save the records to an Excel workbook, with one worksheet per layer.\n \"\"\"\n fname = \".\".join([fname, \"xlsx\"])\n try:\n os.remove(fname)\n except OSError:\n pass\n\n records_dict = self.value()\n with xlsxwriter.Workbook(fname) as workbook:\n for module_name, module_act_records in records_dict.items():\n worksheet = workbook.add_worksheet(module_name)\n col_names = []\n for col, (col_name, col_data) in enumerate(module_act_records.items()):\n if col_name == \"shape\":\n continue\n worksheet.write_column(1, col, col_data)\n col_names.append(col_name)\n worksheet.write_row(0, 0, col_names)\n worksheet.write(0, len(col_names) + 2, module_act_records[\"shape\"])\n return fname\n\n def _start_counter(self, module):\n if not hasattr(module, \"statistics_records\"):\n module.statistics_records = self._create_records_dict()\n\n def _reset_counter(self, module):\n if hasattr(module, \"statistics_records\"):\n module.statistics_records = self._create_records_dict()\n\n def _collect_activations_stats(self, module, activation_stats, name=\"\"):\n if hasattr(module, \"statistics_records\"):\n activation_stats[module.distiller_name] = module.statistics_records\n\n\nclass _QuantStatsRecord(object):\n @staticmethod\n def create_records_dict():\n records = OrderedDict()\n records[\"min\"] = float_info.max\n records[\"max\"] = -float_info.max\n for stat_name in [\"avg_min\", \"avg_max\", \"mean\", \"std\"]:\n records[stat_name] = 0\n records[\"shape\"] = \"\"\n return records\n\n def __init__(self):\n # We don't know the number of inputs at this stage so we 
defer records creation to the actual callback\n self.inputs = []\n self.output = self.create_records_dict()\n\n\ndef _verify_no_dataparallel(model):\n if torch.nn.DataParallel in [type(m) for m in model.modules()]:\n raise ValueError(\n \"Model contains DataParallel modules, which can cause inaccurate stats collection. \"\n \"Either create a model without DataParallel modules, or call \"\n \"distiller.utils.make_non_parallel_copy on the model before invoking the collector\"\n )\n\n\nclass QuantCalibrationStatsCollector(ActivationStatsCollector):\n \"\"\"\n This class tracks activations stats required for quantization, for each layer and for each input\n and output. The tracked stats are:\n * Absolute min / max\n * Average min / max (calculate min / max per sample and average those)\n * Overall mean\n * Overall standard-deviation\n\n The generated stats dict has the following structure per-layer:\n 'layer_name':\n 'inputs':\n 0:\n 'min': value\n 'max': value\n ...\n ...\n n:\n 'min': value\n 'max': value\n ...\n 'output':\n 'min': value\n 'max': value\n ...\n Where n is the number of inputs the layer has.\n The calculated stats can be saved to a YAML file.\n\n If a certain layer operates in-place, that layer's input stats will be overwritten by its output stats.\n The collector can, optionally, check for such cases at runtime. In addition, a simple mechanism to disable inplace\n operations in the model can be used. See arguments details below.\n\n Args:\n model (torch.nn.Module): The model we are monitoring\n classes (list): List of class types for which we collect activation statistics. Passing an empty list or\n None will collect statistics for all class types.\n inplace_runtime_check (bool): If True will raise an error if an in-place operation is detected\n disable_inplace_attrs (bool): If True, will search all modules within the model for attributes controlling\n in-place operations and disable them.\n inplace_attr_names (iterable): If disable_inplace_attrs is enabled, this is the list of attribute name\n that will be searched for.\n\n TODO: Consider merging with RecordsActivationStatsCollector\n Current differences between the classes:\n * Track single value per-input/output-per-module for the entire run. 
Specifically, for standard deviation this\n cannot be done by tracking per-activation std followed by some post-processing\n * Track inputs in addition to outputs\n * Different serialization (yaml vs xlsx)\n \"\"\"\n\n def __init__(\n self,\n model,\n classes=None,\n inplace_runtime_check=False,\n disable_inplace_attrs=False,\n inplace_attr_names=(\"inplace\",),\n ):\n super(QuantCalibrationStatsCollector, self).__init__(\n model, \"quant_stats\", classes\n )\n\n _verify_no_dataparallel(model)\n\n self.batch_idx = 0\n self.inplace_runtime_check = inplace_runtime_check\n\n if disable_inplace_attrs:\n if not inplace_attr_names:\n raise ValueError(\"inplace_attr_names cannot by empty or None\")\n for m in model.modules():\n for n in inplace_attr_names:\n if hasattr(m, n):\n setattr(m, n, False)\n\n def _activation_stats_cb(self, module, inputs, output):\n def update_mean(old_mean, new_val):\n return old_mean + (new_val - old_mean) / module.batch_idx\n\n def update_std(values, old_std, old_mean, new_mean):\n # See here:\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm\n numel = values.numel() if isinstance(values, torch.Tensor) else values.size\n total_values_so_far = numel * (module.batch_idx - 1)\n M = (old_std ** 2) * (total_values_so_far - 1)\n mean_diffs = (values - old_mean) * (values - new_mean)\n M += mean_diffs.sum()\n return sqrt((M / (total_values_so_far + numel - 1)).item())\n\n def update_record(record, tensor):\n if not tensor.is_contiguous():\n tensor = tensor.contiguous()\n act = tensor.view(tensor.size(0), -1)\n min_per_sample = act.min(dim=1)[0]\n max_per_sample = act.max(dim=1)[0]\n record[\"min\"] = min(record[\"min\"], min_per_sample.min().item())\n record[\"max\"] = max(record[\"max\"], max_per_sample.max().item())\n try:\n record[\"avg_min\"] = update_mean(\n record[\"avg_min\"], min_per_sample.mean().item()\n )\n record[\"avg_max\"] = update_mean(\n record[\"avg_max\"], max_per_sample.mean().item()\n )\n new_mean = update_mean(record[\"mean\"], act.mean().item())\n record[\"std\"] = update_std(\n tensor, record[\"std\"], record[\"mean\"], new_mean\n )\n except RuntimeError:\n record[\"avg_min\"] = update_mean(\n record[\"avg_min\"], min_per_sample.cpu().numpy().mean().item(0)\n )\n record[\"avg_max\"] = update_mean(\n record[\"avg_max\"], max_per_sample.cpu().numpy().mean().item(0)\n )\n new_mean = update_mean(record[\"mean\"], act.cpu().numpy().mean().item(0))\n record[\"std\"] = update_std(\n tensor.cpu().numpy(), record[\"std\"], record[\"mean\"], new_mean\n )\n record[\"mean\"] = new_mean\n\n if not record[\"shape\"]:\n record[\"shape\"] = distiller.size2str(tensor)\n\n if self.inplace_runtime_check and any(\n [id(input) == id(output) for input in inputs]\n ):\n raise RuntimeError(\n \"Inplace operation detected, meaning inputs stats are overridden by output stats. \"\n \"You can either disable this check or make sure no in-place operations occur. 
\"\n \"See QuantCalibrationStatsCollector class documentation for more info.\"\n )\n\n module.batch_idx += 1\n\n if not module.quant_stats.inputs:\n # Delayed initialization of inputs records, because only now we know the # of inputs\n for i in range(len(inputs)):\n module.quant_stats.inputs.append(\n _QuantStatsRecord.create_records_dict()\n )\n\n with torch.no_grad():\n for idx, input in enumerate(inputs):\n update_record(module.quant_stats.inputs[idx], input)\n update_record(module.quant_stats.output, output)\n\n def _start_counter(self, module):\n # We don't know the number of inputs at this stage so we defer records creation to the actual callback\n module.quant_stats = _QuantStatsRecord()\n module.batch_idx = 0\n\n def _reset_counter(self, module):\n # We don't know the number of inputs at this stage so we defer records creation to the actual callback\n module.quant_stats = _QuantStatsRecord()\n module.batch_idx = 0\n\n def _collect_activations_stats(self, module, activation_stats, name=\"\"):\n if distiller.utils.has_children(module):\n return\n if not hasattr(module, \"quant_stats\"):\n return\n\n activation_stats[module.distiller_name] = OrderedDict()\n if module.quant_stats.inputs:\n activation_stats[module.distiller_name][\"inputs\"] = OrderedDict()\n for idx, sr in enumerate(module.quant_stats.inputs):\n activation_stats[module.distiller_name][\"inputs\"][idx] = sr\n activation_stats[module.distiller_name][\"output\"] = module.quant_stats.output\n\n def save(self, fname):\n def ordered_dict_representer(self, value):\n return self.represent_mapping(\"tag:yaml.org,2002:map\", value.items())\n\n yaml.add_representer(OrderedDict, ordered_dict_representer)\n\n if not fname.endswith(\".yaml\"):\n fname = \".\".join([fname, \"yaml\"])\n try:\n os.remove(fname)\n except OSError:\n pass\n\n records_dict = self.value()\n with open(fname, \"w\") as f:\n yaml.dump(records_dict, f, default_flow_style=False)\n\n return fname\n\n\nclass ActivationHistogramsCollector(ActivationStatsCollector):\n \"\"\"\n This class collects activation histograms, for each layer and for each input and output tensor.\n It requires pre-computed min/max stats per tensor. This is done in order to prevent the need to save\n all of the activation tensors throughout the run. The histogram is created once according to these\n min/max values, and updated after each iteration. Any value outside the pre-computed range is clamped.\n\n The generated stats dict has the following structure per-layer:\n 'layer_name':\n 'inputs':\n 0:\n 'hist': tensor # Tensor with bin counts\n 'bin_centroids': tensor # Tensor with activation values corresponding to center of each bin\n ...\n n:\n 'hist': tensor\n 'bin_centroids': tensor\n 'output':\n 'hist': tensor\n 'bin_centroids': tensor\n Where n is the number of inputs the layer has.\n The generated stats dictionary can be saved to a file.\n Optionally, histogram images for all tensor can be saved as well\n\n Args:\n model (torch.nn.Module): The model we are monitoring\n activation_stats (str / dict): Either a path to activation stats YAML file, or a dictionary containing\n the stats. The stats are expected to be in the same structure as generated by QuantCalibrationStatsCollector.\n classes (list): List of class types for which we collect activation statistics. 
Passing an empty list or\n None will collect statistics for all class types.\n nbins (int): Number of histogram bins\n save_hist_imgs (bool): If set, calling save() will dump images of the histogram plots in addition to saving the\n stats dictionary\n hist_imgs_ext (str): The file type to be used when saving histogram images\n \"\"\"\n\n def __init__(\n self,\n model,\n activation_stats,\n classes=None,\n nbins=2048,\n save_hist_imgs=False,\n hist_imgs_ext=\".svg\",\n ):\n super(ActivationHistogramsCollector, self).__init__(model, \"histogram\", classes)\n\n _verify_no_dataparallel(model)\n\n if isinstance(activation_stats, str):\n if not os.path.isfile(activation_stats):\n raise ValueError(\n \"Model activation stats file not found at: \" + activation_stats\n )\n msglogger.info(\"Loading activation stats from: \" + activation_stats)\n with open(activation_stats, \"r\") as stream:\n activation_stats = distiller.utils.yaml_ordered_load(stream)\n elif not isinstance(activation_stats, (dict, OrderedDict)):\n raise TypeError(\n \"model_activation_stats must either be a string, a dict / OrderedDict or None\"\n )\n\n self.act_stats = activation_stats\n self.nbins = nbins\n self.save_imgs = save_hist_imgs\n self.imgs_ext = (\n hist_imgs_ext if hist_imgs_ext[0] == \".\" else \".\" + hist_imgs_ext\n )\n\n def _get_min_max(self, *keys):\n stats_entry = reduce(operator.getitem, keys, self.act_stats)\n return stats_entry[\"min\"], stats_entry[\"max\"]\n\n def _activation_stats_cb(self, module, inputs, output):\n def get_hist(t, stat_min, stat_max):\n # torch.histc doesn't work on integral data types, so convert if needed\n if t.dtype not in [torch.float, torch.double, torch.half]:\n t = t.float()\n t_clamped = t.clamp(stat_min, stat_max)\n hist = torch.histc(\n t_clamped.cpu(), bins=self.nbins, min=stat_min, max=stat_max\n )\n return hist\n\n with torch.no_grad():\n for idx, input in enumerate(inputs):\n stat_min, stat_max = self._get_min_max(\n module.distiller_name, \"inputs\", idx\n )\n curr_hist = get_hist(input, stat_min, stat_max)\n module.input_hists[idx] += curr_hist\n\n stat_min, stat_max = self._get_min_max(module.distiller_name, \"output\")\n curr_hist = get_hist(output, stat_min, stat_max)\n module.output_hist += curr_hist\n\n def _reset(self, module):\n num_inputs = len(self.act_stats[module.distiller_name][\"inputs\"])\n module.input_hists = module.input_hists = [\n torch.zeros(self.nbins) for _ in range(num_inputs)\n ]\n module.output_hist = torch.zeros(self.nbins)\n\n def _start_counter(self, module):\n self._reset(module)\n\n def _reset_counter(self, module):\n if hasattr(module, \"output_hist\"):\n self._reset(module)\n\n def _collect_activations_stats(self, module, activation_stats, name=\"\"):\n if distiller.utils.has_children(module):\n return\n if not hasattr(module, \"output_hist\"):\n return\n\n def get_hist_entry(min_val, max_val, hist):\n od = OrderedDict()\n od[\"hist\"] = hist\n bin_width = (max_val - min_val) / self.nbins\n od[\"bin_centroids\"] = torch.linspace(\n min_val + bin_width / 2, max_val - bin_width / 2, self.nbins\n )\n return od\n\n stats_od = OrderedDict()\n inputs_od = OrderedDict()\n for idx, hist in enumerate(module.input_hists):\n inputs_od[idx] = get_hist_entry(\n *self._get_min_max(module.distiller_name, \"inputs\", idx),\n module.input_hists[idx]\n )\n\n output_od = get_hist_entry(\n *self._get_min_max(module.distiller_name, \"output\"), module.output_hist\n )\n\n stats_od[\"inputs\"] = inputs_od\n stats_od[\"output\"] = output_od\n 
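# Publish this module's record under its fully-qualified layer name, in the\n        # {'inputs': ..., 'output': ...} layout described in the class docstring.\n        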
activation_stats[module.distiller_name] = stats_od\n\n def save(self, fname):\n hist_dict = self.value()\n\n if not fname.endswith(\".pt\"):\n fname = \".\".join([fname, \"pt\"])\n try:\n os.remove(fname)\n except OSError:\n pass\n\n torch.save(hist_dict, fname)\n\n if self.save_imgs:\n msglogger.info(\"Saving histogram images...\")\n save_dir = os.path.join(os.path.split(fname)[0], \"histogram_imgs\")\n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n\n def save_hist(\n layer_name, tensor_name, idx, bin_counts, bin_centroids, normed=True\n ):\n if normed:\n bin_counts = bin_counts / bin_counts.sum()\n plt.figure(figsize=(12, 12))\n plt.suptitle(\n \"\\n\".join((layer_name, tensor_name)), fontsize=18, fontweight=\"bold\"\n )\n for subplt_idx, yscale in enumerate([\"linear\", \"log\"]):\n plt.subplot(2, 1, subplt_idx + 1)\n plt.fill_between(\n bin_centroids, bin_counts, step=\"mid\", antialiased=False\n )\n if yscale == \"linear\":\n plt.ylim(bottom=0)\n plt.title(yscale + \" scale\")\n plt.yscale(yscale)\n plt.xlabel(\"Activation Value\")\n plt.ylabel(\"Normalized Count\")\n plt.tight_layout(rect=[0, 0, 1, 0.93])\n idx_str = \"{:03d}\".format(idx)\n plt.savefig(\n os.path.join(\n save_dir,\n \"-\".join((idx_str, layer_name, tensor_name)) + self.imgs_ext,\n )\n )\n plt.close()\n\n cnt = 0\n for layer_name, data in hist_dict.items():\n for idx, od in data[\"inputs\"].items():\n cnt += 1\n save_hist(\n layer_name,\n \"input_{}\".format(idx),\n cnt,\n od[\"hist\"],\n od[\"bin_centroids\"],\n normed=True,\n )\n od = data[\"output\"]\n cnt += 1\n save_hist(\n layer_name,\n \"output\",\n cnt,\n od[\"hist\"],\n od[\"bin_centroids\"],\n normed=True,\n )\n msglogger.info(\"Done\")\n return fname\n\n\ndef collect_quant_stats(\n model,\n test_fn,\n save_dir=None,\n classes=None,\n inplace_runtime_check=False,\n disable_inplace_attrs=False,\n inplace_attr_names=(\"inplace\",),\n):\n \"\"\"\n Helper function for collecting quantization calibration statistics for a model using QuantCalibrationStatsCollector\n\n Args:\n model (nn.Module): The model for which to collect stats\n test_fn (function): Test/Evaluation function for the model. It must have an argument named 'model' that\n accepts the model. All other arguments should be set in advance (can be done using functools.partial), or\n they will be left with their default values.\n save_dir (str): Path to directory where stats YAML file will be saved. 
If None then YAML will not be saved\n to disk.\n classes (iterable): See QuantCalibrationStatsCollector\n inplace_runtime_check (bool): See QuantCalibrationStatsCollector\n disable_inplace_attrs (bool): See QuantCalibrationStatsCollector\n inplace_attr_names (iterable): See QuantCalibrationStatsCollector\n\n Returns:\n Dictionary with quantization stats (see QuantCalibrationStatsCollector for a description of the dictionary\n contents)\n \"\"\"\n msglogger.info(\"Collecting quantization calibration stats for model\")\n quant_stats_collector = QuantCalibrationStatsCollector(\n model,\n classes=classes,\n inplace_runtime_check=inplace_runtime_check,\n disable_inplace_attrs=disable_inplace_attrs,\n inplace_attr_names=inplace_attr_names,\n )\n with collector_context(quant_stats_collector):\n test_fn(model=model)\n msglogger.info(\"Stats collection complete\")\n if save_dir is not None:\n save_path = os.path.join(save_dir, \"acts_quantization_stats.yaml\")\n quant_stats_collector.save(save_path)\n msglogger.info(\"Stats saved to \" + save_path)\n\n return quant_stats_collector.value()\n\n\ndef collect_histograms(\n model,\n test_fn,\n save_dir=None,\n activation_stats=None,\n classes=None,\n nbins=2048,\n save_hist_imgs=False,\n hist_imgs_ext=\".svg\",\n):\n \"\"\"\n Helper function for collecting activation histograms for a model using ActivationHistogramsCollector.\n Will perform 2 passes - one to collect the required stats and another to collect the histograms. The first\n pass can be skipped by passing pre-calculated stats.\n\n Args:\n model (nn.Module): The model for which to collect histograms\n test_fn (function): Test/Evaluation function for the model. It must have an argument named 'model' that\n accepts the model. All other arguments should be set in advance (can be done using functools.partial), or\n they will be left with their default values.\n save_dir (str): Path to directory where histograms will be saved. If None then data will not be saved to disk.\n activation_stats (str / dict / None): Either a path to activation stats YAML file, or a dictionary containing\n the stats.
The stats are expected to be in the same structure as generated by QuantCalibrationStatsCollector.\n If None, then a stats collection pass will be performed.\n classes: See ActivationHistogramsCollector\n nbins: See ActivationHistogramsCollector\n save_hist_imgs: See ActivationHistogramsCollector\n hist_imgs_ext: See ActivationHistogramsCollector\n\n Returns:\n Dictionary with histograms data (See ActivationHistogramsCollector for a description of the dictionary\n contents)\n \"\"\"\n msglogger.info(\"Pass 1: Stats collection\")\n if activation_stats is not None:\n msglogger.info(\n \"Pre-computed activation stats passed, skipping stats collection\"\n )\n else:\n activation_stats = collect_quant_stats(\n model,\n test_fn,\n save_dir=save_dir,\n classes=classes,\n inplace_runtime_check=True,\n disable_inplace_attrs=True,\n )\n\n msglogger.info(\"Pass 2: Histograms generation\")\n histogram_collector = ActivationHistogramsCollector(\n model,\n activation_stats,\n classes=classes,\n nbins=nbins,\n save_hist_imgs=save_hist_imgs,\n hist_imgs_ext=hist_imgs_ext,\n )\n with collector_context(histogram_collector):\n test_fn(model=model)\n msglogger.info(\"Histograms generation complete\")\n if save_dir is not None:\n save_path = os.path.join(save_dir, \"acts_histograms.pt\")\n histogram_collector.save(save_path)\n msglogger.info(\"Histogram data saved to \" + save_path)\n if save_hist_imgs:\n msglogger.info(\n \"Histogram images saved in \" + os.path.join(save_dir, \"histogram_imgs\")\n )\n\n return histogram_collector.value()\n\n\n@contextmanager\ndef collector_context(collector):\n \"\"\"A context manager for an activation collector\"\"\"\n if collector is not None:\n collector.reset().start()\n yield collector\n if collector is not None:\n collector.stop()\n\n\n@contextmanager\ndef collectors_context(collectors_dict):\n \"\"\"A context manager for a dictionary of collectors\"\"\"\n if len(collectors_dict) == 0:\n yield collectors_dict\n return\n for collector in collectors_dict.values():\n collector.reset().start()\n yield collectors_dict\n for collector in collectors_dict.values():\n collector.stop()\n\n\nclass TrainingProgressCollector(object):\n def __init__(self, stats={}):\n super(TrainingProgressCollector, self).__init__()\n object.__setattr__(self, \"_stats\", stats)\n\n def __setattr__(self, name, value):\n stats = self.__dict__.get(\"_stats\")\n stats[name] = value\n\n def __getattr__(self, name):\n if name in self.__dict__[\"_stats\"]:\n return self.__dict__[\"_stats\"][name]\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(type(self).__name__, name)\n )\n\n def value(self):\n return self._stats\n","repo_name":"anonymous47823493/EagleEye","sub_path":"distiller/data_loggers/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":36150,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"47"}
+{"seq_id":"25637347371","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0018_siteconfiguration_payment_support_url'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='siteconfiguration',\n name='affiliate_cookie_name',\n field=models.CharField(default=b'', help_text='Name of cookie storing affiliate data.', max_length=255, verbose_name='Affiliate Cookie Name', blank=True),\n ),\n migrations.AddField(\n model_name='siteconfiguration',\n name='utm_cookie_name',\n field=models.CharField(default=b'',
help_text='Name of cookie storing UTM data.', max_length=255, verbose_name='UTM Cookie Name', blank=True),\n ),\n ]\n","repo_name":"openedx/ecommerce","sub_path":"ecommerce/core/migrations/0019_auto_20161012_1404.py","file_name":"0019_auto_20161012_1404.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"47"}
+{"seq_id":"3827588704","text":"from django.urls import path\n\nfrom . import views\n\n# Use 'posts' as this urls module's app_name\n\napp_name = 'posts'\n\nurlpatterns = [\n # Patterns in posts.urls have the prefix '/posts/'\n path('', views.post_list, name='post-list'),\n path('create/', views.post_create, name='post-create'),\n path('/comments/create/', views.comment_create, name='comment-create'),\n path('tag-search/', views.tag_search, name='tag-search'),\n path('/like-toggle', views.post_like_toggle, name='post-like-toggle'),\n path('/delete/', views.post_delete, name='post-delete'),\n # path('', views.post_detail, name='post-detail'),\n]\n","repo_name":"orca9s/instagram","sub_path":"app/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33632385204","text":"from __future__ import unicode_literals\nimport codecs\nimport numpy as np\nfrom nltk.tag.perceptron import PerceptronTagger\nimport nltk\n\nclass DummyPosTagger:\n def tag(self, tokens):\n return [(token, \"DUMMY\") for token in tokens]\n\nclass NERTagger:\n def __init__(self):\n self.pos_tagger = PerceptronTagger()\n\n def tag(self, tokens):\n tree = nltk.ne_chunk(self.pos_tagger.tag(tokens))\n tagged_tokens = []\n for t in tree:\n if type(t) == nltk.tree.Tree:\n label = t.label()\n for token in t:\n tagged_tokens.append((token[0], label))\n else:\n tagged_tokens.append(t)\n return tagged_tokens\n","repo_name":"ttakamura/en_marker","sub_path":"lib/text_tagger.py","file_name":"text_tagger.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13552869442","text":"\nimport sys\n\n## Sets up a Pygame display and controller for the given Game with the given environment dimensions.\n#\n# `env_size` should be passed as a tuple of (width, height).\n#\n# Returns the gameDisplay from `PG.display.set_mode`.
This is useful for\n# setting up a DrawTool later.\n#\ndef setup_pygame_window(sim, env_size, window_title='Pygame Window'):\n\timport pygame as PG\n\n\tPG.init()\n\tgameDisplay = PG.display.set_mode(env_size)\n\tPG.display.set_caption(window_title)\n\tdef render_pygame(*args):\n\t\tPG.display.update()\n\tsim.add_trigger('post_update_display', render_pygame)\n\n\t## Handles pygame events.\n\t#\n\t# Processes any received keypresses or mouse clicks.\n\t#\n\tdef handle_pygame_events():\n\t\tfor event in PG.event.get():\n\t\t\tif event.type == PG.QUIT:\n\t\t\t\tsim.quit()\n\t\t\telif event.type == PG.KEYDOWN:\n\t\t\t\tif event.key == PG.K_u:\n\t\t\t\t\tpass # image-update hotkey; no handler is available in this scope\n\t\t\t\telif event.key == PG.K_q:\n\t\t\t\t\tsim.quit()\n\t\t\t\telif event.key == PG.K_e:\n\t\t\t\t\tsim._display_every_frame = (not sim._display_every_frame)\n\t\t\t\telif event.key == PG.K_p:\n\t\t\t\t\tsim.pause()\n\t\t\t\telif event.key == PG.K_s:\n\t\t\t\t\tsim.step()\n\tsim.add_trigger('pre_frame', handle_pygame_events)\n\n\treturn gameDisplay\n\n\ndef create_default_cmdline_parser():\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description=\"Safe Navigation simulator\", prog=sys.argv[0])\n\tparser.add_argument('--show-real-time-plot',\n\t\t\thelp='Show a real-time plot of PDFs',\n\t\t\tdest='show_real_time_plot',\n\t\t\tdefault=False,\n\t\t\taction='store_true'\n\t);\n\tparser.add_argument('--display-every-frame',\n\t\t\thelp='Display every frame',\n\t\t\tdest='display_every_frame',\n\t\t\tdefault=False,\n\t\t\taction='store_true'\n\t);\n\tparser.add_argument('--unique-id',\n\t\t\thelp='A unique identifier for this simulation (printed in the CSV output). If omitted, a random identifier is generated.',\n\t\t\tdest='unique_id',\n\t\t\tdefault='',\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--robot-movement-momentum',\n\t\t\thelp='Momentum for robot movement (range 0 to 1, 0 means no momentum)',\n\t\t\tdest='robot_movement_momentum',\n\t\t\ttype=float,\n\t\t\tdefault=0.0,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--max-fps',\n\t\t\thelp='Max number of frames per second. Note that setting this to a small value will NOT improve the performance of the simulation, because it runs at one step per frame. For best performance, set this to a very high value, but low values may be useful for debugging.',\n\t\t\tdest='max_fps',\n\t\t\ttype=int,\n\t\t\tdefault=0,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--max-steps',\n\t\t\thelp='Maximum number of steps to take before terminating the simulation.
Defaults to 1,000,000',\n\t\t\tdest='max_steps',\n\t\t\ttype=int,\n\t\t\tdefault=1000000,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--robot-speed',\n\t\t\thelp='Base speed of the robot',\n\t\t\tdest='robot_speed',\n\t\t\ttype=float,\n\t\t\tdefault=6,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--map-modifier-num',\n\t\t\thelp='Numeric ID of desired map modifier.',\n\t\t\tdest='map_modifier_num',\n\t\t\ttype=int,\n\t\t\tdefault=-1,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--speed-mode',\n\t\t\thelp='Speed mode of the Obstacles and the Robot',\n\t\t\tdest='speedmode',\n\t\t\ttype=int,\n\t\t\tdefault=1,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--radar-range',\n\t\t\thelp='Range of the radar, in pixels',\n\t\t\tdest='radar_range',\n\t\t\ttype=float,\n\t\t\tdefault=100,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--radar-resolution',\n\t\t\thelp='Resolution of the radar, in pixels',\n\t\t\tdest='radar_resolution',\n\t\t\ttype=float,\n\t\t\tdefault=4,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--map-name',\n\t\t\thelp='Name of the map file to test on',\n\t\t\tdest='map_name',\n\t\t\ttype=str,\n\t\t\tdefault='',\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--start-delay',\n\t\t\thelp='Number of seconds to wait before starting',\n\t\t\tdest='start_delay',\n\t\t\ttype=int,\n\t\t\tdefault=0,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--window-title',\n\t\t\thelp='What to set the window title to',\n\t\t\tdest='window_title',\n\t\t\ttype=str,\n\t\t\tdefault='Robot Simulator',\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--output-prng-state',\n\t\t\thelp='Include the starting state of the PRNG in the final output (pickled, encoded as base64)',\n\t\t\tdest='output_prng_state',\n\t\t\tdefault=False,\n\t\t\taction='store_true'\n\t);\n\tparser.add_argument('--prng-start-state',\n\t\t\thelp='Base64-encoded pickle of the starting state for the PRNG',\n\t\t\tdest='prng_start_state',\n\t\t\ttype=str,\n\t\t\tdefault=None,\n\t\t\taction='store'\n\t);\n\tparser.add_argument('--params-file',\n\t\t\thelp='JSON file containing values for various simulator/algorithm parameters',\n\t\t\tdest='params_file',\n\t\t\ttype=str,\n\t\t\tdefault='',\n\t\t\taction='store'\n\t);\n\n\n\treturn parser\n\n\ndef load_params(params_file):\n\timport json\n\n\tparams = {'robots': {}}\n\n\tif params_file is None or params_file == '':\n\t\treturn params\n\n\twith open(params_file) as f:\n\t\tparams = json.load(f)\n\n\tif 'robots' not in params:\n\t\tparams['robots'] = {}\n\n\treturn params\n","repo_name":"mdarcy220/safe-navigation","sub_path":"GameSetupUtils.py","file_name":"GameSetupUtils.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"16339959548","text":"import argparse\nimport json\nimport os\nfrom tqdm import tqdm\nfrom scipy.stats import chisquare\n\n\ndef find_missing(d1, d2):\n missing = dict()\n for tree, stats in tqdm(d1.items()):\n if tree not in d2.keys():\n missing[tree] = stats\n return missing\n\ndef compare_distributions(d1, d2):\n results = dict()\n expected_frequencies = list()\n observed_frequencies = list()\n total_d1 = sum([t['count'] for t in d1.values()])\n total_d2 = sum([t['count'] for t in d2.values()])\n missing_count = 0\n for tree, stats in tqdm(d1.items()):\n expected_frequencies.append((stats['count']/total_d1)*total_d2)\n if tree not in d2.keys():\n portion_of_test = stats['count']/total_d1\n portion_of_train = 0\n
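# a tree shape that never appears in the training set contributes its entire test-set share to the gap\n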
missing_count += 1\n results[tree] = {'prop_diff': portion_of_test,\n 'test_portion': portion_of_test,\n 'train_portion': portion_of_train,\n 'example': stats['texts'][0]\n }\n observed_frequencies.append(0)\n else:\n observed_frequencies.append(d2[tree]['count'])\n portion_of_test = stats['count']/total_d1\n portion_of_train = d2[tree]['count']/total_d2\n results[tree] = {'prop_diff': portion_of_test-portion_of_train,\n 'test_portion': portion_of_test,\n 'train_portion': portion_of_train,\n 'example': stats['texts'][0]\n }\n sorted_results = dict(sorted(results.items(), key=lambda item:\n abs(item[1]['prop_diff']),\n reverse=True))\n total_share_difference = sum([abs(v['prop_diff']) for v in\n sorted_results.values()])\n\n chisq, p = chisquare(observed_frequencies, expected_frequencies,\n ddof=len(expected_frequencies)-1)\n return {'results': {**sorted_results}, \n 'missing_proportion': missing_count/total_d1,\n 'test_length':len(d1), 'training_length':len(d2), \n 'total_difference': total_share_difference,\n 'chisq': chisq, 'p': p}\n\n\nif __name__ == \"__main__\":\n p = argparse.ArgumentParser()\n p.add_argument('-d', '--data_sets', type=str, required=True, nargs=2)\n p.add_argument('-o', '--output_path', type=str, required=True)\n p.add_argument('-f', '--full-comparison', dest='full', action='store_true')\n options = p.parse_args()\n test_set_fp = options.data_sets[0]\n train_set_fp = options.data_sets[1]\n with open(test_set_fp, 'r') as f:\n d1 = json.load(f)\n with open(train_set_fp, 'r') as f:\n d2 = json.load(f)\n if options.full:\n res = compare_distributions(d1, d2)\n file_name = f\"{test_set_fp.split('.')[0]}_{train_set_fp.split('.')[0]}_comparison.json\"\n with open(os.path.join(options.output_path, file_name), 'w') as f:\n json.dump(res, f, indent=2) \n else:\n missing = find_missing(d1, d2)\n with open(os.path.join(options.output_path, 'missing.json'), 'w') as f:\n json.dump(missing, f,indent=2) \n","repo_name":"adamjhawley/GraDiAn","sub_path":"src/gradian/compare_tree_counts.py","file_name":"compare_tree_counts.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70935600784","text":"# .. In previous homework task 4, you wrote a cache function that remembers other function output value.\n# .. Modify it to be a parametrized decorator, so that the following code::\n\n# .. @cache(times=3)\n# .. def some_function():\n# .. pass\n\n# .. Would give out cached value up to `times` number only.\n# .. Example::\n\n# .. @cache(times=2)\n# .. def f():\n# .. return input('? ') # careful with input() in python2, use raw_input() instead\n\n# .. >>> f()\n# .. ? 1\n# .. '1'\n# .. >>> f() # will remember previous value\n# .. '1'\n# .. >>> f() # but use it up to two times only\n# .. '1'\n# .. >>> f()\n# .. ? 2\n# .. '2'\n\nfrom typing import Callable\nfrom collections import defaultdict\n\n\ndef cache(times: int) -> Callable:\n if times < 0:\n raise ValueError(\"times must be non-negative\")\n\n def custom_hash(func: Callable):\n log = defaultdict(list)\n\n def my_wrapper(*args, **kwargs):\n if times == 0:\n return func(*args, **kwargs)\n if not args in log:\n log[args].append(1)\n log[args].append(func(*args, **kwargs))\n return log[args][1]\n if log[args][0] < times:\n log[args][0] += 1\n return log[args][1]\n return log.pop(args)[1]\n\n return my_wrapper\n\n return custom_hash\n\n\n# @cache(times=1)\n# def f():\n# return input('? 
')\n\n\nif __name__ == \"__main__\":\n ...\n","repo_name":"Amudah41/EPAM_homeworks","sub_path":"hw3/tasks/task31.py","file_name":"task31.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10628357931","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.serializers import json\nfrom django.forms import model_to_dict\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.db.models import Q\nfrom collections import OrderedDict\n\nimport calendar\nfrom datetime import datetime, date, timedelta\n\n# Create your views here.\nfrom .models import Cost\nfrom .models import Plots\nfrom .models import PatientFinancial\nfrom .forms import CostForm\nfrom .forms import PlotForm\n\n@login_required\ndef list_costs(request, year=None, month=None):\n begin = end = None\n if not year and not month:\n range = calendar.monthrange(datetime.now().year, datetime.now().month)\n begin = datetime.now().replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n end = datetime.now().replace(day=range[1], hour=0, minute=0, second=0, microsecond=0)\n else:\n range = calendar.monthrange(year, month)\n begin = datetime.now().replace(day=1, month=month, year=year, hour=0, minute=0, second=0, microsecond=0)\n end = datetime.now().replace(day=range[1], month=month, year=year, hour=0, minute=0, second=0, microsecond=0)\n \n costs = Cost.objects.all()\n financier = {\"input\": 0, \"output\": 0, \"opened\": 0, \"balance\": 0}\n all = {}\n monthly_plots_pay = Plots.objects.filter(paid_day__range=[begin, end])\n monthly_plots = Plots.objects.filter(paid_day=None, date__range=[begin, end])\n monthly_costs = Cost.objects.filter(cost_type=True)\n for cost in monthly_costs:\n cost_as_plot = cost.as_plot(year, month)\n if not cost_as_plot.paid_day:\n if not (cost_as_plot.date in all.keys()):\n all[cost_as_plot.date] = []\n all[cost_as_plot.date].append(cost_as_plot)\n else:\n if not (cost_as_plot.paid_day in all.keys()):\n all[cost_as_plot.paid_day] = []\n all[cost_as_plot.paid_day].append(cost_as_plot)\n\n\n # print(monthly_costs)\n for plot in monthly_plots_pay:\n if not (plot.paid_day in all.keys()):\n all[plot.paid_day] = []\n all[plot.paid_day].append(plot)\n for plot in monthly_plots:\n if not (plot.date in all.keys()):\n all[plot.date] = []\n all[plot.date].append(plot)\n all = OrderedDict(sorted(all.items(), key=lambda t: t[0], reverse=True))\n # print(all)\n for value in all.values():\n for plot in value:\n if plot.type == 1:\n if plot.paid_day:\n financier[\"input\"] = financier[\"input\"]+plot.price\n else:\n financier[\"opened\"] = financier[\"opened\"] + plot.price\n else:\n if plot.paid_day:\n if datetime.date(begin) < plot.paid_day < datetime.date(end):\n financier[\"output\"] = financier[\"output\"] + plot.price\n else:\n financier[\"opened\"] = financier[\"opened\"] - plot.price\n financier[\"balance\"] = financier[\"input\"]-financier[\"output\"]\n\n if request.method == \"GET\" and request.is_ajax():\n data = []\n for cost in costs:\n data.append(cost.as_dict())\n return JsonResponse(data, safe=False)\n return render(request, 'accounting/list.html', {'costs': costs, 'all': all, \"financier\": financier, 'today': date.today(), 'year': year, 'month': month})\n\n@login_required\ndef edit_plot(request, id):\n plot= Plots.objects.get(pk=id)\n old_plot_price= plot.price\n form =
PlotForm(request.POST or None, request.FILES or None, instance=plot)\n if request.method ==\"POST\" and form.is_valid():\n patient_financial = plot.patient_financial\n plot = form.save()\n if plot.price==0:\n patient_financial.amount-=old_plot_price\n if plot.input:\n patient_financial.amount_paid=0\n plot.delete()\n patient_financial.num_plots-=1\n patient_financial.save()\n messages.add_message(request, messages.SUCCESS, 'Parcela Excluida Pois o Valor Estava Zerado!')\n return redirect('view_schedules', id=plot.patient_financial.consultation.id)\n\n if plot.price>old_plot_price:\n diff = plot.price-old_plot_price\n patient_financial.amount+=diff\n else:\n diff= old_plot_price-plot.price\n patient_financial.amount-=diff\n # print(\"Valor Antigo da Parcela:\"+str(old_plot_price))\n # print(\"Valor Atual da Parcela:\"+str(plot.price))\n # print(\"Diferença:\"+str(diff))\n if plot.input:\n patient_financial.amount_paid=plot.price\n patient_financial.save()\n messages.add_message(request, messages.SUCCESS, 'Valor da Parcela Alterado!')\n return redirect('view_schedules', id=plot.patient_financial.consultation.id)\n return render(request, 'plots/edit.html', {'form': form,'consultation_id':plot.patient_financial.consultation.id})\n\n@login_required\ndef new_cost(request):\n cost_form = CostForm(request.POST or None, request.FILES or None)\n if request.method == \"POST\":\n if cost_form.is_valid():\n cost = cost_form.save()\n if not cost.cost_type:\n if cost.payment_form:\n plots = Plots()\n plots.create(cost.amount, cost.payday, cost, 2)\n else:\n plots_price = cost.amount/cost.num_plots\n for i in range(0, cost.num_plots):\n plots = Plots()\n date = cost.payday + timedelta(days=(30 * (i + 1)))\n plots.create(plots_price, date, cost, 2)\n messages.add_message(request, messages.SUCCESS, 'Lançamento Cadastrado')\n else:\n cost.payday = cost.payday.replace(month=datetime.now().month)\n cost.save()\n return redirect('list_costs')\n return render(request, 'accounting/new.html', {'cost_form': cost_form})\n\n\n@login_required\ndef pay_plot(request, location, id, year=None, month=None):\n patient_financier = None\n if location == \"consultation\" or location == \"financier\":\n plot = Plots.objects.get(pk=id)\n plot.pay(datetime.now().date())\n messages.add_message(request, messages.SUCCESS, 'Pagamento da Parcela Realizado!')\n if plot.patient_financial:\n patient_financier = PatientFinancial.objects.get(pk=plot.patient_financial.id)\n else:\n cost = Cost.objects.get(pk=id)\n plot = cost.as_plot(year, month)\n plot.pay(datetime.now())\n if \"financier\" in location:\n return redirect('list_costs')\n return redirect('view_schedules', id=patient_financier.consultation.id)\n\n@login_required\ndef delete_cost(request, id):\n cost = Cost.objects.get(pk=id)\n if cost.delete():\n messages.add_message(request, messages.SUCCESS, 'Custo Deletado!')\n return redirect('list_costs')","repo_name":"gabrielhrp31/sisclin","sub_path":"financier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"36410162678","text":"'''\nThis bot upvotes LINK SUBMISSIONS ONLY (replies too)\n\nCHANGELOG:\n-all reading/saving removed (ctrl-f SKIP READING FROM FILE)\n-latest visit_time is START TIME, no more save last voted\n'''\n\nimport praw\nimport time\nfrom bot_class import Bot_Instance\nfrom bot_class import Perma_to_subreddit\nfrom random import randint\n\nclass SubVoteBot(Bot_Instance):\n\n
'''==================================================================//\n Init\n super(D, self).__init__()\n //=================================================================='''\n def __init__(self, threadID, user_agent, handler, latest_visit_time_txt):\n\n #login--------------------------------------//\n super(SubVoteBot, self).__init__(threadID, user_agent, handler)\n\n #Variables---------------------//\n self.latest_sub_visit_time = time.time() #the last submission created time in previous visit\n self.latest_sub_ID = 0 #ID of latest visited post (So we do not go to it again)\n\n ''' SKIP READING FROM FILE\n #LATEST SUB VISIT TIME: load from save file----------------------//\n fo = open(latest_visit_time_txt, \"r\")\n full = fo.read(100).split('\\n')\n\n #if not first time, get the last latest time-------------------------//\n self.latest_sub_visit_time = float(full[0])\n self.latest_sub_ID = full[1]\n fo.close()\n\n #Get the latest time from previous visit---------------------------------------//\n self.latest_visit_time_txt = latest_visit_time_txt\n '''\n\n '''==================================================================//\n Check whether the sub's time makes it skippable\n //=================================================================='''\n def Check_sub_time(self, sub):\n\n #if the submission is newer than the last visit time, do not skip it--------------------------------------------//\n if(sub.created_utc >= self.latest_sub_visit_time):\n return False\n return True\n\n '''==================================================================//\n Get reply\n //=================================================================='''\n def Reply(self, subname, comment):\n\n #the total karma given in string form------------------------------------//\n link_uv_str = str(Bot_Instance.link_uv_count[subname])\n comment_uv_str = str(Bot_Instance.comment_uv_count[subname])\n\n #overall-------------------------------------------------------------------//\n overall_link = 0\n overall_comment = 0\n for i in Bot_Instance.sub_list:\n overall_link += Bot_Instance.link_uv_count[i]\n overall_comment += Bot_Instance.comment_uv_count[i]\n\n reply_str = \"\"\n\n if(subname == 'fansOfHahahahut3' or subname == 'fansOfHahahahut4'):\n reply_str += \"Upvoted link post\\n\\n\"\n reply_str += \"##Karma given today:\\n\\n\"\n reply_str += \"#####\" + subname + \":\\n\\n\"\n reply_str += \">Link:`\" + link_uv_str + \"`\" + \" Comment:`\" + comment_uv_str + \"`\\n\\n\"\n reply_str += \"#####Overall:\\n\\n\"\n reply_str += \">Link:`\" + str(overall_link) + \"`\" + \" Comment:`\" + str(overall_comment) + \"`\\n\\n\"\n reply_str += \"***\\n\\n^FUCK ^UR ^LAO ^MA\"\n else:\n\n which_reply = randint(0,4)\n\n if(which_reply == 0):\n reply_str += \"Upvoted you:)\"\n elif(which_reply == 1):\n reply_str += \"Upvoted\"\n elif(which_reply == 2):\n reply_str += \"here catch\"\n elif(which_reply == 3):\n reply_str += \"Donee\"\n elif(which_reply == 4):\n reply_str += \"Have an upvote:)\"\n\n comment.add_comment(reply_str)\n\n '''==================================================================//\n main loop\n //=================================================================='''\n def run(self):\n\n #Call super----------------------------//\n super(SubVoteBot, self).run()\n\n sub_stream = praw.helpers.submission_stream(self.r, Bot_Instance.subs_string, 1)\n\n #loop thru submissions from earliest to latest--------------------------------//\n for c in sub_stream:\n\n if(not c.author): #if deleted\n continue\n\n #login access-----------------------------------------//\n
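# presumably refreshes the OAuth access token via the handler from bot_class (assumption: bot_class is not included in this file)\n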
self.o.refresh()\n\n #submission ID check-------------------------------//\n if( self.Check_sub_time(c) == True):\n #print(\"---skipping....\")\n continue\n\n #if like b4... do not go in again------------------------//\n #if(c.likes == True or c.likes == False):\n # continue\n if(c.id == self.latest_sub_ID): #test ID check\n continue\n\n\n #print title--------------------------------------------//\n if('https://www.reddit.com/r/' not in c.url):\n\n #reset data if end of day--------------------------//\n Bot_Instance.CheckData()\n\n subname = Perma_to_subreddit(c.permalink)\n #print(\"Sub Text: %s\" %c.permalink)\n\n c.upvote()\n Bot_Instance.link_uv_count[subname] += 1 #upvoted a link\n\n #reply comment made------------------------------//\n self.Reply(subname, c)\n\n #save and sleep for 2 secs-----------------------------------//\n #self.save_sub_time_and_data(c) #save latest submission time and data\n time.sleep(2)\n\n #else:\n #print(\"IS NOT LINK POST: %s\" %c.url)\n\n #exit---------------------------------//\n if(self.end_now == True):\n break\n\n '''==================================================================//\n Save data\n //=================================================================='''\n def save_sub_time_and_data(self, submission):\n\n self.latest_sub_visit_time = submission.created_utc\n self.latest_sub_ID = submission.id\n\n ''' SKIP READING FROM FILE\n #save the latest time----------------//\n fo = open(self.latest_visit_time_txt, \"w\") #write the updated index to txt file\n fo.write(str(self.latest_sub_visit_time) + '\\n')\n fo.write(str(self.latest_sub_ID))\n fo.close()\n '''\n\n #save the latest data---------------------------//\n Bot_Instance.CheckData()\n\n '''==================================================================//\n Exit\n //=================================================================='''\n def exit(self):\n\n #Call super----------------------------//\n super(SubVoteBot, self).exit()","repo_name":"Mamama22/VoteBot","sub_path":"bot_vote_sub.py","file_name":"bot_vote_sub.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5464542475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 4 13:21:39 2020\r\n\r\n@author: HenrryPuente\r\n\"\"\"\r\ncinicial=int(input(\"contador inicial : \"))\r\ncfinal=int(input(\"contador final : \"))\r\nif cfinal 0:\n\t\tfor (x, y, w, h) in faces:\n\t\t\tcropped = frame[y - int(h / 10):y + h + int(h/10), x - int(w / 10):x + w + int(w / 10)]\n\t\t\tcv2.imwrite(\"./face_img/\" + str(datetime.now()) + \".png\", cropped)\n\t\tbreak\n\n\t# Display the resulting frame\n\tcv2.imshow('frame', frame)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\ncv2.imshow('cropped', cropped)\ncv2.waitKey(0)\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"yoojihwan/2019_winter_internship","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5882194859","text":"# Michelle Liang, liangmic@usc.edu\n# ITP 115, Spring 2020\n# Assignment 1\n\n# Description:\n# This program creates a Mad Libs story by taking the players input and printing it out.\n\ndef main():\n # Gather input in string form\n number = input(\"Enter an integer: \") # needs converting to integer and then back to string\n animal = input(\"Enter an animal (plural if 
number entered is greater than 1): \")\n verb = input(\"Enter a verb that ends with -ing: \")\n place = input(\"Enter a place: \")\n name = input(\"Enter a name: \")\n adjective = input(\"Enter an adjective: \")\n number2 = input(\"Enter another integer: \")\n number3 = input(\"Enter another integer: \")\n number4 = input(\"Enter another integer that is smaller than the previous one: \")\n decimal = input(\"Enter a number that includes a decimal: \")\n\n # math\n result = int(number3) - int(number2)\n\n # convert integers and floats to string\n number = str(number)\n number2 = str(number2)\n number3 = str(number3)\n decimal = str(decimal)\n result = str(result)\n\n # Print the statement - filled in\n print(\"\\nThere were \\\"\" + number + \"\\\" young \\\"\" + animal + \"\\\" that loved going out and \\\"\" + verb + \"\\\" around\"\n \" \\\"\" + place + \"\\\". \\nOne day they saw Mrs.\\\"\" + name + \"\\\" and decided to stop to ask her a question. \\n\"\n \"They asked: \\\"Why are you not \\\"\" + adjective + \"\\\"?\\\" \\n\"\n \"She replied: \\\"Back in my day, when I was born \\\"\"\n + number2 + \"\\\" years ago, only \\\"\" + number3 + \"\\\" people dared to ask me this question.\"\n \"\\nNow only \\\"\" + number4 + \"\\\" of them still live today. That means that I got rid of \\\"\" + result +\n \"\\\" of them.\\\" \\nThe children paled in fear and promised to stay a minimum of \\\"\"\n + decimal + \"\\\" kilometers away from her forever.\")\n\nmain()\n","repo_name":"mliang810/itp115","sub_path":"Assignments/A1.MadLibs.py","file_name":"A1.MadLibs.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29756528701","text":"#\n# [3] Longest Substring Without Repeating Characters\n#\n# https://leetcode.com/problems/longest-substring-without-repeating-characters/description/\n#\n# algorithms\n# Medium (25.65%)\n# Total Accepted: 684.2K\n# Total Submissions: 2.7M\n# Testcase Example: '\"abcabcbb\"'\n#\n# Given a string, find the length of the longest substring without repeating\n# characters.\n#\n#\n# Example 1:\n#\n#\n# Input: \"abcabcbb\"\n# Output: 3\n# Explanation: The answer is \"abc\", with the length of 3.\n#\n#\n#\n# Example 2:\n#\n#\n# Input: \"bbbbb\"\n# Output: 1\n# Explanation: The answer is \"b\", with the length of 1.\n#\n#\n#\n# Example 3:\n#\n#\n# Input: \"pwwkew\"\n# Output: 3\n# Explanation: The answer is \"wke\", with the length of 3.\n# ⁠ Note that the answer must be a substring, \"pwke\" is a\n# subsequence and not a substring.\n#\n#\n#\n#\n#\n#\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) == 0:\n return 0\n dp = [0] * len(s)\n ss = {}\n for idx, x in enumerate(s):\n if idx == 0:\n dp[0] = 1\n ss[x] = 0\n continue\n if x not in ss.keys():\n dp[idx] = dp[idx-1] + 1\n else:\n dp[idx] = min(dp[idx-1] + 1, idx - ss[x])\n ss[x] = idx\n return max(dp)\n","repo_name":"dwy927/leetcode","sub_path":"medium/3.longest-substring-without-repeating-characters/3.longest-substring-without-repeating-characters.python3.py","file_name":"3.longest-substring-without-repeating-characters.python3.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29987809474","text":"#Import the folium package for making maps\nimport folium\n\n#Create a map, centered (0,0), and zoomed out a bit:\nmapWorld = folium.Map(location=[0, 
0],zoom_start=3)\n\nfolium.Marker(location = [23.777176, 90.399452], popup = \"Where I'm from\").add_to(mapWorld)\nfolium.Marker(location = [35.964668, -83.926453], popup = \"My first home in the US\").add_to(mapWorld)\nfolium.Marker(location = [40.730610, -73.935242], popup = \"Where I currently live\").add_to(mapWorld)\n\naline=folium.PolyLine(locations=[(23.777176, 90.399452),(35.964668, -83.926453)],weight=2,color = 'blue').add_to(mapWorld)\nbline=folium.PolyLine(locations=[(35.964668, -83.926453), (40.230610, -74.935242)],weight=2,color = 'blue').add_to(mapWorld)\n\n\n#Save the map:\nmapWorld.save(outfile='Map.html')","repo_name":"tnameera/website","sub_path":"files/foliumCode.py","file_name":"foliumCode.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"74951486906","text":"from django import template\nfrom django.template import Library, loader, Context\nfrom django.contrib.sites.models import Site\n\nimport markdown\n\nregister = template.Library()\n\n@register.simple_tag\ndef build_section(section):\n objects = None\n project = section.project\n\n if section.section.content_type:\n objects = section.section.content_type.model_class().objects.filter(project=project, published=True)\n\n\n t = template.Template(section.template.content)\n return t.render(Context({\n 'project': project,\n 'title': section.section.title,\n 'backgroun_color': section.section.background_color,\n 'icon': section.section.icon,\n 'order': section.order,\n 'objects': objects\n }))\n\n\n@register.filter\ndef markdownify(text):\n return markdown.markdown(text)\n","repo_name":"argaen/django_zonecms","sub_path":"djzone_cms/templatetags/project_tags.py","file_name":"project_tags.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"43612522286","text":"import json\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import FormMixin\n\nfrom .models import Landing\n\n\nclass LandingView(TemplateView):\n\n def get(self, request, *args, **kwargs):\n self.piece = request.META[\"PATH_INFO\"].split('/')[2]\n self.object = Landing.objects.get(slug__exact=self.piece)\n self.template_name = self.object.get_template_path\n return super(LandingView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = self.object\n css = self.object.template.static_files.filter(file_type=0)\n context['css_static_files'] = [{'file': x.make_file_path} for x in css]\n js = self.object.template.static_files.filter(file_type=2)\n context['js_static_files'] = [{'file': x.make_file_path} for x in js]\n images = self.object.landingimage_set.all()\n for i in images:\n context[f'back{i.position}'] = i\n return context\n","repo_name":"CrazyChief/album_agency","sub_path":"album_agency/landings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"38977186851","text":"'''\n\nLinear SVM classifier using polynomial features\n\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_moons\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom
sklearn.preprocessing import PolynomialFeatures\n\nX,y = make_moons(n_samples=100,noise=0.15,random_state=42)\nprint(X)\nprint(y)\n# Build a pipeline\npolynomial_svm_clf = Pipeline([\n ('poly_features',PolynomialFeatures(degree=3)), # 3rd-degree polynomial\n ('scaler',StandardScaler()),\n ('svm_clf',LinearSVC(C=10,loss='hinge',random_state=42))\n])\n\npolynomial_svm_clf.fit(X,y)\n\n# Plot the sample points\ndef plot_dataset(X,y,axes):\n # Blue squares for samples classified as 0\n plt.plot(X[:,0][y==0],X[:,1][y==0],'bs')\n # Green triangles for samples classified as 1\n plt.plot(X[:,0][y==1],X[:,1][y==1],'g^')\n plt.axis(axes)\n plt.grid(True)\n plt.xlabel('$x_1$',fontsize=18)\n plt.ylabel('$x_2$', fontsize=18,rotation=0)\n# Used to plot the decision boundary\ndef plot_predictions(clf,axes):\n x0s = np.linspace(axes[0],axes[1],100)\n x1s = np.linspace(axes[2],axes[3],100)\n x0,x1 = np.meshgrid(x0s,x1s)\n X = np.c_[x0.ravel(),x1.ravel()]\n y_pred = clf.predict(X).reshape(x0.shape)\n y_decision = clf.decision_function(X).reshape(x0.shape)\n plt.contourf(x0,x1,y_pred,cmap=plt.cm.brg,alpha=0.2)\nplot_predictions(polynomial_svm_clf,[-1.5,2.5,-1,1.5])\nplot_dataset(X,y,[-1.5,2.5,-1,1.5])\n\nplt.show()\nprint(polynomial_svm_clf.predict([[-0.75,1],[0.5,-0.5]]))\n\n\n\n","repo_name":"geekori/sklearn","sub_path":"src/svm/使用多项式特征的线下SVM分类器.py","file_name":"使用多项式特征的线下SVM分类器.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"}
+{"seq_id":"1097478474","text":"import tkinter as tk\r\n\r\n\r\n# Defining a Function to calculate gst \r\ndef gst():\r\n net_price = float(net_price_entry.get())\r\n orignal_cost = float(orignal_cost_entry.get())\r\n gst_rate = ((net_price - orignal_cost) * 100) / orignal_cost\r\n gst_result_label.config(text=f\"GST Rate: {gst_rate}%\")\r\n\r\n# Making a window\r\nwindow = tk.Tk()\r\n\r\n# Making a label to get entry for net price from the user \r\nnet_price_label = tk.Label(text=\"Enter Net Price\", width=40)\r\nnet_price_label.pack()\r\n\r\nnet_price_entry = tk.Entry()\r\nnet_price_entry.pack()\r\n\r\n# Making a label to get entry for the original cost from the user\r\norignal_cost_label = tk.Label(text='Original cost', width=40)\r\norignal_cost_label.pack()\r\n\r\norignal_cost_entry = tk.Entry()\r\norignal_cost_entry.pack()\r\n\r\n# Creating a button to execute the function to calculate gst\r\ngst_button = tk.Button(text=\"GST Tax Calculator\", command= gst)\r\ngst_button.pack()\r\n\r\n# creating a label to display the results \r\ngst_result_label = tk.Label(window)\r\ngst_result_label.pack()\r\n\r\nwindow.mainloop()","repo_name":"divyamrana22/ITC-7","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"43965993603","text":"import unittest\n\nfrom dummyserver.testcase import HTTPDummyServerTestCase\nfrom urllib3.poolmanager import PoolManager\nfrom urllib3.connectionpool import port_by_scheme\nfrom urllib3.exceptions import MaxRetryError\n\n\nclass TestPoolManager(HTTPDummyServerTestCase):\n base_url = 'http://%s:%d' % (HTTPDummyServerTestCase.host, HTTPDummyServerTestCase.port)\n base_url_alt = 'http://%s:%d' % (HTTPDummyServerTestCase.host_alt, HTTPDummyServerTestCase.port)\n\n def test_redirect(self):\n http = PoolManager()\n\n r = http.request('GET', '%s/redirect' % self.base_url,\n fields={'target': '%s/' % self.base_url},\n redirect=False)\n\n self.assertEqual(r.status, 303)\n\n r = http.request('GET', '%s/redirect' % self.base_url,\n fields={'target': '%s/' % self.base_url})\n\n
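# redirect defaults to True in urllib3's request(), so this time the 303 is followed automatically\n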
self.assertEqual(r.status, 200)\n self.assertEqual(r.data, b'Dummy server!')\n\n def test_cross_host_redirect(self):\n http = PoolManager()\n\n cross_host_location = '%s/echo?a=b' % self.base_url_alt\n try:\n http.request('GET', '%s/redirect' % self.base_url,\n fields={'target': cross_host_location},\n timeout=0.01, retries=1)\n self.fail(\"Request succeeded instead of raising an exception like it should.\")\n\n except MaxRetryError:\n pass\n\n r = http.request('GET', '%s/redirect' % self.base_url,\n fields={'target': '%s/echo?a=b' % self.base_url_alt},\n timeout=0.01, retries=2)\n\n self.assertEqual(r._pool.host, self.host_alt)\n\n def test_missing_port(self):\n # Can a URL that lacks an explicit port like ':80' succeed, or\n # will all such URLs fail with an error?\n\n http = PoolManager()\n\n # By globally adjusting `port_by_scheme` we pretend for a moment\n # that HTTP's default port is not 80, but is the port at which\n # our test server happens to be listening.\n port_by_scheme['http'] = self.port\n try:\n r = http.request('GET', 'http://%s/' % self.host, retries=0)\n finally:\n port_by_scheme['http'] = 80\n\n self.assertEqual(r.status, 200)\n self.assertEqual(r.data, b'Dummy server!')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jtlai0921/urllib3","sub_path":"test/with_dummyserver/test_poolmanager.py","file_name":"test_poolmanager.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71082491067","text":"import LinearModels\nfrom Constants import *\nfrom numpy import matmul\nimport numpy as np\nfrom math import pi, sqrt\nfrom numpy import exp\nimport time\n\n'''\nMain IMM Kalman filter class. Contains the methods to mix, predict and update the states.\n'''\n\n\ndef predict(s, Q, model, delta):\n #Returns sp, qp\n\n if model == MODEL_CVS:\n F,G = LinearModels.cvs(s, delta)\n QW = CVS_QW\n\n if model == MODEL_CTR:\n F,G = LinearModels.ctr(s, delta)\n QW = CTR_QW\n\n if model == MODEL_VOID:\n F,G = LinearModels.no_object(s, delta)\n QW = VOID_QW\n \n\n s2 = matmul(F,s)\n Q2 = matmul(F, matmul(Q, F.transpose())) + matmul(G, matmul(QW, G.transpose()))\n return s2, Q2\n\n \ndef update(s, z, Q, model, delta):\n #Returns su, Qu\n\n if model == MODEL_CVS:\n H = CVS_H\n QN = CVS_QN\n\n if model == MODEL_CTR:\n H = CTR_H\n QN = CTR_QN\n\n if model == MODEL_VOID:\n H = VOID_H\n QN = VOID_QN\n\n #Kalman gain\n V = matmul(H, matmul(Q, H.transpose())) + QN\n Vinv = np.linalg.inv(V)\n\n K = matmul(Q, matmul(H.transpose(), Vinv))\n\n\n # s and Q update\n zp = matmul(H,s)\n z_tilde = z-zp\n s2 = s + matmul(K, z_tilde)\n Q2 = Q - matmul(K, matmul(H, Q))\n\n #Computing model log-likelihood\n n = max(z.shape)\n l = np.log( 1/sqrt((2*pi)**n * np.linalg.det(V)) ) + matmul( -0.5 * z_tilde.transpose(), matmul(Vinv, z_tilde) )\n\n return s2, Q2, l\n\n\n\n\n\nclass KalmanIMM:\n\n def __init__(self, logger):\n\n self.nModels = 3\n self.states = []\n self.covariances = []\n\n #Logging file\n self.logger = logger\n\n self.resetState(0,0)\n\n self.cputime = 0\n self.steps=0\n \n\n\n\n def compute(self, x, y, delta):\n #Function called when new measurement received\n now = time.time()\n\n z = np.matrix([[x], [y]])\n\n #MIXING\n sfm, Qfm, pm = self.mix()\n\n #PREDICTION\n for i in range(self.nModels):\n s, Q = predict(sfm[i], Qfm[i], i, delta)\n self.states[i] = s\n self.covariances[i] = Q\n\n #UPDATE\n for i in range(self.nModels):\n s, Q, l = update(self.states[i], z, self.covariances[i], i, 
delta)\n self.states[i] = np.asarray(s)\n self.covariances[i] = np.asarray(Q)\n self.likelihoods[i] = l\n \n #Update of the model probabilities\n plog = self.likelihoods + np.log(pm)\n plog = plog - np.max(plog)*np.ones(plog.shape)\n\n p = np.exp(plog)\n self.p = (1/np.sum(p))*p #normalizing\n\n\n #Logging\n if self.logger is not None:\n for m in range(self.nModels):\n self.logger.write('states'+str(m), self.states[m])\n self.logger.write('covar'+str(m), self.covariances[m])\n \n self.logger.write('p', self.p)\n self.logger.write('meas', [x, y, delta])\n\n #Measurement of the CPU time\n comtime = time.time()-now\n self.cputime += comtime\n self.steps += 1\n #print(self.cputime/self.steps)\n\n\n return self.states, self.p\n\n\n\n def mix(self):\n\n #Mixing of the models (IMM)\n\n pm = matmul(TRANS, self.p)\n sfm = []\n Qfm = []\n\n for i in range(self.nModels):\n\n if pm[i] > 0.000001:\n sfm.append(np.zeros(self.states[i].shape))\n Qfm.append(np.zeros(self.covariances[i].shape))\n \n for j in range(self.nModels):\n #Call transition function\n s, Q = TRANS_FUNC[j][i](self.states[j], self.covariances[j])\n \n #Mixing\n sfm[i] = sfm[i] + s * TRANS[i][j] * self.p[j]\n Qfm[i] = Qfm[i] + np.asarray(Q + s * s.transpose()) * TRANS[i][j] * self.p[j]\n\n sfm[i] = sfm[i]/pm[i]\n Qfm[i] = Qfm[i]/pm[i] - matmul(sfm[i], sfm[i].transpose())\n\n else:\n sfm.append(self.states[i])\n Qfm.append(self.covariances[i])\n\n return sfm, Qfm, pm\n\n\n def resetState(self, x, y):\n \n #States and covariances initialization\n self.states = []\n self.states.append(np.asarray(np.matrix([[x],[y]]))) # 0 = VOID\n\n self.covariances = []\n self.covariances.append(np.eye(2)) #0 = VOID\n\n for i in range(self.nModels-1):\n s, Q = TRANS_FUNC[0][i+1](self.states[0], self.covariances[0])\n self.states.append(s)\n self.covariances.append(Q)\n\n # Log-Likelihoods\n self.likelihoods = np.zeros((3,1))\n self.p = np.ones((self.nModels, 1))/self.nModels\n \n\n","repo_name":"bjoukovs/SSDP_Handtracking","sub_path":"Kalman.py","file_name":"Kalman.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"25844012472","text":"\"\"\"Permission Model and Its Manager.\"\"\"\nimport datetime\nimport uuid\nfrom sqlalchemy import desc, asc, or_\nfrom sqlalchemy import inspect\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom marshmallow import fields\nfrom app.serializers.role_custom import RoleCustomSchema\n\nfrom .
import db, ma\n\n\nclass Permission(db.Model):\n \"\"\" permission table model \"\"\"\n\n id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)\n name = db.Column(db.String(100), nullable=False)\n code = db.Column(db.String(30), unique=True, nullable=False)\n active = db.Column(db.Boolean, nullable=False)\n created_by = db.Column(db.String(100), nullable=True)\n updated_by = db.Column(db.String(100), nullable=True)\n created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)\n updated_at = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)\n\n def __init__(self, **kwargs):\n \"\"\"constructor.\"\"\"\n self.name = kwargs.get('name')\n self.code = kwargs.get('code')\n self.active = kwargs.get('active')\n\n def save(self, commit=True):\n \"\"\"Permission save method.\"\"\"\n db.session.add(self)\n if commit is True:\n db.session.commit()\n\n def get_permission(self, **kwargs):\n \"\"\" Common method for returning a list of permissions, including filtering & search. \"\"\"\n search = kwargs.get('search', None)\n sort_by = kwargs.get('sort_by', 'active')\n order_by = kwargs.get('order_by', 'asc')\n limit = kwargs.get('limit', 10)\n offset = kwargs.get('offset', 0)\n filter_by = {}\n # collect any per-column filter values passed in kwargs\n mapper = inspect(Permission)\n for column in mapper.attrs:\n if kwargs.get(column.key):\n filter_by[column.key] = kwargs.get(column.key)\n\n all_permission = Permission.query\n if search:\n result = all_permission.filter(\n or_(Permission.name.like('%'+search+'%'), Permission.code.like(\n '%'+search+'%'))).order_by(desc(getattr(Permission, sort_by))\n ).offset(offset).limit(limit).all()\n elif filter_by:\n result = all_permission.filter_by(**filter_by).offset(offset).limit(limit).all()\n # this will sort by field name either asc or desc\n elif sort_by:\n if order_by == 'desc':\n result = all_permission.order_by(desc(getattr(Permission, sort_by))\n ).offset(offset).limit(limit).all()\n else:\n result = all_permission.order_by(asc(getattr(Permission, sort_by))\n ).offset(offset).limit(limit).all()\n else:\n result = all_permission.offset(offset).limit(limit).all()\n return result\n\n\nclass PermissionSchema(ma.ModelSchema):\n \"\"\"Permission Schema \"\"\"\n roles = fields.Nested(RoleCustomSchema, many=True)\n\n class Meta:\n \"\"\" Meta class \"\"\"\n model = Permission\n # fields = (\"id\", \"name\", \"code\", \"active\", 'created_at', 'updated_at')\n # ordered = True\n","repo_name":"ekramulmostafa/ms-auth","sub_path":"app/models/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"72531764989","text":"\"\"\"\n Factory to build catalog of i/o metadata for functions implemented in the front-end\n\n NOTE: These definitions are currently needed in the catalog and director2\n services.
Since it is static data, instead of making a call from\n director2->catalog, it was decided to share as a library\n\"\"\"\n\nimport logging\n\nfrom ..function_services_catalog.services import nodes_group\nfrom ._settings import FunctionServiceSettings\nfrom ._utils import FunctionServices\nfrom .services import (\n demo_units,\n file_picker,\n iter_range,\n iter_sensitivity,\n parameters,\n probes,\n)\n\n_logger = logging.getLogger(__name__)\n\n\ncatalog = FunctionServices(settings=FunctionServiceSettings())\ncatalog.extend(demo_units.services)\ncatalog.extend(file_picker.services)\ncatalog.extend(iter_range.services)\ncatalog.extend(iter_sensitivity.services)\ncatalog.extend(nodes_group.services)\ncatalog.extend(parameters.services)\ncatalog.extend(probes.services)\n","repo_name":"ITISFoundation/osparc-simcore","sub_path":"packages/models-library/src/models_library/function_services_catalog/_registry.py","file_name":"_registry.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"6"}
+{"seq_id":"36689976754","text":"from PyQt5.QtCore import QThread,pyqtSignal\nimport Cartoon\nfrom time import sleep\nimport threading\nfrom os import chdir,makedirs,getcwd,remove\nfrom os.path import isdir\nfrom urllib.request import urlopen,Request\nfrom requests.exceptions import RequestException\nclass homeRecommendThread(QThread):\n homeRecommend_startSignal = pyqtSignal() # the argument types carried by the signal go in the parentheses\n homeRecommend_endSignal = pyqtSignal()\n def __init__(self):\n super().__init__()\n\n def __del__(self):\n self.wait()\n\n def run(self):\n self.spider = Cartoon.CartoonSpider(\"update\")\n self.spider.ifhomeRecommendUpdate()\n self.homeRecommend_endSignal.emit() # emit the signal\n\nclass comicSearch(QThread):\n signal = pyqtSignal(list) # the argument types carried by the signal go in the parentheses\n def __init__(self,name):\n super().__init__()\n self.name = name\n\n def __del__(self):\n self.wait()\n\n def run(self):\n self.spider = Cartoon.CartoonSpider(\"search\")\n self.spider.comicSearch(self.name)\n self.signal.emit(self.spider.comicSearchList)\n\nclass comicDetail(QThread):\n signal = pyqtSignal(list,list,list,str) # the argument types carried by the signal go in the parentheses\n def __init__(self,name,url,choice,img):\n super().__init__()\n self.img = img\n self.choice=choice\n self.url = url\n self.name = name\n def __del__(self):\n self.wait()\n\n def run(self):\n self.spider = Cartoon.CartoonSpider(\"search\")\n if self.choice==1:\n req = Request(self.img,headers = self.spider.headers)\n data = urlopen(req,timeout=10).read()\n with open(\"./home/top/0.jpg\",\"wb+\")as f:\n f.write(data)\n self.info,self.urlList,self.titleList = self.spider.getUrlList(self.url)\n self.signal.emit(self.info,self.urlList,self.titleList,self.name)\n\nclass comicImgList(QThread):\n progress_signal = pyqtSignal(float,int)\n finish_signal = pyqtSignal(str,int,int)\n def __init__(self,urlList,index,name):\n super().__init__()\n self.urlList = urlList\n self.index = index\n self.name = name\n self.threads1 =[]\n self.threads2 = []\n def __del__(self):\n self.wait()\n\n def run(self):\n self.finish = 0\n num = len(self.urlList)\n download = 0\n self.spider = Cartoon.CartoonSpider(\"search\")\n self.img = []\n for i in range(len(self.urlList)):\n #self.progress_signal.emit(progress)\n t1 = MyThread(self.spider.comicImgList,(self.urlList[i],))\n self.threads1.append(t1)\n #t2 = threading.Thread(target=self.spider.comicImgDownload,args=())\n #self.threads2.append(t2)\n path = getcwd()\n downloadPath = path +'\\\\Download'\n if
isdir(downloadPath+'\\\\'+self.name)==True:\n pass\n else:\n makedirs(downloadPath+'\\\\'+self.name)\n downloadPath += '\\\\'+self.name\n for i in range(len(self.urlList)):\n if isdir(downloadPath+'\\\\'+str(i))==True:\n pass\n else:\n makedirs(downloadPath+'\\\\'+str(i))\n for i in range(len(self.threads1)):\n self.threads1[i].setDaemon(True)\n self.threads1[i].start()\n for i in range(len(self.threads1)):\n imgList = self.threads1[i].get_result()\n t2 = MyThread(self.spider.comicImgDownload,(imgList,self.name,i))\n self.threads2.append(t2)\n download+=1\n self.progress_signal.emit(float(download/num)*30,self.index)\n for t in self.threads2:\n t.setDaemon(True)\n t.start()\n for t in self.threads2:\n self.finish += t.get_result()\n self.progress_signal.emit(30+float(self.finish)/num*70,self.index)\n self.finish_signal.emit(self.name,self.index,num)\n\nclass MyThread(threading.Thread):\n def __init__(self, func, args=()):\n super(MyThread, self).__init__()\n self.func = func\n self.args = args\n \n def run(self):\n self.result = self.func(*self.args)\n \n def get_result(self):\n threading.Thread.join(self) # 等待线程执行完毕\n try:\n return self.result\n except Exception:\n return None\n \nclass movie(QThread):\n signal = pyqtSignal(str) # 括号里填写信号传递的参数\n def __init__(self,name):\n super().__init__()\n self.name = name\n def __del__(self):\n self.wait()\n\n def run(self):\n self.spider = Cartoon.CartoonSpider(\"search\")\n movieList = self.spider.movieGet(self.name)\n print(movieList)\n if len(movieList)!=0:\n movie = movieList[0]\n else:\n movie = \"未寻找到该资源\"\n self.signal.emit(movie)","repo_name":"850552586/ComicSpider","sub_path":"EasyDownThread.py","file_name":"EasyDownThread.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1822683693","text":"'* Directed Acyclic graph of n nodes 0 to n-1*'\r\n'* print all the possible paths from 0 to n-1 *'\r\n\r\n\r\ndef allPaths(graph):\r\n q = []\r\n visited = [False for i in range(len(graph))]\r\n path = []\r\n\r\n \r\n def dfs(visted,graph,vertex,q):\r\n print(\"\\tVertex is \",vertex)\r\n q.append(vertex)\r\n visted[vertex] = True\r\n\r\n for v in graph[vertex]:\r\n print(\"\\tv:\",v)\r\n path.append(q.pop())\r\n print(\"\\t\\tPath:\",path)\r\n if not visted[v]:\r\n return dfs(visited,graph,v,q)\r\n \r\n \r\n dfs(visited,graph,0,q)\r\n path.append(q.pop())\r\n print(\"Path:\",path)\r\n print(\"\\n graph:{}\\n visited:{}\\n\".format(q,visited))\r\n\r\n path = []\r\n dfs(visited,graph,1,q)\r\n path.append(q.pop())\r\n print(\"Path:\",path)\r\n print(\"\\n graph:{}\\n visited:{}\\n\".format(q,visited))\r\n \r\n\r\n\r\n\r\ngraph = [[1,2],[3],[3],[]]\r\nallPaths(graph)\r\n","repo_name":"sushasru/LeetCodeCrunch","sub_path":"LeetCode_M_AllPathsFromSourceToTarget.py","file_name":"LeetCode_M_AllPathsFromSourceToTarget.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30489628913","text":"import argparse\nimport random\nimport torch\nimport numpy as np\n\nfrom torch.autograd import Variable\n\nfrom utils import load_checkpoint , load_checkpoint_wo_step\nfrom utils import inception_score\n\nimport models.wgan_model as wgan\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', required=True, help=\"path to generator\")\nparser.add_argument('--dataset', required=True, help=\"cifar-10 | folder | mnist\")\nparser.add_argument('--device', 
default=\"gpu\", help=\"(CPU or GPU)\")\nparser.add_argument('--nc', type=int, default=3, help='input image channels')\nparser.add_argument('--nz', type=int, default=256, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--num_data', type=int, default=1024)\nparser.add_argument('--batchSize', type=int, default=32)\nparser.add_argument('--imageSize', type=int, default=64, help='output image size')\nopt = parser.parse_args()\n\n# =======\n# device\n# =======\ncuda = True if torch.cuda.is_available() else False\nif(opt.device == \"gpu\"):\n if(cuda):\n device = torch.device(\"cuda\")\n else:\n device = torch.device( \"cpu\" )\nelse:\n device = torch.device( \"cpu\" )\n\n# ======\n# model\n# ======\ngenerator = wgan.Generator(img_size=opt.imageSize, nz=opt.nz, nc=opt.nc, ngf=opt.ngf).to(device)\n\n# ===========\n# load model\n# ===========\nG = load_checkpoint_wo_step(generator, device, opt.model)\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n# ====================\n# calc inception score\n# ===================\nscore_z = Variable(Tensor(np.random.normal(0, 1, (opt.num_data, opt.nz, 1, 1)))).to(device)\nscore_imgs = G(score_z)\nscore_mean, score_std = inception_score(imgs=score_imgs, cuda=cuda, batch_size=opt.batchSize, resize=True, splits=1)\nprint(score_mean, score_std)","repo_name":"tiruota/WGAN-PyTorch","sub_path":"inception_socre.py","file_name":"inception_socre.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"71210443707","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport time\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.common.by import By\ndriver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\")\nlist1 = []\nlist2 = []\ndriver.get(\"https://rahulshettyacademy.com/seleniumPractise/#/\")\ndriver.find_element_by_xpath(\"//input[@type='search']\").send_keys(\"ber\")\ntime.sleep(2)\n#veggies = driver.find_elements_by_xpath(\"//h4[@class='product-name']\")\n#for vegie in veggies:\n# print(vegie.text)\nlist = driver.find_elements_by_xpath(\"//div[@class='product-action']/button\")\n#//div[@class='product-action']/button/parent::div/parent::div/h4\nfor i in list:\n list1.append(i.find_element_by_xpath(\"parent::div/parent::div/h4\").text)\n i.click()\nprint(list1)\ndriver.find_element_by_xpath('//img[@alt=\"Cart\"]').click()\n\ndriver.find_element_by_xpath(\"//button[text()='PROCEED TO CHECKOUT']\").click()\nwait = WebDriverWait(driver,7)\nwait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR,'.promoCode')))\nproducts = driver.find_elements_by_css_selector(\".product-name\")\nfor product in products:\n list2.append(product.text)\nprint(list2)\nassert list1 == list2\ndriver.find_element_by_css_selector('.promoCode').send_keys(\"rahulshettyacademy\")\ndriver.find_element_by_css_selector('.promoBtn').click()\nwait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR,\"span.promoInfo\")))\norgamt = driver.find_element_by_css_selector(\".totAmt\").text\nprint(orgamt)\ndiscamt = driver.find_element_by_css_selector(\".discountAmt\").text\nprint(discamt)\nassert int(orgamt) > float(discamt)\nprint(driver.find_element_by_css_selector(\"span.promoInfo\").text)\n\ntotamt = driver.find_elements_by_xpath('//tr/td[5]/p')\nsum = 0\nfor amt in totamt:\n 
sum+=int(amt.text)\nprint(sum)\namtor = int(driver.find_element_by_css_selector('.totAmt').text)\nprint(amtor)\nassert amtor == sum\n\ndriver.close()","repo_name":"MounikaAsula/gitdemo","sub_path":"venv/explicit wait.py","file_name":"explicit wait.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18988692616","text":"#!/usr/bin/env python3\n\n# Created by: Ntare-Katarebe\n# Created on: June 2021\n# This program uses an array(list) to find the largest number\n\n\nimport math\nimport random\n\nmy_numbers = []\n\n\ndef min_number(my_numbers):\n min = my_numbers[0]\n\n for loop_counter in my_numbers:\n if loop_counter < min:\n min = loop_counter\n\n return min\n\n\ndef main():\n # this function uses an array\n\n # input\n for loop_counter in range(0, 10):\n a_single_number = random.randint(1, 100)\n my_numbers.append(a_single_number)\n print(\"The random is: {0}\".\n format(my_numbers[loop_counter]), end=\"\\n\")\n print(\"\")\n\n print(\"The smallest number is {}\".format(min_number(my_numbers)))\n print(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ntare-Katarebe/ICS3U-Unit6-03-Python-Array_Min","sub_path":"array_min.py","file_name":"array_min.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9582669265","text":"# Read from hive with pyhive\nfrom pyhive import hive\n\n# conn = hive.Connection(host=\"localhost\", port=10000, username=\"YOU\")\nconn = hive.Connection(host=\"localhost\", port=10000)\n\ncursor = conn.cursor()\ncursor.execute(\"use testdb\")\ncursor.execute(\"SELECT * FROM testtable\")\n\nfor result in cursor.fetchall():\n print(result)\n\n# Read from hive with spark","repo_name":"royassis/sparkExample","sub_path":"test_hive.py","file_name":"test_hive.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41756500293","text":"names = []\n\n#reading include the extension \n#same folder location as tx_files.py\n\nwith open(\"names.txt\") as txt_file:\n # print(txt_file)\n for line in txt_file:\n #to remove the new line in between\n line = line.strip()\n names.append(line)\n \n#it will print \\n for new line by default\n#by unindenting we are closing the connection with the file, we dont file to be open for too long\nprint(names)\n\nfor name in names:\n print(name)\n\n# # \"w\" tells it to write as by default it will read\n# with open(\"names_output.txt\", \"w\") as txt_file:\n# for name in names:\n# # \\n is needed if you want the output in the new file with names on new line\n# txt_file.write(name + \"\\n\")\n\ncounter = 1\nwith open(\"names_output.txt\", \"w\") as txt_file:\n for name in names:\n txt_file.write(f\"{counter}. {name}\" + \"\\n\")\n counter +=1\n\n\n","repo_name":"KritiBhardwaj/PythonExercises","sub_path":"ReadingWriting/txt_files/txt_files.py","file_name":"txt_files.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1924252671","text":"\"\"\"Implementation of the potential vorticity operator.\"\"\"\n\n# Third-party\nimport numpy as np\nimport xarray as xr\n\n# Local\nfrom .. import constants as const\nfrom . 
import diff\nfrom .curl import curl\nfrom .support_operators import get_grid_coords\nfrom .total_diff import TotalDiff\n\n\ndef fpotvortic(\n u: xr.DataArray,\n v: xr.DataArray,\n w: xr.DataArray,\n theta: xr.DataArray,\n rho_tot: xr.DataArray,\n total_diff: TotalDiff,\n) -> xr.DataArray:\n r\"\"\"Compute the potential vorticity.\n\n The potential vorticity is computed with the following formula:\n\n .. math::\n v_p = \\frac{1}{\\rho} * \\frac{\\partial \\Theta}{\\partial \\z} * (c_v + 2 \\Omega)\n\n where\n :math:`\\rho` is the total air density,\n :math:`\\frac{\\partial \\Theta}{\\partial \\z}`\n is the vertical gradient of the potential temperature,\n :math:`c_v` is the curl of the wind in y direction and\n :math`\\Omega` is the coriolis term.\n\n Parameters\n ----------\n u: xr.DataArray\n Wind in x direction [m/s]\n v: xr.DataArray\n Wind in y direction [m/s]\n w: xr.DataArray\n Wind in z direction [m/s]\n theta: xr.DataArray\n Potential Temperature [K]\n rho_tot: xr.DataArray\n Total density [kg m-3]\n total_diff: TotalDiff\n Terrain following grid derivative helper\n\n Returns\n -------\n xr.DataArray:\n The potential vorticity\n\n \"\"\"\n # target coordinates\n deg2rad = np.pi / 180\n lat = (rho_tot[\"lat\"] * deg2rad).astype(np.float32)\n\n geo = w.attrs[\"geography\"]\n ny = geo[\"Nj\"]\n lat_min = geo[\"latitudeOfFirstGridPointInDegrees\"]\n dlat = geo[\"jDirectionIncrementInDegrees\"]\n rlat = get_grid_coords(ny, lat_min, dlat, \"y\") * deg2rad\n\n # compute curl\n curl1, curl2, curl3 = curl(u, v, w, rlat, total_diff)\n\n # coriolis terms\n cor2 = 2 * const.pc_omega / const.earth_radius * np.cos(lat)\n cor3 = 2 * const.pc_omega * np.sin(lat)\n\n dt_dlam = total_diff.d_dlam(diff.dx(theta), diff.dz(theta))\n dt_dphi = total_diff.d_dphi(diff.dy(theta), diff.dz(theta))\n dt_dzeta = total_diff.d_dzeta(diff.dz(theta))\n\n # potential vorticity\n out = (\n dt_dlam * curl1 + dt_dphi * (curl2 + cor2) - dt_dzeta * (curl3 + cor3)\n ) / rho_tot\n\n out.attrs = theta.attrs\n\n return out\n","repo_name":"MeteoSwiss-APN/icon_data_processing_incubator","sub_path":"src/idpi/operators/pot_vortic.py","file_name":"pot_vortic.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22190341449","text":"from bert_serving.server.helper import get_args_parser\nfrom bert_serving.server.helper import get_shutdown_parser\nfrom bert_serving.server import BertServer\nfrom bert_serving.client import BertClient\nimport numpy as np\n\nSTART_ARGS = [\n \"-model_dir\",\n \"../models/multi_cased_L-12_H-768_A-12\",\n \"-num_worker\",\n \"2\",\n \"-port\",\n \"5555\",\n \"-port_out\",\n \"5556\",\n \"-max_seq_len\",\n \"NONE\",\n \"-pooling_strategy\",\n \"NONE\",\n \"-mask_cls_sep\",\n \"-cpu\",\n]\nSHUT_ARGS = [\"-ip\", \"localhost\", \"-port\", \"5555\", \"-timeout\", \"5000\"]\n\n\nclass BertWordEmbedding:\n def __init__(self):\n self.start_args = get_args_parser().parse_args(START_ARGS)\n self.shut_args = get_shutdown_parser().parse_args(SHUT_ARGS)\n\n def vectorize(self, client, tokens):\n return np.squeeze(client.encode(tokens, is_tokenized=True))[1:-1]\n\n\nif __name__ == \"__main__\":\n bert = BertWordEmbedding()\n tokens = [[\"hello\", \"world\", \"!\"]]\n with BertServer(bert.start_args):\n with BertClient() as client:\n vecs = bert.vectorize(client, tokens)\n print(vecs)\n 
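# With pooling_strategy NONE the server returns one vector per word piece;\n            # squeeze(...)[1:-1] above stripped the [CLS]/[SEP] rows, so for these\n            # three tokens the expected shape is roughly (3, 768) for this BERT-base\n            # model (hidden size 768), assuming no extra word-piece splitting.\n            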
print(vecs.shape)\n","repo_name":"neuromaancer/MWEIs","sub_path":"mwei/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20281062834","text":"num = list()\n\nwhile True:\n n = int(input('Digite um numero '))\n if n not in num:\n num.append(n)\n else:\n print('Valor ja adicionado...')\n r = str(input('Quer Continuar?[S/N] '))\n if r in 'Nn':\n break\n\nnum.sort()\nprint(f'Valores digitados: {num}')\n","repo_name":"JoooNatan/CursoPython","sub_path":"Mundo03/Exs/Ex079.py","file_name":"Ex079.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72307174269","text":"class Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n if not digits:\n return []\n dic = {'2':'abc',\n '3':'def',\n '4':'ghi',\n '5':'jkl',\n '6':'mno',\n '7':'pqrs',\n '8':'tuv',\n '9':'wxyz'}\n def dfs(i = 0, combo = \"\", res = []):\n if i == len(digits):\n res.append(combo)\n else:\n nextDigit = digits[i]\n children = dic[nextDigit]\n for child in children:\n dfs(i+1, combo + child, res)\n return res \n return dfs(0, \"\", [])","repo_name":"AndrewAct/LeetCode","sub_path":"Python/017LetterCombinationofPhoneNumber.py","file_name":"017LetterCombinationofPhoneNumber.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12747012990","text":"from rest_framework import serializers\nfrom rest_framework.fields import empty\nfrom .models import CourseCategory\nfrom .models import Course,Chapter\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model=CourseCategory\n fields=['id','title','description']\n\nclass CourseSerializer(serializers.ModelSerializer):\n class Meta:\n model=Course\n fields=['id','category','teacher','title','description','featured_img','techs','course_chapters','related_videos','tech_list','total_enrolled_students']\n \n def __init__(self, *args,**kwargs):\n super(CourseSerializer,self).__init__(*args,**kwargs)\n request=self.context.get('request')\n self.Meta.depth=0\n if request and request.method =='GET':\n self.Meta.depth=2\n\nclass ChapterSerializer(serializers.ModelSerializer):\n class Meta:\n model=Chapter\n fields=['id','course','title','description','video','remarks']\n\n def __init__(self, *args,**kwargs):\n super(ChapterSerializer,self).__init__(*args,**kwargs)\n request=self.context.get('request')\n self.Meta.depth=0\n if request and request.method =='GET':\n self.Meta.depth=1\n\n\n","repo_name":"Athulya-k-k/Learning-Management-system-back-end","sub_path":"course/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8366649340","text":"#!/usr/bin/env python\n# -*- utf-8 -*-\nimport glob\n\nimport file\nimport os\n\nBASE_PATH = \"D:\\\\02helloWorld\\\\03Python\\\\a01pythonLearn\\\\\"\nfile_path = \"file\\\\p054\"\n\nprint(\"扫描目录所有文件,方法一\")\nprint(tuple(os.walk(BASE_PATH + file_path)))\nrs_list: list = []\nfor root, sub, files in os.walk(BASE_PATH + file_path):\n for f in files:\n rs_str: str = \"\"\n file01 = open(BASE_PATH + file_path + \"\\\\\" + f, \"r+\", encoding=\"utf-8\")\n for s in file01.readlines():\n rs_str += s\n 
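# \"method one\": append each file's concatenated text to the result list\n        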
rs_list.append(rs_str)\nprint(rs_list)\n\nprint(\"扫描目录所有文件,方法二\")\nprint(glob.glob(BASE_PATH + file_path + \"\\\\*.txt\"))\nrs_list: list = []\nfor f in glob.glob(BASE_PATH + file_path + \"\\\\*.txt\"):\n rs_str: str = \"\"\n file01 = open(f, \"r+\", encoding=\"utf-8\")\n # for s in file01.readlines():\n # rs_str += s\n rs_str = file01.read().strip()\n rs_list.append(rs_str)\nprint(rs_list)\n\n","repo_name":"renxiaowei-1991/pythonLearn","sub_path":"a01PythonLearn/package/b06baseTopic100/topic055.py","file_name":"topic055.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12932146928","text":"from types import GeneratorType\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nfrom django.http import HttpRequest\nfrom rest_framework.decorators import api_view\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom swh.web.api.apiurls import api_route, api_urls\nfrom swh.web.utils.exc import NotFoundExc\n\nEnrichFunction = Callable[[Dict[str, str], Optional[HttpRequest]], Dict[str, str]]\n\nEnrichFunctionSearchResult = Callable[\n [Tuple[List[Dict[str, Any]], Optional[str]], Optional[HttpRequest]],\n Tuple[List[Dict[str, Any]], Optional[str]],\n]\n\n\ndef api_lookup(\n lookup_fn: Callable[..., Any],\n *args: Any,\n notfound_msg: Optional[str] = \"Object not found\",\n enrich_fn: Optional[Union[EnrichFunction, EnrichFunctionSearchResult]] = None,\n request: Optional[HttpRequest] = None,\n **kwargs: Any,\n):\n r\"\"\"\n Capture a redundant behavior of:\n - looking up the backend with a criteria (be it an identifier or\n checksum) passed to the function lookup_fn\n - if nothing is found, raise an NotFoundExc exception with error\n message notfound_msg.\n - Otherwise if something is returned:\n - either as list, map or generator, map the enrich_fn function to\n it and return the resulting data structure as list.\n - either as dict and pass to enrich_fn and return the dict\n enriched.\n\n Args:\n - lookup_fn: function expects one criteria and optional supplementary\n \\*args.\n - \\*args: supplementary arguments to pass to lookup_fn.\n - notfound_msg: if nothing matching the criteria is found,\n raise NotFoundExc with this error message.\n - enrich_fn: Function to use to enrich the result returned by\n lookup_fn. 
Default to the identity function if not provided.\n - request: Input HTTP request that will be provided as parameter\n to enrich_fn.\n\n\n Raises:\n NotFoundExp or whatever `lookup_fn` raises.\n\n \"\"\"\n\n def _enrich_fn_noop(x, request):\n return x\n\n if enrich_fn is None:\n enrich_fn = _enrich_fn_noop\n res = lookup_fn(*args, **kwargs)\n if res is None:\n raise NotFoundExc(notfound_msg)\n if isinstance(res, (list, GeneratorType)) or type(res) == map:\n return [enrich_fn(x, request) for x in res]\n return enrich_fn(res, request)\n\n\n@api_view([\"GET\", \"HEAD\"])\ndef api_home(request: Request):\n return Response({}, template_name=\"api.html\")\n\n\napi_urls.add_url_pattern(r\"^api/$\", api_home, view_name=\"api-1-homepage\")\n\n\n@api_route(r\"/\", \"api-1-endpoints\")\ndef api_endpoints(request):\n \"\"\"Display the list of opened api endpoints.\"\"\"\n routes_by_category = {}\n for route, doc in api_urls.get_app_endpoints().items():\n doc[\"doc_intro\"] = doc[\"docstring\"].split(\"\\n\\n\")[0]\n routes_by_category.setdefault(doc[\"category\"], []).append(doc)\n\n for routes in routes_by_category.values():\n routes.sort(key=lambda route: route[\"route\"])\n\n # sort routes by alphabetical category name, with 'miscellaneous' at the end\n misc_routes = routes_by_category.pop(\"Miscellaneous\")\n sorted_routes = sorted(routes_by_category.items())\n sorted_routes.append((\"Miscellaneous\", misc_routes))\n\n env = {\"doc_routes\": sorted_routes}\n return Response(env, template_name=\"api-endpoints.html\")\n","repo_name":"SoftwareHeritage/swh-web","sub_path":"swh/web/api/views/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} +{"seq_id":"9661478077","text":"\"\"\"empty message\n\nRevision ID: 3588a2f703d9\nRevises: 7806b1db044c\nCreate Date: 2020-11-10 11:31:26.918630\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3588a2f703d9'\ndown_revision = '7806b1db044c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('following',\n sa.Column('follower_id', sa.Integer(), nullable=False),\n sa.Column('followee_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['followee_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['follower_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('follower_id', 'followee_id')\n )\n op.create_table('posts',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('author_id', sa.Integer(), nullable=False),\n sa.Column('author', sa.String(length=32), nullable=True),\n sa.Column('title', sa.String(length=32), nullable=True),\n sa.Column('content', sa.String(length=256), nullable=False),\n sa.Column('posted_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_posts_posted_at'), 'posts', ['posted_at'], unique=False)\n op.add_column('user', sa.Column('bio', sa.String(length=256), nullable=True))\n op.add_column('user', sa.Column('full_name', sa.String(length=32), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('user', 'full_name')\n    op.drop_column('user', 'bio')\n    op.drop_index(op.f('ix_posts_posted_at'), table_name='posts')\n    op.drop_table('posts')\n    op.drop_table('following')\n    # ### end Alembic commands ###\n","repo_name":"acalvino4/virally","sub_path":"migrations/versions/3588a2f703d9_.py","file_name":"3588a2f703d9_.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"73239036669","text":"\"\"\" Program that checks whether the given two strings are anagrams \"\"\"\n\ndef check_anagram(str1, str2):\n    \"\"\"\n    Check the length of the two args; if not equal, return False.\n    Take two args and return True if anagram otherwise False.\n\n    Parameters:\n    str1(string): first string\n    str2(string): second string\n\n    Returns:\n    True(bool): if anagram\n    False(bool): if not anagram\n    \"\"\"\n\n    if len(str1) != len(str2):\n        return False\n    # list.sort() sorts in place and returns None, so compare sorted() copies instead\n    list_str1 = sorted(str1)\n    list_str2 = sorted(str2)\n    return list_str1 == list_str2\n\n# main function\ndef main():\n    # read the two strings to compare\n    user_input1 = input('Enter the first string to check anagram: ')\n    user_input2 = input('Enter the second string to check anagram: ')\n\n    # display the result\n    if check_anagram(user_input1, user_input2):\n        print('The given strings are anagram.')\n\n    else:\n        print('The given strings are not anagram.')\n\nif __name__ == '__main__':\n    # execute only if run as a script\n    main()\n","repo_name":"danny237/Python-Assignment2","sub_path":"check_anagram.py","file_name":"check_anagram.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"4818095533","text":"def search(item, item_list):\r\n    first = 0\r\n    last = len(item_list) -1\r\n\r\n    while first <= last:\r\n        mid = (first + last) // 2\r\n        if item_list[mid] == item:\r\n            return mid\r\n        else:\r\n            if item_list[mid] < item:\r\n                first = mid + 1\r\n            else:\r\n                last = mid - 1\r\n\r\n    return -1\r\n\r\nitems = [11, 24, 45, 77, 88, 100, 115, 125]\r\nitem = 25\r\n\r\nprint(search(item, items))\r\n","repo_name":"Nick-Feldman/PythonExamples","sub_path":"exercise5_4.py","file_name":"exercise5_4.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"73931862909","text":"#!python\n\"\"\"\nEuler discovered the remarkable quadratic formula:\n\nn² + n + 41\n\nIt turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39. However, when n = 40, 40² + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 41² + 41 + 41 is clearly divisible by 41.\n\nThe incredible formula n² − 79n + 1601 was discovered, which produces 80 primes for the consecutive values n = 0 to 79. The product of the coefficients, −79 and 1601, is −126479.\n\nConsidering quadratics of the form:\n\nn² + an + b, where |a| < 1000 and |b| < 1000\n\nwhere |n| is the modulus/absolute value of n\ne.g. 
|11| = 11 and |−4| = 4\nFind the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n = 0.\n\"\"\"\n\nfrom math import sqrt, floor\n\ndef is_prime(n):\n\tif n < 2:\n\t\treturn False\n\t# check divisors up to and including floor(sqrt(n))\n\tfor i in range(2,floor(sqrt(n))+1):\n\t\tif n%i==0:\n\t\t\treturn False\n\treturn True\n\ndef resolve_quad(a,b,n):\n\treturn n*n+a*n+b\n\ndef quadratic_prime_generator(a, b):\n\ti=0\n\tq = resolve_quad(a,b,i)\n\twhile is_prime(q):\n\t\tyield q\n\t\ti+=1\n\t\tq = resolve_quad(a,b,i)\n\nif __name__==\"__main__\":\n\tl=0\n\tp=0\n\tfor a in range(-1000,1001):\n\t\tfor b in range(-1000,1001):\n\t\t\tx = len(list(quadratic_prime_generator(a,b)))\n\t\t\tif x > l:\n\t\t\t\tl=x\n\t\t\t\tp=a*b\n\tprint(p)\n","repo_name":"DanMayhem/project_euler","sub_path":"027.py","file_name":"027.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"36766305512","text":"# Floyd-Warshall template\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\ninf = sys.maxsize\r\n\r\nn = int(input()) # number of nodes\r\nm = int(input()) # number of edges\r\ngraph = [[inf]*(n+1) for _ in range(n+1)] # shortest-distance table (2D list)\r\n\r\n# the cost from a node to itself is initialized to 0\r\nfor a in range(1, n+1):\r\n    for b in range(1, n+1):\r\n        if a == b:\r\n            graph[a][b] = 0\r\n\r\n# read the edge information\r\nfor _ in range(m):\r\n    # going from a to b costs c; every c value goes into the table\r\n    a, b, c = map(int, input().split())\r\n    graph[a][b] = c\r\n\r\n# run the Floyd-Warshall algorithm\r\nfor k in range(1, n+1):\r\n    for a in range(1, n+1):\r\n        for b in range(1, n+1):\r\n            graph[a][b] = min(graph[a][b], graph[a][k] + graph[k][b])\r\n\r\n# print the resulting table\r\nfor a in range(1, n+1):\r\n    for b in range(1, n+1):\r\n        # print -1 if the node is unreachable\r\n        if graph[a][b] == inf:\r\n            print(\"-1\", end=\" \")\r\n        else: # otherwise print the distance\r\n            print(graph[a][b], end=\" \")\r\n    print()\r\n\r\n'''\r\n4\r\n7\r\n1 2 4\r\n1 4 6\r\n2 1 3\r\n2 3 7\r\n3 1 5\r\n3 4 4\r\n1 4 6\r\n\r\n0 4 11 6 \r\n3 0 7 9 \r\n5 9 0 4 \r\n-1 -1 -1 0 \r\n'''","repo_name":"jiyoung-dev/Algorithm","sub_path":"2021study/Floyd-Warshall/Template/플로이드워셜 알고리즘.py","file_name":"플로이드워셜 알고리즘.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"71509394748","text":"# Django\nfrom django.urls import reverse\n\n# Third Party Libraries\nfrom box import Box\nfrom dateutil import parser\n\n\nclass BaseShowModel:\n    def __init__(self, genres):\n        self.genres = {g[\"id\"]: g[\"name\"] for g in genres}\n\n    def show_model(self, show):\n        show = Box(show, default_box=True)\n        show.poster_path = (\n            f\"https://image.tmdb.org/t/p/w500/{show.poster_path}\"\n            if show.poster_path\n            else \"https://via.placeholder.com/500x750\"\n        )\n        show.vote_average = f\"{int(round(show.vote_average * 10))}%\"\n        show.first_air_date = parser.parse(show.first_air_date)\n        show.genres = \", \".join(\n            [\n                self.genres[genre_id]\n                for genre_id in show.genre_ids\n                if genre_id in self.genres\n            ]\n        )\n\n        return show\n\n\nclass ShowListViewModel(BaseShowModel):\n    def __init__(self, popular_shows, top_rated_shows, genres):\n        super().__init__(genres)\n        self.popular_shows = [self.show_model(show) for show in popular_shows]\n        self.top_rated_shows = [self.show_model(show) for show in top_rated_shows]\n\n    def show_model(self, show):\n        show = super().show_model(show)\n        show.url = reverse(\"tv_shows:show_detail\", args=[show.id])\n        return show\n\n    def as_dict(self):\n        return {\n            \"top_rated_shows\": 
self.top_rated_shows,\n \"popular_shows\": self.popular_shows,\n }\n\n\nclass ShowDetailViewModel(BaseShowModel):\n def __init__(self, show, genres):\n super().__init__(genres)\n self.show = self.show_model(show)\n self.show.cast = [self.cast_model(cast) for cast in self.show.credits.cast[:5]]\n self.show.images = [\n self.image_model(image) for image in self.show.images.backdrops[:9]\n ]\n\n if self.show.videos.results:\n self.show.trailer = self.show.videos.results[0].key\n else:\n self.show.trailer = None\n\n def image_model(self, image):\n image.original = f\"https://image.tmdb.org/t/p/original/{image['file_path']}\"\n image.thumbnail = f\"https://image.tmdb.org/t/p/w500/{image['file_path']}\"\n return image\n\n def cast_model(self, cast):\n cast.profile_path = (\n f\"https://image.tmdb.org/t/p/w300{cast.profile_path}\"\n if cast.profile_path\n else \"https://via.placeholder.com/300x450\"\n )\n return cast\n\n def as_dict(self):\n return {\"show\": self.show}\n","repo_name":"danjac/movieapp","sub_path":"movieapp/tv_shows/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32553628253","text":"from copy import deepcopy\nfrom itertools import combinations as combi\ndef hunt(h,c):\n for i in range(1,d+1):\n for dx in range(-i,i+1):\n x,y=h+dx,n-(i-abs(dx))\n if 0<=x1231213->abcdeg\n\tdata,server=client.recvfrom(1024)\n\tprint('message from server',data.decode('utf-8'))\nclient.close()\n#TCP 服务器 在heyman霸占的时间内 其他同学是不是进不来\n","repo_name":"HarveyWang81/PythonScript","sub_path":"xuegod/上课老师笔记/if提供socket_tcp_udp/udpclient.py","file_name":"udpclient.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40429725096","text":"from django.conf.urls.defaults import *\nfrom coffeeclubapp.views import *\nfrom rapidsms_httprouter.urls import urlpatterns as router_urls\nfrom django.conf import settings\nfrom generic.views import generic, generic_row\nfrom generic.sorters import SimpleSorter\nfrom coffeeclubapp.models import Customer\nfrom django.contrib.auth.models import Permission\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom generic.urls import urlpatterns as generic_urls\n\nurlpatterns = patterns('',\n\n url(r'^$', dashboard , name=\"coffee-dashboard\"),\n url(r'^account/', include('rapidsms.urls.login_logout')),\n url('^accounts/login', 'rapidsms.views.login'),\n url('^accounts/logout', 'rapidsms.views.logout'),\n\n url(r'^admin/', include(admin.site.urls)),\n\n # RapidSMS contrib app URLs\n (r'^ajax/', include('rapidsms.contrib.ajax.urls')),\n (r'^export/', include('rapidsms.contrib.export.urls')),\n (r'^httptester/', include('rapidsms.contrib.httptester.urls')),\n (r'^messagelog/', include('rapidsms.contrib.messagelog.urls')),\n (r'^messaging/', include('rapidsms.contrib.messaging.urls')),\n (r'^registration/', include('auth.urls')),\n (r'^scheduler/', include('rapidsms.contrib.scheduler.urls')),\n\n url(r'^management/', management, name=\"management_dashboard\"),\n (r'^polls/', include('poll.urls')),\n url(r'^customers/(?P\\d+)/edit/', edit_customer, name=\"edit_customer\"),\n url(r'^customers/(?P\\d+)/delete/', delete_customer, name=\"delete_customer\"),\n url(r'^customers/(?P\\d+)/view/', customer_details, name=\"view_customer\"),\n url(r'^customers/new/', edit_customer, name=\"new_customer\"),\n url(r'^customers/upload/', upload_customers, 
name=\"upload_customers\"),\n url(r'^customers/export/', export_cusomers, name=\"export_customers\"),\n url(r'^customers/leaderboard/', leaderboard, name=\"leaderboard\"),\n url(r'^emails/compose/', compose_newsletter, name=\"compose-newsletter\"),\n url(r'^customers/emails/', scheduled_emails, name=\"emails\"),\n url(r'^emails/(?P\\d+)/edit/', edit_email, name=\"edit_email\"),\n url(r'^emails/(?P\\d+)/new/', edit_email, name=\"new_email\"),\n url(r'^emails/(?P\\d+)/delete/', delete_email, name=\"delete_email\"),\n url(r'^test/$', generic, {\n 'model':Permission,\n }),\n url(r'^customers/$', generic, {\n 'model':Customer,\n 'queryset':Customer.objects.all(),\n 'results_title':'All Customers',\n 'filter_forms':[],\n 'action_forms':[],\n 'objects_per_page':10,\n 'partial_row':'coffeeclubapp/partials/customer_row.html',\n 'partial_header':'coffeeclubapp/partials/partial_header_dashboard.html',\n 'base_template':'coffeeclubapp/customers.html',\n 'selectable':False,\n 'columns':[('Name', True, 'name', SimpleSorter()),\n ('Extension', True, 'extension', SimpleSorter(),),\n ('Location', True, 'start_date', SimpleSorter(),),\n ('# Standard Drink', False, 'participants', None,),\n ('Milk Type', False, '', None,),\n ('Running Order', False, '', None,),\n ('Days/Week', False, '', None,),\n ('Own Cup', False, '', None,),\n ('Notes', False, '', None,),\n ('Balance', True, 'accounts__balance', SimpleSorter(),),\n ],\n }, name=\"poll_dashboard\"),\n ) + router_urls + generic_urls\n\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n # helper URLs file that automatically serves the 'static' folder in\n # INSTALLED_APPS via the Django static media server (NOT for use in\n # production)\n (r'^', include('rapidsms.urls.static_media')),\n )\n\nfrom rapidsms_httprouter.router import get_router\nget_router()\n","repo_name":"unicefuganda/coffeeclub","sub_path":"coffeeclub_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"1007834272","text":"'''From Yohaku Twitter post\r\nhttps://twitter.com/YohakuPuzzle/status/1118482362491256832April 17, 2019\r\nApril 17, 2019\r\n3x3 Grid Primes, Row and Column Products'''\r\n\r\nimport copy\r\nimport time\r\nimport random\r\nfrom math import sqrt\r\n\r\nstarttime = time.time()\r\n\r\nPOP_N = 10000\r\nsolved = False\r\n\r\n#from specific puzzle 04/17/19\r\nROWS = [65,35,12]\r\nCOLS = [44,47,21]\r\n\r\ndef isPrime(num):\r\n '''Returns True if num is Prime'''\r\n if num == 2: return True\r\n if num % 2 == 0: return False\r\n for i in range(3,int(sqrt(num))+1,2):\r\n if num % i == 0:\r\n return False\r\n return True\r\n \r\n#generate list of primes up to highest sum in puzzle\r\nNUMBERS = [i for i in range(2,73) if isPrime(i)]\r\n\r\nclass Puzzle(object):\r\n def __init__(self):\r\n self.numList = random.sample(NUMBERS,9)\r\n self.score = 0\r\n self.mutations = 0\r\n self.crossovers = 0\r\n self.replacements = 0\r\n\r\n def calc_score(self):\r\n self.score = 0\r\n for i in range(3):\r\n row = self.numList[i*3:(i+1)*3]\r\n #print(row)\r\n self.score += abs(sum(row)-ROWS[i])\r\n col = int(self.numList[i % 3] + self.numList[i % 3 + 3]) + \\\r\n int(self.numList[i % 3 + 6])\r\n #print(col)\r\n self.score += abs(col-COLS[i])\r\n return self.score\r\n\r\n def mutate(self,num):\r\n '''Swaps num numbers in the numList'''\r\n indices = random.sample(list(range(9)),num)\r\n child = Puzzle()\r\n child.mutations = self.mutations + 1\r\n child.crossovers 
= self.crossovers\r\n child.numList = self.numList[::]\r\n for i in range(num-1):\r\n child.numList[indices[i]],child.numList[indices[(i+1)%num]] = \\\r\n child.numList[indices[(i+1)%num]], child.numList[indices[i]]\r\n return child\r\n\r\n def replace(self,n):\r\n '''Replaces n numbers in numList with other primes'''\r\n child = Puzzle()\r\n child.crossovers = self.crossovers\r\n child.mutations = self.mutations\r\n child.replacements = self.replacements + 1\r\n child.numList = self.numList[::]\r\n notinlist = [x for x in NUMBERS if x not in child.numList]\r\n indices = random.sample(list(range(0,9)),n)\r\n for ind in indices:\r\n num = random.choice(notinlist)\r\n child.numList[ind] = num\r\n notinlist.remove(num)\r\n return child\r\n\r\n def crossover(self,partner):\r\n '''Splice together nums with partner's nums'''\r\n child = Puzzle()\r\n child.crossovers = self.crossovers + partner.crossovers + 1\r\n child.mutations = self.mutations + partner.mutations\r\n #randomly choose slice point\r\n index = random.randint(1,7)\r\n #add numbers up to slice point\r\n child.numList = self.numList[:index]\r\n #half the time reverse them\r\n if random.random()<0.5:\r\n child.numList = child.numList[::-1]\r\n #list of numbers not in the slice\r\n notinslice = [x for x in partner.numList if x not in child.numList]\r\n #add the numbers not in the slice\r\n child.numList += notinslice\r\n return child\r\n\r\n def print_board(self):\r\n #board = []\r\n for i in range(5):\r\n print(self.numList[5*i:5*i+5])\r\n print(\"Mutations:\",self.mutations)\r\n print(\"Crossover:\",self.crossovers)\r\n print(\"Replacements:\",self.replacements)\r\n print()\r\n \r\ndef setup():\r\n \r\n size(600,600)\r\n fill(0)\r\n textSize(48)\r\n reset()\r\n \r\ndef draw():\r\n global cycles_without_improvement,population,record_score,best\r\n background(255)\r\n cycles_without_improvement += 1\r\n if cycles_without_improvement >= 50:\r\n reset()\r\n textSize(36)\r\n text(\"Error: \"+str(record_score),50,50)\r\n printBoard(best.numList)\r\n textSize(18)\r\n text(\"Mutations: \"+str(best.mutations),450,50)\r\n \r\n #if this is the solution\r\n if record_score == 0:\r\n saveFrame(\"####.png\")\r\n elapsed = round(time.time() - starttime,1)\r\n println(\"Time: \"+str(elapsed)+\"seconds\")\r\n noLoop()\r\n # print(best.calc_score())\r\n # best.print_board()\r\n # solved = True\r\n # return\r\n #sort the population, best score first \r\n population.sort(key=Puzzle.calc_score)\r\n population = population[:POP_N]\r\n #check if it's better than the record score\r\n score2 = population[0].calc_score()\r\n if score2 < record_score:\r\n record_score = score2\r\n best = population[0]\r\n \r\n saveFrame(\"####.png\")\r\n cycles_without_improvement = 0\r\n #mutate the best Puzzle\r\n for j in range(1000):\r\n for i in range(1,9):\r\n new = best.mutate(i)\r\n population.append(new)\r\n #mutate some random Puzzles\r\n for i in range(1,9):\r\n ran = random.choice(population)\r\n new = ran.mutate(i)\r\n population.append(new)\r\n #replace some in best puzzles\r\n #### for i in range(1,9):\r\n #### new = best.replace(i)\r\n #### population.append(new)\r\n #crossover\r\n #### parenta,parentb = random.sample(population,2)\r\n #### child = parenta.crossover(parentb)\r\n #### population.append(child)\r\n #add some new Puzzles\r\n for i in range(1000):\r\n population.append(Puzzle())\r\n '''if frameCount %100 == 0:\r\n saveFrame(\"####.png\")'''\r\n\r\ndef reset():\r\n global cycles_without_improvement,population,record_score,best\r\n population = []\r\n 
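# restart the genetic search with a fresh random population;\r\n    # cycles_without_improvement tracks stagnation so draw() can call reset()\r\n    # again after 50 generations without progress\r\n    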
cycles_without_improvement = 0\r\n #fill population with Puzzles\r\n for i in range(POP_N):\r\n population.append(Puzzle())\r\n population.sort(key=Puzzle.calc_score)\r\n best = population[0]#random.choice(population)\r\n record_score = best.calc_score()\r\n first = record_score\r\n \r\ndef printBoard(numList):\r\n line(440,75,440,425)\r\n line(100,425,440,425)\r\n for i in range(3):\r\n text(ROWS[i],475,100+125*i)\r\n text(COLS[i],100+125*i,475)\r\n for j in range(3):\r\n text(numList[j+3*i],100+125*j,100+125*i)\r\n \r\n\r\n'''Solution:\r\n[23, 29, 13]\r\n[19, 11, 5]\r\n[2, 7, 3]\r\nMutations: 11\r\nCrossover: 2\r\nReplacements: 0\r\n'''\r\n","repo_name":"hackingmath/puzzles","sub_path":"Puzzle_Yohaku_Primes_Apr_17_19_GA.pyde","file_name":"Puzzle_Yohaku_Primes_Apr_17_19_GA.pyde","file_ext":"pyde","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"40155947604","text":"minibatches = 1\nexamples_per_minibatch = 16\nframelength = 128\noverlap = framelength/2\nfreq_bins = 128\ntime_bins = 64\n\npercent_background_latents = 0.25\npercent_noise_only_examples = 0.5\n\nlambduh = 0.75\nlambduh_finetune = 8\n\nfs = 44100\n\nniter_pretrain = 200\nniter_finetune = 200\n\nimport lasagne\nget_output = lasagne.layers.get_output\nget_all_params = lasagne.layers.get_all_params\n\n\n# calculated based on params\nn_noise_only_examples = int(\n percent_noise_only_examples * examples_per_minibatch)\n\nimport theano\ndtype = theano.config.floatX\n\nsnr = 1000 # db\n","repo_name":"luster/thesis","sub_path":"thesis/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21275870566","text":"import mindspore as ms\nimport numpy as np\n\nfrom mindpose.models.decoders.bottom_up_decoder import BottomUpHeatMapAEDecoder\nfrom mindspore import Tensor\n\n\ndef test_heatmap_ae_decoder():\n ms.set_context(mode=ms.GRAPH_MODE)\n\n decoder = BottomUpHeatMapAEDecoder(max_num=30)\n heatmap_1 = Tensor(np.random.random((8, 34, 32, 32)), dtype=ms.float32)\n heatmap_2 = Tensor(np.random.random((8, 17, 64, 64)), dtype=ms.float32)\n mask = Tensor(np.random.randint(2, size=(8, 128, 128)), dtype=ms.uint8)\n\n val_k, tag_k, ind_k = decoder([heatmap_1, heatmap_2], mask)\n assert val_k.shape == (8, 17, 30)\n assert tag_k.shape == (8, 17, 30, 1)\n assert ind_k.shape == (8, 17, 30, 2)\n","repo_name":"mindspore-lab/mindpose","sub_path":"tests/models/decoders/test_bottom_up_decoder.py","file_name":"test_bottom_up_decoder.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"6"} +{"seq_id":"9826176091","text":"from django import forms\r\nfrom functools import partial\r\nimport openpyxl\r\n\r\nfrom . 
import models\n\n\nclass NewExpenseForm(forms.ModelForm):\r\n    class Meta:\r\n        model = models.Expense\r\n        fields = ['portfolio', 'expense_type', 'ammount', 'date']\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        super(NewExpenseForm, self).__init__(*args, **kwargs)\r\n        self.fields['portfolio'].widget.attrs.update({'class': 'w3-input w3-border w3-light-grey'})\r\n        self.fields['expense_type'].widget.attrs.update({'class': 'w3-input w3-border w3-light-grey'})\r\n        self.fields['ammount'].widget.attrs.update({'class': 'w3-input w3-border w3-light-grey'})\r\n        self.fields['date'].widget.attrs.update({'class': 'w3-input w3-border w3-light-grey', 'type': 'date'})\r\n\r\n\r\nclass UploadFileForm(forms.Form):\r\n    file = forms.FileField()\r\n\r\n    def clean_file(self):\r\n        file = self.cleaned_data['file']\r\n        try:\r\n            openpyxl.load_workbook(file)\r\n        except Exception:\r\n            raise forms.ValidationError(\"Not an excel file\")\r\n        # clean_ methods must return the cleaned value on success\r\n        return file","repo_name":"Tokyros/Expenses","sub_path":"expenses/expense_manager/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"12929978836","text":"import keyboard\r\nfrom cryptography.fernet import Fernet\r\nimport os\r\n\r\n\r\n\r\narchivo = open(\"archivo.txt\", \"w\")\r\n\r\n\r\ndef on_key(event):\r\n    if event.name == '}': \r\n        archivo.close()\r\n        encriptar()\r\n        print('encriptar y borrar')\r\n        \r\n        \r\n        \r\n    if archivo.closed:\r\n        print('archivo cerrado' , event.name)\r\n    else:\r\n        print('Tecla presionada:', event.name)\r\n        archivo.write(event.name)\r\n        \r\n\r\n\r\n\r\n\r\ndef encriptar():\r\n    \r\n    # Generate an encryption key\r\n    print('# Generar una clave de cifrado')\r\n    clave = Fernet.generate_key()\r\n\r\n    # Create a Fernet object with the generated key\r\n    cipher_suite = Fernet(clave)\r\n    # key = open(\"key.txt\", \"wb\")\r\n    with open(\"key.txt\", \"wb\") as archivo_clave:\r\n        archivo_clave.write(clave)\r\n    # key.write(clave)\r\n    \r\n    with open(\"archivo.txt\", \"rb\") as archivo:\r\n        contenido = archivo.read()\r\n        contenido_encriptado = cipher_suite.encrypt(contenido)\r\n\r\n    with open(\"archivo_encriptado.txt\", \"wb\") as archivo_encriptado:\r\n        archivo_encriptado.write(clave)\r\n        archivo_encriptado.write(contenido_encriptado)\r\n\r\n    borrar_archivo_base()\r\n\r\ndef borrar_archivo_base():\r\n    ruta_archivo=\"archivo.txt\"\r\n    if os.path.isfile(ruta_archivo):\r\n        os.remove(ruta_archivo)\r\n\r\nkeyboard.on_press(on_key)\r\n\r\nkeyboard.wait('esc') # Wait until the 'esc' key is pressed\r\n\r\n\r\n","repo_name":"Sr-Hernandez/bot-escucha-teclado","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"69985043023","text":"import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n# %matplotlib inline\n\nfrom rpy2.robjects.packages import importr\ndevtools = importr('devtools')\n# devtools.install_github(\"dynverse/netdist\", dependencies = True)\n# devtools.install_github(\"alan-turing-institute/network-comparison\")\n\nfrom utils import generate_null_models, get_parameters\nfrom generator import ER_generator, draw_anomalies\nfrom basic_test import basic_features\nfrom com_detection import community_detection\nfrom spectral_localisation import spectral_features\nfrom 
NetEMD import NetEMD_features\nfrom path_finder import path_features\n\nimport json\nfrom numpyencoder import NumpyEncoder\nfrom networkx.readwrite import json_graph\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\n\n\nnum_models = 20 # original = 20\nnum_nodes = 1000 # original = 1000\nnum_basic_mc_samples = 300 # original = 500\nnum_references = 5 # original = 10\nnum_null_models = 12 # original = 60\n\nps = np.linspace(0.001, 0.05, 50)\nws = np.linspace(0.0, 0.01, 11)\ncandidate_parameters = get_parameters(num_nodes, ps, ws)\nnum_cand_param = len(candidate_parameters)\n\nAML_TYPE_DICT = {None: 0, 'path': 1, 'star': 2, 'ring': 3, 'clique': 4, 'tree': 5}\n\n\ndef generate_feature_graph(model_id, p, w):\n # p, w = candidate_parameters[np.random.choice(range(num_cand_param))]\n logging.info(\"Computing {}-th/{} model (p={:.3f}, w={:.3f})\".format(model_id, num_models, p, w))\n graph = ER_generator(n=num_nodes, p=p, seed=None)\n # graph = draw_anomalies(graph, w=1 - w)\n logging.info(\"\\n\\nGenerating null models 1\\n\\n\")\n _, references = generate_null_models(graph, num_models=num_references, min_size=10) # min_size=20 original\n logging.info(\"\\n\\nGenerating null models 2\\n\\n\")\n null_samples_whole, null_samples = generate_null_models(graph, num_models=num_null_models, min_size=20)\n logging.info(\"\\n\\nGenerating NetEMD features\\n\\n\")\n graph = NetEMD_features(graph, references, null_samples, num_references=num_references, num_samples=num_null_models)\n logging.info(\"\\n\\nGenerating basic features\\n\\n\")\n graph = basic_features(graph, num_samples=num_basic_mc_samples)\n logging.info(\"\\n\\nGenerating community features\\n\\n\")\n graph = community_detection(graph, null_samples, num_samples=20)\n logging.info(\"\\n\\nGenerating spectral features\\n\\n\")\n graph = spectral_features(graph, null_samples, num_samples=num_null_models)\n logging.info(\"\\n\\nGenerating path features\\n\\n\")\n graph = path_features(graph, null_samples_whole, num_samples=num_null_models)\n return graph\n\n\ndef write_json_graph(graph, model_id, p, w):\n data = json_graph.node_link_data(graph)\n with open('./data_fastgcn/input/Network_p_{:.3f}_w_{:.3f}_{}.json'.format(p, w, model_id), 'w') as outfile:\n json.dump(data, outfile, cls=NumpyEncoder)\n\n\ndef write_csv_df(graph, model_id, p, w):\n features = set()\n for node in graph.nodes():\n features |= set(graph.node[node].keys())\n # features.remove('type')\n logging.info(\"\\n\\nComposing DataFrame\\n\\n\")\n X = pd.DataFrame.from_dict(dict(graph.nodes(data=True, default=0)), orient='index')\n X.fillna(0, inplace=True)\n X.replace([np.inf, -np.inf], 0, inplace=True)\n logging.info(\"\\n\\nWriting to local file\\n\\n\")\n X.to_csv('./data_fastgcn/input/Network_p_{:.3f}_w_{:.3f}_{}.csv'.format(p, w, model_id))\n\n\ndef generate_multiple_graph_to_json_and_csv():\n for model_id in range(num_models):\n p, w = candidate_parameters[np.random.choice(range(num_cand_param))]\n graph = generate_feature_graph(model_id, p, w)\n write_json_graph(graph, model_id, p, w)\n write_csv_df(graph, model_id, p, w)\n\n\ndef generate_graph_dataset_json_for_fastgcn(model_id):\n p, w = candidate_parameters[np.random.choice(range(num_cand_param))]\n graph = generate_feature_graph(model_id, p, w)\n data = json_graph.node_link_data(graph)\n with open('data_run_test/big_graph_50k.json', 'w') as outfile:\n json.dump(data, outfile, cls=NumpyEncoder)\n\n\ndef create_class_map_json(G, path, file_name):\n class_map_json = {n: 
AML_TYPE_DICT[(G.node[n]).get(\"type\", None)] for n in G.nodes()}\n with open(path + file_name + '-class_map.json', 'w') as outfile:\n json.dump(class_map_json, outfile, cls=NumpyEncoder)\n\n\ndef create_id_map_json(G, path, file_name):\n id_map_json = {n: ind for ind, n in enumerate(G.nodes())}\n with open(path + file_name + '-id_map.json', 'w') as outfile:\n json.dump(id_map_json, outfile, cls=NumpyEncoder)\n\n\ndef create_feats_npy(G, path, file_name):\n g_df = pd.DataFrame.from_dict(G.nodes, orient='index')\n g_df = g_df.fillna(0)\n g_df = g_df.drop('type', axis=1)\n feats = g_df.to_numpy() # get df after removing type and index columns\n np.save(path + file_name + '-feats.npy', feats)\n\n\ndef create_train_val_test_graph(G, path, file_name):\n mapping = dict(zip(G.nodes(), map(str, G.nodes())))\n G = nx.relabel_nodes(G, mapping)\n\n class_map_json = {n: AML_TYPE_DICT[G[n].get(\"type\", None)] for n in G.nodes()}\n x = list(class_map_json.keys())\n y = list(class_map_json.values())\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=.2, random_state=0, stratify=y)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=.50, random_state=0, stratify=y_train)\n\n for n in G.nodes():\n G.node[n]['test'] = False\n G.node[n]['val'] = False\n for n in x_train:\n G.node[n]['test'] = True\n for n in x_val:\n G.node[n]['val'] = True\n\n data = json_graph.node_link_data(G)\n with open(path + file_name + '-updated.json', 'w') as outfile:\n json.dump(data, outfile, cls=NumpyEncoder)\n\n\ndef standard_graph_to_multiple_datasource(path, file_name):\n G = json_graph.node_link_graph(json.load(open(path + file_name + '.json')))\n create_class_map_json(G, path, file_name)\n create_id_map_json(G, path, file_name)\n create_feats_npy(G, path, file_name)\n create_train_val_test_graph(G, path, file_name)\n\n\nif __name__==\"__main__\":\n start = datetime.now()\n print('starting...................................: ', start)\n\n generate_multiple_graph_to_json_and_csv()\n # generate_graph_dataset_json_for_fastgcn(10)\n # standard_graph_to_multiple_datasource('data_fastgcn/input/', 'Network_p_0.016_w_0.003_1')\n\n end = datetime.now()\n print('starting...................................: ', start)\n print('finish.....................................: ', end)\n print('duration...................................: ', (end - start))\n\n","repo_name":"kimmintu/aml-transaction-monitoring","sub_path":"1_Transaction_and_Feature_Generator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38968715826","text":"from django import forms\n\nfrom core.forms import AddressCreateForm\n\n\nclass CustomCreateAddressForm(AddressCreateForm):\n def clean_city(self):\n city = self.cleaned_data['city']\n\n if city is None:\n raise forms.ValidationError(\n \"Veuillez donner un nom de ville.\")\n return city\n\n def clean_street(self):\n street = self.cleaned_data['street']\n\n if street is None:\n raise forms.ValidationError(\n \"Veuillez donner un nom de rue.\")\n return street\n\n def clean_number(self):\n number = self.cleaned_data['number']\n\n if number is None:\n raise forms.ValidationError(\n \"Veuillez donner un numéro\")\n return 
number\n","repo_name":"ppalex/projet_13_marcaurel","sub_path":"match/forms/address_creation_form.py","file_name":"address_creation_form.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1413407935","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the beautifulDays function below.\ndef beautifulDays(i, j, k):\n days=0\n for m in range (i,j+1):\n reversedday = str(m)[::-1]\n dayresult = (m-int(reversedday))%k\n if dayresult==0:\n days+=1\n elif dayresult!=0:\n continue\n return days\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n ijk = input().split()\n\n i = int(ijk[0])\n\n j = int(ijk[1])\n\n k = int(ijk[2])\n\n result = beautifulDays(i, j, k)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"faiyazkhanwif/ProblemSolving-Python","sub_path":"Problem Solving/Hackerrank/BeautifulDaysAtMovies.py","file_name":"BeautifulDaysAtMovies.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20029802963","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2016-09-08\n\n@author: Cheng Shangguan\n\"\"\"\nimport sys\nimport zmq\nimport simplejson\nimport subprocess\n\n\ndef execute_command():\n if len(sys.argv) != 2:\n return\n config = simplejson.load(open(\"event.json\", 'r'))\n if sys.argv[1] == \"start\":\n cmd = '%s %s' % (sys.executable, \"event_controller.py\")\n subprocess.Popen(cmd, shell=True)\n partitions = config.get('kafka').get('partitions')\n for i in range(0, partitions):\n cmd = '%s %s %d' % (sys.executable, \"event_pipeline.py\", i)\n subprocess.Popen(cmd, shell=True)\n else:\n cmd_port = config.get('controller').get('cmd_port')\n context = zmq.Context()\n req = context.socket(zmq.REQ)\n endpoint = \"tcp://localhost:%d\" % cmd_port if sys.platform == \"win32\" else \"ipc://cmd.ipc\"\n req.connect(endpoint)\n req.send_string(sys.argv[1])\n print(req.recv_string())\n req.close()\n context.term()\n\nexecute_command()\n","repo_name":"xbin1994/kafka_to_greenplum","sub_path":"event_cluster_cmd.py","file_name":"event_cluster_cmd.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74102829262","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework_api.views import StandardAPIView\nfrom rest_framework import permissions\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\nfrom .models import *\nfrom apps.user.models import UserAccount\nfrom core.producer import producer\nimport json\nfrom .consumers import FriendsConsumer\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\n\n\nclass SendFriendRequest(StandardAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n def put(self, request, format=None):\n # Get the email of the user to send a friend request to\n email = request.data.get('email')\n\n if not email:\n return Response({'message': 'Email is required'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Get the receiver user object\n try:\n receiver = User.objects.get(email=email)\n except User.DoesNotExist:\n return Response({'message': 'User with email {} does not exist'.format(email)}, status=status.HTTP_404_NOT_FOUND)\n\n sender = self.request.user\n\n if sender == 
receiver:\n return Response({'message': 'You cannot send friend request to yourself'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Check if a friend request has already been sent\n if FriendRequest.objects.filter(from_user=sender, to_user=receiver).exists():\n return Response({'message': 'You have already sent a friend request to {}'.format(receiver.username)}, status=status.HTTP_400_BAD_REQUEST)\n\n # Create a new friend request\n FriendRequest.objects.create(from_user=sender, to_user=receiver)\n\n # # Create Notification Object through kafka producer\n # notification_data = {\n # 'from_user': str(sender.id), # convert UUID to string\n # 'to_user': str(receiver.id), # convert UUID to string\n # 'notification_type': 3,\n # 'text_preview': f\"{sender.username} wants to be your friend.\",\n # 'url': '/@'+sender.username,\n # 'is_seen': False,\n # 'icon': 'bx bx-user-circle',\n # }\n # producer.produce(\n # 'notifications',\n # key='friend_request',\n # value=json.dumps(notification_data).encode('utf-8')\n # )\n # # encode notification data as JSON and produce to Kafka topic\n # producer.flush()\n\n return self.send_response({'message': 'Friend request has been sent to {}'.format(receiver.username)}, status=status.HTTP_200_OK)\n \n\n\n\nclass CancelFriendRequest(StandardAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def put(self, request, format=None):\n # Get the email of the user to cancel friend request\n email = request.data.get('email')\n\n if not email:\n return Response({'message': 'Email is required'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Get the receiver user object\n try:\n receiver = User.objects.get(email=email)\n except User.DoesNotExist:\n return Response({'message': 'User with email {} does not exist'.format(email)}, status=status.HTTP_404_NOT_FOUND)\n\n sender = self.request.user\n\n # Check if a friend request has been sent\n try:\n friend_request = FriendRequest.objects.get(from_user=sender, to_user=receiver)\n except FriendRequest.DoesNotExist:\n return Response({'message': 'No friend request found for {} from {}'.format(receiver.username, sender.username)}, status=status.HTTP_400_BAD_REQUEST)\n\n # Cancel the friend request\n friend_request.delete()\n\n return Response({'message': 'Friend request canceled'})\n\n\n\nclass RemoveFriend(StandardAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def put(self, request, format=None):\n email = request.data.get('email')\n try:\n friend = User.objects.get(email=email)\n except User.DoesNotExist:\n return self.send_error({'message': 'Friend not found'}, status=status.HTTP_404_NOT_FOUND)\n \n # Remove the friend from each other's friend list\n try:\n request.user.friend_list.friends.remove(friend)\n friend.friend_list.friends.remove(request.user)\n\n # Get channel layer and group name\n channel_layer = get_channel_layer()\n group_name = f'friends_{friend.id}'\n\n # Send message to WebSocket group\n async_to_sync(channel_layer.group_send)(group_name, {\n 'type': 'send_check_friends',\n 'is_friend': False\n })\n\n except Exception as e:\n # Handle any exceptions that might occur during the process\n print(f\"Error removing friend: {str(e)}\")\n return self.send_error({'message': 'Failed to remove friend'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return self.send_response('Friend removed')\n\n\nclass AcceptRequest(StandardAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n def put(self, request, format=None):\n friend_request_id=request.data['friend_request_id']\n 
action=request.data['action']\n try:\n friend_request = FriendRequest.objects.get(from_user__id=friend_request_id, to_user=request.user)\n except FriendRequest.DoesNotExist:\n return self.send_error({'message': 'Friend request does not exist'}, status=status.HTTP_404_NOT_FOUND)\n \n \n if action==True:\n friend_request.is_accepted = True\n friend_request.save()\n\n # Add the users to each other's friend list\n try:\n friend_request.from_user.friend_list.friends.add(request.user)\n request.user.friend_list.friends.add(friend_request.from_user)\n except Exception as e:\n # Handle any exceptions that might occur during the process\n print(f\"Error adding users to friend list: {str(e)}\")\n\n # Create Notification Object through kafka producer\n notification_data = {\n 'from_user': str(friend_request.to_user.id), # convert UUID to string\n 'to_user': str(friend_request.from_user.id), # convert UUID to string\n 'notification_type': 3,\n 'text_preview': f\"{friend_request.to_user.username} accepted your friend request.\",\n 'url': '/@'+friend_request.to_user.username,\n 'is_seen': False,\n 'icon': 'bx bx-user-circle',\n }\n producer.produce(\n 'notifications',\n key='friend_request',\n value=json.dumps(notification_data).encode('utf-8')\n )\n # encode notification data as JSON and produce to Kafka topic\n producer.flush()\n\n # Create message to send to WebSocket\n # message = {\n # 'type': 'send_check_friends',\n # 'is_friend': True\n # }\n \n # Get channel layer and group name\n channel_layer = get_channel_layer()\n group_name = f'friends_{friend_request.from_user.id}'\n\n # Send message to WebSocket group\n async_to_sync(channel_layer.group_send)(group_name, {\n 'type': 'send_check_friends',\n 'is_friend': True\n })\n friend_request.delete()\n else:\n friend_request.is_archived = True\n friend_request.save()\n\n # Notify the sender that the friend request is accepted\n try:\n user_id = request.user.id\n friend_requests = FriendRequest.objects.filter(to_user_id=user_id, is_archived=False, is_accepted=False)[0:0+20]\n friend_requests_count = FriendRequest.objects.filter(to_user_id=user_id, is_archived=False, is_accepted=False).count()\n\n # Create message to send to WebSocket\n message = {\n 'type': 'send_friend_requests',\n 'data': list(friend_requests.values()),\n 'total_count': friend_requests_count\n }\n \n # Get channel layer and group name\n channel_layer = get_channel_layer()\n group_name = f'friends_{user_id}'\n\n # Send message to WebSocket group\n async_to_sync(channel_layer.group_send)(group_name, message)\n \n except Exception as e:\n # Handle any exceptions that might occur during the process\n print(f\"Error sending group message: {str(e)}\")\n \n return self.send_response('Friend request updated successfully', status=status.HTTP_200_OK)\n \n\nclass CheckRequestSentView(StandardAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n \n def post(self, request, format=None):\n # Get the email of the user to check\n email = request.data.get('email')\n\n if not email:\n return Response({'message': 'Email is required'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Get the user objects\n sender = self.request.user\n try:\n receiver = User.objects.get(email=email)\n except User.DoesNotExist:\n return Response({'message': 'User with email {} does not exist'.format(email)}, status=status.HTTP_404_NOT_FOUND)\n\n # Check if a friend request has already been sent\n if FriendRequest.objects.filter(from_user=sender, to_user=receiver).exists():\n return Response({'request_sent': 
True})\n\n return Response({'request_sent': False})\n\n\nclass CheckUsersAreFriendsView(StandardAPIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, format=None):\n # Get the email of the user to check\n email = request.data.get('email')\n\n if not email:\n return Response({'message': 'Email is required'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Get the user objects\n user = self.request.user\n try:\n friend = User.objects.get(email=email)\n except User.DoesNotExist:\n return Response({'message': 'User with email {} does not exist'.format(email)}, status=status.HTTP_404_NOT_FOUND)\n\n # Check if they are friends\n friends_list = FriendList.objects.get(user=user)\n if friend in friends_list.friends.all():\n # return Response({'are_friends': True})\n return self.send_response(True, status=status.HTTP_200_OK)\n\n # return Response({'are_friends': False})\n return self.send_response(False, status=status.HTTP_200_OK)","repo_name":"apholdings/curso_microservicios_backend","sub_path":"auth/apps/friends/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10741,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"4155262983","text":"import urllib\nfrom urllib import request\nimport numpy as np\nfrom numpy import linalg\n\n# Sample Input:\n#\n# https://stepic.org/media/attachments/lesson/16462/boston_houses.csv\n# tmpFileName = 'https://stepic.org/media/attachments/lesson/16462/boston_houses.csv'\n# fname = tmpFileName # read file name from stdin\nfname = input() # read file name from stdin\nf = urllib.request.urlopen(fname) # open file from URL\ndata = np.loadtxt(f, delimiter=',', skiprows=1) # load data to work with\n\ny = data[:, 0].reshape((-1, 1))\n\nxFakeColumn = np.ones_like(y)\nx = np.hstack((xFakeColumn, data[:, 1:]))\n\nstep1 = x.T @ x\nstep2 = linalg.inv(step1) @ x.T\nstep3 = step2 @ y\nresult = np.around(step3.ravel(), decimals=4)\n\nprint(' '.join(str(x) for x in result))\n\n","repo_name":"scuderia1000/NeuralNetworkEducation","sub_path":"src/step_1_7.py","file_name":"step_1_7.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13073368806","text":"import os\nimport boto3\nimport requests\nfrom requests_aws4auth import AWS4Auth\nes_host = \" \" # enter you open search domain name with https://\nes_index = \"shop\"\nes_type = \"product\"\nurl = es_host + '/' + es_index + '/' + es_type + '/'\nregion = '' #enter your region e.g., us-west-2\nservice = 'es'\ncredentials = boto3.Session().get_credentials()\nawsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\ndef lambda_handler(event, context):\n print(event)\n for record in event['Records']:\n id = str(record['dynamodb']['Keys']['ID']['S'])\n if record['eventName'] == 'REMOVE':\n res = requests.delete(url + id, auth=awsauth)\n else:\n document = record['dynamodb']['NewImage']\n res = requests.put(url + id, auth=awsauth, json=document, headers={\"Content-Type\": \"application/json\"})\n print(\"Successfully executed\")\n \n\n","repo_name":"urvi-28/ecommerce-app","sub_path":"Lambda codes/storeandupdate.py","file_name":"storeandupdate.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3888355800","text":"'''\nFind the relationship between the background topics and 
machine learning topics\nFirst draft by Scott Liu @ 2020 Oct\n'''\n\n# Import statements\nimport os\nimport csv\nimport numpy as np\nfrom metrics import get_f1\nfrom plot.plot_utils import topic_capacity_image, plot_relation_heatmap\nfrom architecture import ExperimentalAdditiveFactorModel, BackgroundRelationModel\nfrom train_model import NAMES, SCORE_FILES, TAG_FILES, EMAIL_INDEX, QUESTION_START_INDEX\nfrom train_model import StringToIndex, form_Q_matrix, make_tag_question_map, merge_tag_question_maps\nfrom train_model import shuffle_together, strip_points, clean_scores_and_tags, read_scores, read_tags\n\ndef main():\n\n question_index_map = StringToIndex() # custom class\n tag_index_map = StringToIndex() # custom class\n user_index_map = StringToIndex() # custom class\n tag_question_maps = []\n user_list_bundle, score_list_bundle, max_scores_bundle = [], [], []\n for name, score_file, tag_file in zip(NAMES, SCORE_FILES, TAG_FILES):\n user_list, score_list, max_scores = read_scores(score_file)\n tag_list = read_tags(tag_file)\n score_list, max_scores, tag_list = clean_scores_and_tags(score_list, max_scores, tag_list)\n user_list_bundle.append(user_list)\n score_list_bundle.append(score_list)\n max_scores_bundle.append(max_scores)\n tag_question_maps.append(make_tag_question_map(name, tag_list, question_index_map, tag_index_map, len(max_scores)))\n final_tag_question_map = merge_tag_question_maps(tag_question_maps)\n\n # construct the data for model training\n users, questions, scores = [], [], []\n question_starting_index = 0\n for idx, name in enumerate(NAMES):\n user_list = user_list_bundle[idx]\n score_list = score_list_bundle[idx]\n max_scores = max_scores_bundle[idx]\n for i in range(len(user_list)):\n user = user_list[i]\n scores_row = score_list[i]\n user_index_map.register(user)\n user_index = user_index_map.get(user)\n for j in range(len(scores_row)):\n normalized_score = scores_row[j] / max_scores[j]\n users.append(user_index)\n questions.append(j + question_starting_index)\n scores.append(normalized_score)\n # we have to update the question_starting_index by an offset equal to the number of questions\n question_starting_index += len(score_list[0])\n\n all_users = user_index_map.get_all()\n all_questions = question_index_map.get_all()\n all_tags = tag_index_map.get_all()\n\n n_users = len(user_index_map.get_all())\n n_questions = len(question_index_map.get_all())\n\n print(\"##### Loading model: EAFM ####\")\n model3 = ExperimentalAdditiveFactorModel.load(\"model/eafm.bin\")\n\n # student-topic vector: alpha\n alpha = model3.alpha.cpu().detach().numpy()\n if alpha.shape[0] != len(all_users) or alpha.shape[1] != len(all_tags):\n print(\"Student capacity parameter shape: {}\".format(alpha.shape))\n print(\"Number of unique users in data: {}\".format(len(all_users)))\n print(\"Number of unique tags in data: {}\".format(len(all_tags)))\n raise RuntimeError(\"Dimension mismatch: please make sure the model is trained with the same data\")\n\n # find alpha_bg and alpha_ml\n alpha_bg_list = []\n alpha_ml_list = []\n bg_tags = []\n ml_tags = []\n for idx, tag in enumerate(all_tags):\n if tag.startswith(\"bkgrd\"):\n alpha_bg_list.append(alpha[:,idx])\n bg_tags.append(tag)\n elif tag.startswith(\"ml\"):\n alpha_ml_list.append(alpha[:,idx])\n ml_tags.append(tag)\n alpha_bg = np.vstack(alpha_bg_list).T\n alpha_ml = np.vstack(alpha_ml_list).T\n\n # split train / val / test\n print(\"alpha_bg.shape:\", alpha_bg.shape)\n print(\"alpha_ml.shape:\", alpha_ml.shape)\n train_alpha_bg, 
val_alpha_bg, test_alpha_bg = alpha_bg[:-140], alpha_bg[-140:-70], alpha_bg[-70:]\n train_alpha_ml, val_alpha_ml, test_alpha_ml = alpha_ml[:-140], alpha_ml[-140:-70], alpha_ml[-70:]\n\n # train or load\n load = False\n if not load:\n model = BackgroundRelationModel(alpha_bg.shape[1], alpha_ml.shape[1])\n model.auto_fit(train_alpha_bg, train_alpha_ml, val_alpha_bg, val_alpha_ml,\n lr=1e-3, reg=1e-3, patience=50)\n model.save(\"model/bg.bin\")\n else:\n model = BackgroundRelationModel.load(\"model/bg.bin\")\n\n # visualize relation\n relation_matrix = model.W.cpu().detach().numpy()\n plot_relation_heatmap(relation_matrix, bg_tags, ml_tags)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"scott0123/psychometrics","sub_path":"find_bg_ml_relation.py","file_name":"find_bg_ml_relation.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72522204301","text":"# 2. Дан список, вывести отдельно буквы и цифры:\n# 123 Hello! 5456 How 1 are 6546 you, men? \n\n\n## Вариант 1:\n\ndata = input('Please input some numbers and words: ').split()\nfiltered_nums = ' '.join(filter(lambda x: x.isdigit(), data))\nfiltered_letters = ' '.join(filter(lambda x: not x.isdigit(), data))\nprint(filtered_nums)\nprint(filtered_letters)\n\n\n## Вариант 2:\n\na = ( \"a\", 'b', '2', '3' ,'c')\n\nb= filter(str.isalpha, a)\nc= filter(str.isdigit, a)\n\nprint(*b)\nprint(*c)","repo_name":"igRISk/PythonLessons","sub_path":"Seminar_06/S6T2_FilterLettersNumbers.py","file_name":"S6T2_FilterLettersNumbers.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31443214780","text":"from train import StructAgg\nfrom utils import *\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndataset_name = \"DD\"\n\nargs = parse_args()\n\nnum_layers = int(args[\"num_layers\"])\nnum_epochs = int(args[\"num_epochs\"])\nverbose = args[\"verbose\"]\nreg = args[\"reg\"]\npath_results = str(args[\"path_results\"])\npath_weights = str(args[\"path_weights\"])\npath_data = str(args[\"path_data\"])\nlr = float(args[\"lr\"])\nbins = int(args[\"bins\"])\nn_dim = int(args[\"n_dim\"])\nalpha_reg = float(args[\"alpha_reg\"])\n\nnet = StructAgg\n\n# Load dataset\nprint('Load dataset {}'.format(dataset_name))\ntrain_idx = None\ntest_idx = None\nval_idx = None\nif dataset_name.split(\"-\")[0] != \"ogbg\":\n dataset_path = path_data + dataset_name + \"/\" + dataset_name\nelse:\n dataset_path = dataset_name\n\n# Load dataset\ndataset = GraphDataset(dataset_path)\ntry:\n train_idx = dataset.train_idx\n test_idx = dataset.valid_idx\n val_idx = dataset.test_idx\nexcept:\n pass\n\nX, y = normalize_adj(dataset)\nn_feat = X[0][1].shape[1]\nprint(\"feature size: {}\".format(n_feat))\nk = 0\ng_size = X[0][1].shape[0]\nout_dim = int(np.max(y)) + 1\n\ncross_val(net,\n X,\n y,\n dataset_name,\n path_results,\n n_feat=n_feat,\n n_dim=n_dim,\n g_size=g_size,\n bins=bins,\n num_layers=num_layers,\n out_dim=out_dim,\n lr=lr,\n num_epochs=num_epochs,\n path_weights=path_weights,\n device=device,\n k=k,\n verbose=verbose,\n reg=reg,\n alpha_reg=alpha_reg,\n train_idx=train_idx,\n test_idx=test_idx,\n val_idx=val_idx)\nprint(\"----------------\")\nprint(\"new 
set\")\nprint(\"----------------\")\n","repo_name":"agalland/StructuralAggregation","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9057664324","text":"from hbase import db\n\nstudent = db['student']\ncourse = db['course']\nstudent_course = db['student_course']\nsid = '0'\n\n\ndef data_input():\n global sid\n print('欢迎进入选课系统!')\n sid = str(input('请输入你的学号:'))\n # sid = '200900130985'\n\n\ndef course_output():\n selected_courses = student_course.scan(row_prefix=sid.encode('utf8'))\n print('\\n以下为你目前选择的课程:')\n print('########################')\n print('\\t课程号\\t课程名')\n for key, s_c in selected_courses:\n s_c = dict(s_c)\n cid = s_c[b'info:CID'].decode('utf8')\n oneC = course.row(cid)\n name = oneC[b'info:NAME'].decode('utf8')\n print('\\t{}\\t{}'.format(cid, name))\n print('########################\\n')\n\n\ndef course_select(cid):\n select_course = course.row(cid)\n if len(select_course) == 0:\n print('该课程不存在,请重新输入!')\n return\n\n sc_data = {\n 'info:SID': sid,\n 'info:CID': cid\n }\n student_course.put(sc_data['info:SID']+':'+sc_data['info:CID'], sc_data)\n print('选课成功')\n course_output()\n\n\nif __name__ == '__main__':\n data_input()\n course_output()\n target_cid = str(input('请输入你要选择的课程号(输入0终止):'))\n while target_cid != '0':\n course_select(target_cid)\n target_cid = str(input('请输入你要选择的课程号(输入0终止):'))\n","repo_name":"zyrest/nosql-lab","sub_path":"hbase/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"22000381439","text":"\"\"\"\nhttps://leetcode.com/problems/binary-tree-level-order-traversal/\nDefinition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\"\"\"\n\nfrom collections import deque\n\n\nclass Solution:\n \"\"\"BFS\n 1. initial root insert\n 2. while queue && len(queue)는 level을 의미하니 빈 리스트를 만든다.\n 3. 
pop 시점에 리스트에 append\n \"\"\"\n\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return []\n\n result = []\n queue = deque([root])\n while queue:\n level_nodes = []\n n = len(queue)\n for _ in range(n):\n node = queue.pop()\n\n if node.left:\n queue.appendleft(node.left)\n if node.right:\n queue.appendleft(node.right)\n level_nodes.append(node.val)\n result.append(level_nodes)\n return result\n","repo_name":"minkj1992/algorithm","sub_path":"practice/leetcode/binaryTree/102.BinaryTreeLevelOrderTraversal.py","file_name":"102.BinaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12302541241","text":"import numpy as np\nimport sys\nsys.path.append(\"/home/astro/dforero/codes/pypowspec/powspec/\")\nfrom pypowspec import compute_auto_box, compute_cross_box\n\n\ndata_cat_fn = \"/home/astro/dforero/codes/BAOrec/data/CATALPTCICz0.466G960S1010008301_zspace.dat.npy\"\ndata = np.load(data_cat_fn)\ndata += 2500\ndata %= 2500\nw = np.ones(data.shape[0], dtype = data.dtype)\npk = compute_auto_box(data[:,0], data[:,1], data[:,2], w, \n powspec_conf_file = \"test/powspec_auto.conf\",\n output_file = \"test/box_auto_test.powspec\")","repo_name":"dforero0896/CosmoCorr.jl","sub_path":"test/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42198154973","text":"\"\"\"\n2. Crie uma classe Pessoa que pode armazenaPessoa 10 pessoas e seja capas de realizar as\nseguintes operações:\n\n• void armazenaPessoa(String nome, int idade, float altura); \n• void removePessoa(String nome);\n• int buscaPessoa(String nome); // informa em que posição da agenda está a pessoa\n• void imprimeAgenda(); // imprime os dados de todas as pessoas da agenda\n• void imprimePessoa(int index); // imprime os dados da pessoa que está na posição “i” da agenda.\n\n\"\"\"\n\nclass Agenda:\n\n lista = dict()\n\n @classmethod\n def removePessoa(cls, quem):\n try:\n del cls.lista[quem]\n print('Agenda Atualizada.')\n except KeyError:\n print('Pessoa inexistente nesta agenda')\n\n @classmethod\n def buscaPessoa(cls, quem):\n try:\n busca = cls.lista[quem]\n print(f'{busca[0]} {quem}')\n except KeyError:\n print('Pessoa inexistente nesta agenda')\n\n @classmethod\n def imprimePessoa(cls, quem):\n try:\n busca = cls.lista[quem]\n print(busca)\n except KeyError:\n print('Pessoa inexistente nesta agenda')\n\n @staticmethod\n def imprimeAgenda():\n try:\n with open('ex02_Agenda.txt', 'r') as arquivo:\n print(arquivo.read())\n except FileNotFoundError:\n print('Nenhum dado foi cadastrado na agenda até o momento!')\n\n def __init__(self, posicao, nome, idade, altura):\n self.__posicao = f'Posição {posicao}'\n self.__nome = nome\n self.__idade = f'{idade} anos.'\n self.__altura = f'altura : {altura}'\n \n def armazenaPessoa(self):\n Agenda.lista[self.__nome]= [self.__posicao, self.__idade, self.__altura]\n with open('ex02_Agenda.txt', 'a') as arquivo:\n arquivo.write(f'{self.__nome} = {self.__posicao}, {self.__idade}, {self.__altura}\\n')\n\n\nfor i in range(1, 4):\n posicao = i\n try:\n nome = str(input(f'{i} - Digite nome da pessoa: '))\n except ValueError:\n nome = 'Dado iserido incorreto'\n\n try:\n idade = int(input(f'{i} - Informe sua idade: '))\n except ValueError:\n idade = 'Dado iserido incorreto'\n\n try:\n altura = float(input(f'{i} - E sua 
altura: '))\n except ValueError:\n altura = 'Dado iserido incorreto'\n \n pessoa = Agenda(posicao, nome, idade, altura)\n pessoa.armazenaPessoa()\n\nmenu = int(input('Digite uma das opções:\\n 1- Remover uma pessoa:\\n 2- Buscar uma pessoa:\\n '+\n '3- Imprimir agenda:\\n 4- Imprimir dados de uma pessoa:\\n 5- Caso deseja sair. '\n ))\n\nif menu == 1:\n quem = str(input('Digite o nome da pessoa que deseja remover: '))\n Agenda.removePessoa(quem)\nelif menu == 2:\n quem = str(input('Digite o nome da pessoa que deseja buscar: '))\n Agenda.buscaPessoa(quem)\nelif menu == 3:\n Agenda.imprimeAgenda()\nelif menu == 4:\n quem = str(input('Digite o nome da pessoa que deseja buscar os dados: '))\n Agenda.imprimePessoa(quem)\nelif menu == 5:\n exit(1)\nelse:\n print('Número invalido')\n ","repo_name":"pand-oly/curso_python","sub_path":"secao-16/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40853243746","text":"import numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\nimport matplotlib\nx = 2\n\ndef EMA(N,samples, sample_id):\n alpha = 2 / (N + 1)\n base = 1 - alpha\n sum_top = 0\n sum_bottom = 0\n for i in range(N):\n if sample_id - i >= 0:\n base_pow = pow(base,i)\n sum_bottom += base_pow\n sum_top += base_pow * samples[sample_id - i]\n\n return sum_top/sum_bottom\n\ndef MACD(samples, sample_id):\n EMA_12 = EMA(12,samples,sample_id)\n EMA_26 = EMA(26,samples,sample_id)\n\n return EMA_12 - EMA_26\n\ndef generate_MACD(n,samples):\n MACD_val = []\n for i in range(n):\n MACD_val.append(MACD(samples,i))\n return MACD_val\n\ndef SIGNAL(MACD_val,id):\n return EMA(9,MACD_val,id)\n\ndef generate_SIGNAL(n, MACD_val):\n SIGNAL_val = []\n for i in range(n):\n SIGNAL_val.append(SIGNAL(MACD_val, i))\n return SIGNAL_val\ndef read_file(file_name):\n file = pandas.read_csv(file_name,index_col=0)\n values = []\n for i,j in file.iterrows():\n #print(file.at[i,'Zamkniecie'])\n values.append(file.at[i,'Zamkniecie'])\n # if not open(file):\n # print(\"Something went wrong, unable to open file\")\n # return -1\n return values\n#flag determines if it needs 2 diagrams at plot on not\ndef make_plot(MACD_val, title, isTwoDiagrams, label1, label2='',SIGNAL_val=None):\n matplotlib.use('Tkagg')\n range = np.linspace(0,1000,1000) #check if it is nessesary and how it worksq\n line1, = plt.plot(range,MACD_val, label=label1)\n if(isTwoDiagrams):\n line2, = plt.plot(range,SIGNAL_val, label=label2)\n plt.legend(handles=[line1, line2])\n else:\n plt.legend(handles=[line1])\n plt.title(title)\ndef simulation(N,MACD_val, SIGNAL_val,samples,value,delay):\n max_size = value-1\n delay_cond = (delay if delay == 0 else delay - 1) # condition to avoid -1 in case dealy equals 0\n assets = value / samples[0]\n value = 0\n for i in range(N):\n if i >= delay:\n if MACD_val[i-delay] > SIGNAL_val[i-delay] and MACD_val[i- delay_cond] < SIGNAL_val[i-delay_cond] and assets != 0:\n value = assets * samples[i]\n assets = 0\n elif MACD_val[i-2] < SIGNAL_val[i-delay] and MACD_val[i-delay_cond] > SIGNAL_val[i-delay_cond] and value != 0:\n assets = value / samples[i]\n value = 0\n if(value == 0):\n value = assets * samples[max_size]\n print(value)\ndef L14(samples,start,period=14): #finds the lowest price traded of the 14 previous trading sessions\n min = samples[start]\n for i in range(start-period,start):\n if samples[i] < min:\n min = samples[i]\n return min\n\ndef H14(samples,start,period=14): #finds the 
highest price traded of the 14 previous trading sessions\n max = samples[start]\n for i in range(start-period,start):\n if samples[i] > max:\n max = samples[i]\n return max\n\ndef stochastic_oscillator(samples,n):\n K=0\n period = 14\n K_values = []\n for i in range(period,n):\n K_top = samples[i] - L14(samples,i)\n K += K_top\n K_bottom = H14(samples,i) - L14(samples,i)\n K /= K_bottom\n K *= 100\n K_values.append(K)\n return K_values\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n n = 1000\n samples = read_file('wig20_d.csv')\n make_plot(samples,'Wig20',False,'wig20')\n plt.figure()\n MACD_val = generate_MACD(n,samples)\n SIGNAL_val = generate_SIGNAL(n,MACD_val)\n K_val = stochastic_oscillator(samples,n)\n print(MACD_val)\n print(SIGNAL_val)\n print(K_val)\n #TODO analise with Stochastic Oscillator to get better results\n simulation(n,MACD_val,SIGNAL_val,samples,1000,40)\n make_plot(MACD_val,'MACD AND SIGNAL', True, 'MACD', 'SIGNAL',SIGNAL_val)\n #plt.set_figwidth(300)\n #TODO add classes\n #TODO prepare security for no opened file\n plt.show()","repo_name":"PR0TEX/MACD","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28263645480","text":"#LOAD PACKAGES\nimport argparse, os, sys, timeit\nimport pandas as pd\n\nfrom datetime import datetime\nget_time = datetime.now\n\n## Load from scripts\nfrom utils import createDirs, Predictions\nfrom tf_utils import DataLoader\nfrom SiamCDR_DNN import SiamCDR_DNN\n\n#### PARSER\nparser = argparse.ArgumentParser(usage=__doc__)\n\n### Model Architecture\n## pretrained models\nparser.add_argument(\n \"--cellEncoder\",\n required=False,\n type=str,\n default=None,\n help='Pretrained feature encoder for cell lines')\nparser.add_argument(\n \"--drugEncoder\",\n required=False,\n type=str,\n default=None,\n help='Pretrained feature encoder for drugs')\n\n## Architecture\nparser.add_argument(\n \"--nodeList\",\n required=False,\n type=int,\n nargs='+',\n default=[],\n help='Number of nodes in each hidden layer')\nparser.add_argument(\n \"--dropout\",\n required=False,\n type=float,\n default=0.1,\n help='dropout rate')\nparser.add_argument(\n \"--activation\",\n required=False,\n type=str,\n default='relu',\n help=\"activation function to use for the model's hidden layers\")\n\n### Model Training\n## early stopping\nparser.add_argument(\n \"--epochs\",\n required=False,\n type=int,\n default=1000,\n help='max epochs')\nparser.add_argument(\n \"--batchSize\",\n required=False,\n type=int,\n default=512,\n help='minibatch size')\nparser.add_argument(\n \"--patience\",\n required=False,\n type=int,\n default=15,\n help='Patience for early stopping')\nparser.add_argument(\n \"--minDelta\",\n required=False,\n type=float,\n default=0.0001,\n help='Minimum change in validation AUC to reset early stopping patience counter')\n\n## learning rate schedule\nparser.add_argument(\n \"--learningRate\",\n required=False,\n type=float,\n default=0.001,\n help='starting learning rate for training')\nparser.add_argument(\n \"--decayRate\",\n required=False,\n type=float,\n default=0.99,\n help=\"rate of decay for training's exponential decay learning rate scheduler\")\nparser.add_argument(\n \"--decaySteps\",\n required=False,\n type=int,\n default=50,\n help=\"number of epochs between each decay step of learning rate during training\")\n\n### OUT\nparser.add_argument(\n \"--save\",\n 
required=False,\n    type=bool,  # caveat: argparse's type=bool treats any non-empty string as True\n    default=True,\n    help='Should weights be saved after fit to data?')\nparser.add_argument(\n    \"--dir\",\n    type=str,\n    default='full_model',\n    help='parent directory for saving output from training and testing')\nparser.add_argument(\n    \"--out\",\n    required=True,\n    type=str,\n    help='file name for saving results and model')\n\n\n#### DEFINE\n### Constants\nfdir = os.path.dirname(__file__)\n\nstart = timeit.default_timer()\nprint(\"{0}: Executing script: trainSiamCDR_DNN.py\".format(get_time()))\n\n# Define presplit data files\nrna_paths = {'train': f'{fdir}/../../data/processed/RNA_train_cancergenes.csv',\n             'val': f'{fdir}/../../data/processed/RNA_val_cancergenes.csv',\n             'test': f'{fdir}/../../data/processed/RNA_test_cancergenes.csv',\n             'newcancer': f'{fdir}/../../data/processed/RNA_newcancer_cancergenes.csv'}\n\ndrug_path = f'{fdir}/../../data/processed/drug_fingerprints.csv'\ncdr_path = f'{fdir}/../../data/processed/drugCellLinePairsData.csv'\ninfo_path = f'{fdir}/../../data/processed/cellLineInfo.csv'\n\n## Functions\ndef use_parser(argv):\n    # Parse input\n    args = parser.parse_args(argv)\n    inputs = {}\n\n    # Architecture\n    inputs['arch'] = {}\n    inputs['arch']['cellLineModelPath'] = args.cellEncoder\n    inputs['arch']['drugModelPath'] = args.drugEncoder\n    inputs['arch']['nodeList'] = args.nodeList\n    print(f\"[INFO] Node List: {args.nodeList}...\")\n    inputs['arch']['dropout'] = args.dropout\n    inputs['arch']['activation'] = args.activation\n\n    # Training\n    inputs['fit'] = {}\n    inputs['fit']['learningRate'] = args.learningRate\n    inputs['fit']['decayRate'] = args.decayRate\n    inputs['fit']['decaySteps'] = args.decaySteps\n    inputs['fit']['epochs'] = args.epochs\n    inputs['fit']['batchSize'] = args.batchSize\n    inputs['fit']['patience'] = args.patience\n    inputs['fit']['minDelta'] = args.minDelta\n\n    ## Save\n    inputs['out'] = {}\n    inputs['out']['save'] = args.save\n    inputs['out']['fname'] = args.out\n\n    parent = args.dir\n    inputs['out']['paths'] = createDirs(parent).paths\n\n    metrics = ['precision', 'recall', 'f1']\n\n    return inputs, metrics\n\n#### MAIN\ndef run(argv):\n    # Parse input fields\n    inputs, metrics = use_parser(argv)\n\n    # Load data\n    data = {}\n    cdrs = {}\n    loader = DataLoader(drug_path, cdr_path, info_path, drugsProcessed=True)\n    for split, path in rna_paths.items():\n        data[split] = {}\n        data[split]['rna'], data[split]['drug'], data[split]['GT'], cdrs[split] = loader.get_split(path)\n\n    # Initialize full model (class imported above as SiamCDR_DNN)\n    model = SiamCDR_DNN(**inputs['arch'])\n\n    # Train the SiamCDR model\n    model_path = os.path.join(inputs['out']['paths']['models'], inputs['out']['fname'])\n    history = model.fit(train=(data['train']['drug'], data['train']['rna'], data['train']['GT']),\n                        val=(data['val']['drug'], data['val']['rna'], data['val']['GT']),\n                        modelPath=model_path, saveModel=inputs['out']['save'], **inputs['fit'])\n\n    sys.stdout.flush()\n    print(f'{get_time()}: Training completed.')\n\n    # Save training and validation loss history\n    fit_fname = os.path.join(inputs['out']['paths']['fit'], f\"{inputs['out']['fname']}_FitLoss.csv\")\n    pd.DataFrame({'train_loss': history['loss'],\n                  'val_loss': history['val_loss']}).to_csv(fit_fname, index=False)\n    print(f'{get_time()}: Training data saved')\n\n    print(f'{get_time()}: Obtaining predictions and performance metrics')\n    evaluator = Predictions(model)\n    for split, cdr in cdrs.items():\n        input_data = [data[split]['drug'], data[split]['rna']]\n        cancer_type = cdr.DepMap_ID.apply(loader.cancer_lookup).values\n        res_out = 
os.path.join(inputs['out']['paths'][split+'_res'], inputs['out']['fname'])\n if split in ['train', 'val']:\n evaluator.evalPerformance(input_data, cdr, cancer_type, metrics, res_out, save_preds=False)\n else:\n pred_out = os.path.join(inputs['out']['paths'][split+'_preds'], inputs['out']['fname'])\n evaluator.evalPerformance(input_data, cdr, cancer_type, metrics, res_out, pred_out, save_preds=True)\n\n # save average performance of k fold cross validation\n stop = timeit.default_timer()\n print('{1}: Runtime = {0}'.format(stop - start, get_time()))\n sys.stdout.flush()\n\nif __name__ == '__main__':\n run(sys.argv[1:])\n \n","repo_name":"ninglab/SiamCDR","sub_path":"src/models/trainSiamCDR_DNN.py","file_name":"trainSiamCDR_DNN.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"69849038224","text":"from tkinter import *\r\nfrom functools import partial\r\nfrom time import sleep\r\n\r\n\r\ndef center_gui(root):\r\n\r\n # Gets the requested values of the height and widht.\r\n windowWidth = root.winfo_reqwidth()\r\n windowHeight = root.winfo_reqheight()\r\n\r\n # Gets both half the screen width/height and window width/height\r\n positionRight = int(root.winfo_screenwidth()/2 - windowWidth/2)\r\n positionDown = int(root.winfo_screenheight()/2 - windowHeight/2)\r\n\r\n # Positions the window in the center of the page.\r\n root.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n\r\n\r\ndef pop_up_window(app):\r\n \"\"\"\r\n Displays the start up window with the instructions.\r\n When the pop up is displayed the buttons at the backround\r\n are disabled\r\n \"\"\"\r\n\r\n def start_button_action():\r\n\r\n app.enable_buttons()\r\n win.destroy()\r\n\r\n win = Toplevel()\r\n win.wm_title(\"Welcome\")\r\n\r\n Label(win, text=\"Step 1: Select starting point\",\r\n font=(\"Calibri\", 13), pady=5, padx=10).pack()\r\n Label(win, text=\"Step 2: Select end point\", font=(\r\n \"Calibri\", 13), pady=5, padx=10).pack()\r\n Label(win, text=\"Step 3: Select Obstacles\", font=(\r\n \"Calibri\", 13), pady=5, padx=10).pack()\r\n Label(win, text=\"Click and hover.Then click again to stop\", padx=25).pack()\r\n Label(win, text=\"Step 4: Press Enter to start\",\r\n font=(\"Calibri\", 13), pady=5, padx=10).pack()\r\n Label(win, text=\"Step 5: Press R to restart\",\r\n font=(\"Calibri\", 13), pady=5, padx=10).pack()\r\n Button(win, text=\"Start\", command=start_button_action,\r\n ).pack()\r\n\r\n win.update_idletasks()\r\n center_gui(win)\r\n\r\n\r\nclass App:\r\n\r\n def __init__(self, master):\r\n\r\n self.master = master\r\n master.wm_title(\"A* Algorithm\")\r\n self.buttons = []\r\n self.start = []\r\n self.goal = []\r\n self.obstacles = []\r\n self.mode = 0\r\n\r\n for i in range(25):\r\n self.buttons.append([])\r\n for j in range(25):\r\n\r\n # Initiliaze buttons\r\n button = Button(master, width=2, height=1,\r\n command=partial(self.button_operation, i, j), state=\"disabled\")\r\n\r\n self.buttons[i].append(button)\r\n\r\n # This event is used for the obstacle setting\r\n self.buttons[i][j].bind('', partial(\r\n self.add_obstacle, i, j))\r\n\r\n self.buttons[i][j].grid(row=i, column=j)\r\n\r\n master.update_idletasks()\r\n center_gui(master)\r\n\r\n pop_up_window(self)\r\n\r\n def enable_buttons(self):\r\n\r\n for i in range(25):\r\n for j in range(25):\r\n self.buttons[i][j].configure(state=\"normal\")\r\n\r\n def disable_buttons(self):\r\n\r\n for i in range(25):\r\n for j in range(25):\r\n 
self.buttons[i][j].configure(state=\"disable\")\r\n\r\n # Every time a button is clicked this function is triggered\r\n # This function is responsible for controling the flow of the program\r\n\r\n def button_operation(self, row, column):\r\n \"\"\"\r\n According to the value of 'mode' this fuction\r\n sets the value of start and end. Also by changing\r\n the value of mode it controls when we can set obstacles and\r\n when we can start the algorithm\r\n \"\"\"\r\n\r\n # Set start mode\r\n if self.mode == 0:\r\n\r\n self.start.append(row)\r\n self.start.append(column)\r\n self.mode = 1\r\n self.buttons[row][column].configure(bg='green')\r\n\r\n # Set end mode\r\n elif self.mode == 1:\r\n\r\n self.goal.append(row)\r\n self.goal.append(column)\r\n self.mode = 2\r\n self.buttons[row][column].configure(bg='red')\r\n\r\n elif self.mode == 2:\r\n\r\n # Set to set obstacles mode => By hovering over buttons\r\n self.mode = 3\r\n\r\n else:\r\n # When the mode = 2 the user cant set obstacles by hovering and the algorithm can start\r\n self.mode = 2\r\n\r\n def add_obstacle(self, row, column, event):\r\n\r\n # Checks if we are in the obstacle setting mode\r\n if self.mode == 3:\r\n obstacle_node = []\r\n obstacle_node.append(row)\r\n obstacle_node.append(column)\r\n\r\n self.obstacles.append(obstacle_node[:])\r\n self.buttons[row][column].configure(bg='black')\r\n\r\n def heuristic(self, node1, node2):\r\n result = abs(node1[0] - node2[0]) + abs(node1[1]-node2[1])\r\n return result\r\n\r\n def find_neighbors(self, current, obstacles):\r\n\r\n neighbors = []\r\n\r\n # With current[:] I create a new list and I dont use the pointer to the original list otherwise the end result whould have same lists\r\n\r\n right_neighbor = current[:]\r\n right_neighbor[1] = current[1] + 1\r\n\r\n if 0 <= right_neighbor[1] < 25 and right_neighbor not in self.obstacles:\r\n neighbors.append(right_neighbor)\r\n\r\n left_neighbor = current[:]\r\n left_neighbor[1] = current[1] - 1\r\n\r\n if 0 <= left_neighbor[1] < 25 and left_neighbor not in self.obstacles:\r\n neighbors.append(left_neighbor)\r\n\r\n up_neighbor = current[:]\r\n up_neighbor[0] = current[0] + 1\r\n\r\n if 0 <= up_neighbor[0] < 25 and up_neighbor not in self.obstacles:\r\n\r\n neighbors.append(up_neighbor)\r\n\r\n down_neighbor = current[:]\r\n down_neighbor[0] = current[0] - 1\r\n\r\n if 0 <= down_neighbor[0] < 25 and down_neighbor not in self.obstacles:\r\n\r\n neighbors.append(down_neighbor)\r\n\r\n down_right_neighbor = current[:]\r\n down_right_neighbor[0] = current[0] + 1\r\n down_right_neighbor[1] = current[1] + 1\r\n\r\n if 0 <= down_right_neighbor[0] < 25 and 0 <= down_right_neighbor[1] < 25 and down_right_neighbor not in self.obstacles:\r\n neighbors.append(down_right_neighbor)\r\n\r\n up_right_neighbor = current[:]\r\n up_right_neighbor[0] = current[0] - 1\r\n up_right_neighbor[1] = current[1] + 1\r\n\r\n if 0 <= up_right_neighbor[0] < 25 and 0 <= up_right_neighbor[1] < 25 and up_right_neighbor not in self.obstacles:\r\n\r\n neighbors.append(up_right_neighbor)\r\n\r\n up_left_neighbor = current[:]\r\n up_left_neighbor[0] = current[0] - 1\r\n up_left_neighbor[1] = current[1] - 1\r\n\r\n if 0 <= up_left_neighbor[0] < 25 and 0 <= up_left_neighbor[1] < 25 and up_left_neighbor not in self.obstacles:\r\n\r\n neighbors.append(up_left_neighbor)\r\n\r\n down_left_neighbor = current[:]\r\n down_left_neighbor[0] = current[0] + 1\r\n down_left_neighbor[1] = current[1] - 1\r\n\r\n if 0 <= down_left_neighbor[0] < 25 and 0 <= down_left_neighbor[1] < 25 and 
down_left_neighbor not in self.obstacles:\r\n neighbors.append(down_left_neighbor)\r\n\r\n return neighbors\r\n\r\n def sort_open_set(self, open_set, f_score):\r\n\r\n # The index of the list is the same as the index in the open set\r\n # and the value of the index is the f_score of it\r\n index_to_fscore = []\r\n\r\n for node in open_set:\r\n f_score_of_node = f_score[node[0]][node[1]]\r\n index_to_fscore.append(f_score_of_node)\r\n\r\n sorted_copy = index_to_fscore.copy()\r\n sorted_copy.sort()\r\n\r\n sorted_open_set = []\r\n\r\n for value in sorted_copy:\r\n min = index_to_fscore.index(value)\r\n sorted_open_set.append(open_set[min])\r\n # We mark that we have transfered this value to the sorted array\r\n index_to_fscore[min] = float('inf')\r\n\r\n return sorted_open_set\r\n\r\n def reconstruct_path(self, cameFrom, current):\r\n total_path = []\r\n\r\n while current != self.start:\r\n\r\n self.buttons[current[0]][current[1]].configure(bg='red')\r\n\r\n total_path.append(current[:])\r\n current = cameFrom[current[0]][current[1]]\r\n\r\n def a_star_algorithm(self, start, goal):\r\n\r\n open_set = [start]\r\n g_score = []\r\n f_score = []\r\n came_from = []\r\n\r\n # Initialiazation of g_score and came_from\r\n for i in range(25):\r\n f_score.append([])\r\n g_score.append([])\r\n came_from.append([])\r\n for j in range(25):\r\n temp = float('inf')\r\n came_from[i].append([])\r\n g_score[i].append(temp) # set it to infinity\r\n f_score[i].append(temp) # set it to infinity\r\n\r\n g_score[start[0]][start[1]] = 0\r\n f_score[start[0]][start[1]] = self.heuristic(start, goal)\r\n\r\n while len(open_set) > 0:\r\n self.master.update_idletasks()\r\n sleep(0.02)\r\n\r\n open_set = self.sort_open_set(open_set, f_score)\r\n\r\n current = open_set[0]\r\n current_row = current[0]\r\n current_column = current[1]\r\n\r\n if current == goal:\r\n return self.reconstruct_path(came_from, current)\r\n\r\n open_set.remove(current)\r\n\r\n neighbors = self.find_neighbors(current, [])\r\n\r\n for node in neighbors:\r\n\r\n node_row = node[0]\r\n node_column = node[1]\r\n\r\n # The weight of every edge is 1\r\n tentative_gScore = g_score[current_row][current_column] + 1\r\n\r\n if tentative_gScore < g_score[node_row][node_column]:\r\n\r\n came_from[node_row][node_column].append(current_row)\r\n came_from[node_row][node_column].append(\r\n current_column)\r\n\r\n g_score[node_row][node_column] = tentative_gScore\r\n\r\n f_score[node_row][node_column] = g_score[node_row][node_column] + \\\r\n self.heuristic(node, self.goal)\r\n\r\n if node not in open_set:\r\n\r\n self.buttons[node[0]][node[1]].configure(bg='blue')\r\n open_set.append(node[:])\r\n\r\n print(\"fail!\")\r\n\r\n def find_path(self, event):\r\n\r\n # Checks if we are in the correct mode to start the algorithm\r\n if self.mode == 2:\r\n self.a_star_algorithm(self.start, self.goal)\r\n self.disable_buttons()\r\n\r\n def reset(self, event):\r\n\r\n if self.mode == 2:\r\n self.start = []\r\n self.goal = []\r\n self.obstacles = []\r\n self.mode = 0\r\n\r\n for i in range(25):\r\n for j in range(25):\r\n\r\n self.buttons[i][j].configure(bg='SystemButtonFace')\r\n\r\n self.enable_buttons()\r\n\r\n\r\nif __name__ == '__main__':\r\n root = Tk()\r\n app = App(root)\r\n\r\n # Starts the algorithm when we press enter\r\n root.bind('', app.find_path)\r\n # Resets when we press 'R'\r\n root.bind('r', app.reset)\r\n\r\n 
root.mainloop()\r\n","repo_name":"Chrisbelefantis/A-Star-Algorithm","sub_path":"Astar-Algorithm.py","file_name":"Astar-Algorithm.py","file_ext":"py","file_size_in_byte":10881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"23883047875","text":"import pygame\nimport time, random, math \n\nclass Button:\n def __init__(self, x, y, width, height, text, color, hover_color, action):\n self.rect = pygame.Rect(x, y, width, height)\n self.color = color\n self.hover_color = hover_color\n self.text = text\n self.action = action\n self.font = pygame.font.Font(None, 36)\n self.image = None\n self.hovered = False\n\n def update(self):\n if self.rect.collidepoint(pygame.mouse.get_pos()):\n self.hovered = True\n else:\n self.hovered = False\n\n def draw(self, surface):\n if self.hovered:\n pygame.draw.rect(surface, self.hover_color, self.rect)\n else:\n pygame.draw.rect(surface, self.color, self.rect)\n\n text_surface = self.font.render(self.text, True, (255, 255, 255))\n text_rect = text_surface.get_rect(center=self.rect.center)\n surface.blit(text_surface, text_rect)\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN and self.hovered:\n if self.action is not None:\n self.action()","repo_name":"JiekRuan/python-aim-app","sub_path":"bouton.py","file_name":"bouton.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15667327883","text":"import Live, sys\n#from _osc.RawConfigParser import RawConfigParser\nfrom configparser import MissingSectionHeaderError, ParsingError, ConfigParser\nfrom collections import OrderedDict\nimport os\npath_join = os.path.join\nexpanduser = os.path.expanduser\nexists = os.path.exists\nfrom _Framework.InputControlElement import MIDI_CC_TYPE, MIDI_NOTE_TYPE\nfrom _Framework.ButtonElement import ButtonElement\nfrom _Framework.SubjectSlot import subject_slot_group\nfrom ._EbiagiComponent import EbiagiComponent\n\nDEFAULT_XCONTROL_ON_THRESHOLD = 33\nMIN_XCONTROL_SETTING_LEN = 6\nMIDI_MSG_TYPES = {'cc': MIDI_CC_TYPE, 'note': MIDI_NOTE_TYPE}\n\ndef handle_xcontrol_and_binding_settings(identifier, parent, logger):\n \"\"\" Handles xcontrol and binding settings for self or for the given XT script. \"\"\"\n s_path = path_join(expanduser('~'), 'nativeKONTROL', 'ClyphX_Pro')\n settings = _parse_config_file(s_path, 'X-Controls.txt', logger)\n if settings:\n parsed_settings = parse_xcontrol_settings(settings, identifier, logger, parent)\n if parsed_settings:\n XControlComponent(parsed_settings, parent)\n # self._setup_bindings(s_path, identifier)\n\ndef _parse_config_file(file_path, file_name, logger):\n \"\"\" Reads the given config file from the given path and returns a dict of\n the keys and values it contains. 
\"\"\"\n file_to_read = path_join(file_path, file_name)\n parser = ConfigParser()\n parser.optionxform = str\n try:\n logger('Attempting to read config %s' % file_to_read)\n parser.read((file_to_read,))\n except ParsingError:\n logger(' -> %s contains parsing errors' % file_name)\n\n sections = parser.sections()\n file_data = OrderedDict()\n for s in sections:\n for key in parser.options(s):\n value = parser.get(s, key)\n if isinstance(value, list):\n value = (' ').join(value)\n file_data[key] = value.replace('\\n', '')\n #logger(' -> %s: %s' % (key, file_data[key]))\n\n return file_data\n\ndef parse_xcontrol_settings(text, identifier, logger, parent):\n \"\"\" Returns a dict of xcontrol settings parsed from the text. This ensures that\n each setting is valid and that its associated MIDI message is unique. Note that this\n handles casting to lower case since the settings file is not cast to lower case. \"\"\"\n x_dict = {}\n for k, v in text.items():\n d = v.split(',')\n if len(d) < MIN_XCONTROL_SETTING_LEN:\n continue\n msg_type = MIDI_MSG_TYPES.get(str(d[0]).lower(), None)\n if msg_type is None:\n continue\n ch = parse_midi_channel(d[1].strip())\n num = parse_midi_value(d[2].strip())\n msg = (msg_type, ch, num)\n if parent.can_register_midi_message(msg, identifier):\n parent.register_midi_message(msg, identifier)\n led_off = parse_midi_value(d[3].strip())\n led_on = parse_midi_value(d[4].strip(), default_value=127)\n x_dict[k] = (msg, led_off, led_on, (',').join(d[5:]).strip())\n\n return x_dict\n\ndef parse_midi_value(num_as_string, default_value=0):\n \"\"\" Returns a MIDI value (range 0 - 127) or the given default value. \"\"\"\n return parse_number(num_as_string, default_value=default_value, min_value=0, max_value=127)\n\ndef parse_midi_channel(num_as_string):\n \"\"\" Returns a MIDI channel number (0 - 15) or 0 if parse error. \"\"\"\n return parse_number(num_as_string, default_value=1, min_value=1, max_value=16) - 1\n\ndef parse_number(num_as_string, default_value=None, min_value=None, max_value=None, is_float=False):\n \"\"\" Parses the given string containing a number and returns the parsed number.\n If a parse error occurs, the default_value will be returned. If a min_value or\n max_value is given, the default_value will be returned if the parsed_value is not\n within range. \"\"\"\n ret_value = default_value\n try:\n parsed_value = float(num_as_string) if is_float else int(num_as_string)\n if min_value is not None and parsed_value < min_value:\n return ret_value\n if max_value is not None and parsed_value > max_value:\n return ret_value\n ret_value = parsed_value\n except:\n pass\n\n return ret_value\n\nclass XControlComponent(EbiagiComponent):\n \"\"\" XControlComponent creates a list of buttons based on the given settings, monitors\n their values and handles triggering their action lists and setting their LED\n state. 
\"\"\"\n\n def __init__(self, settings, parent, *a, **k):\n super(XControlComponent, self).__init__(*a, **k)\n self._parent = parent\n btns = []\n for s in settings.values():\n btn = ButtonElement(True, s[0][0], s[0][1], s[0][2], name=s[3])\n btns.append(btn)\n\n self._on_button_value.replace_subjects(btns)\n\n def disconnect(self):\n super(XControlComponent, self).disconnect()\n self._parent = None\n return\n\n @subject_slot_group('value')\n def _on_button_value(self, value, button):\n parsed_xcontrol = button.name.split(' ')\n action_def = parsed_xcontrol[0]\n args = None\n if len(parsed_xcontrol) > 1:\n args = parsed_xcontrol[1]\n self._parent.handle_action(action_def, args)\n","repo_name":"jbernz-Ebiagi/clyphx-actions","sub_path":"_ParseControls.py","file_name":"_ParseControls.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"39880633746","text":"#!/usr/bin/env python\r\n\"\"\" \"\"\"\r\n\r\n# Script information for the file.\r\n__author__ = \"Hendrix Demers (hendrix.demers@mail.mcgill.ca)\"\r\n__version__ = \"\"\r\n__date__ = \"\"\r\n__copyright__ = \"Copyright (c) 2011 Hendrix Demers\"\r\n__license__ = \"\"\r\n\r\n# Subversion informations for the file.\r\n__svnRevision__ = \"$Revision$\"\r\n__svnDate__ = \"$Date$\"\r\n__svnId__ = \"$Id$\"\r\n\r\n# Standard library modules.\r\n\r\n# Third party modules.\r\n\r\n# Local modules.\r\n\r\n# Project modules\r\n\r\n# Globals and constants variables.\r\nSHELL_K = \"K\"\r\nSHELL_LI = \"LI\"\r\nSHELL_LII = \"LII\"\r\nSHELL_LIII = \"LIII\"\r\nSHELL_MI = \"MI\"\r\nSHELL_MII = \"MII\"\r\nSHELL_MIII = \"MIII\"\r\nSHELL_MIV = \"MIV\"\r\nSHELL_MV = \"MV\"\r\n\r\nif __name__ == '__main__': #pragma: no cover\r\n import pyHendrixDemersTools.Runner as Runner\r\n Runner.Runner().run(runFunction=None)\r\n","repo_name":"drix00/pyionisationcrosssection","sub_path":"pyIonisationCrossSection/atomic_shell.py","file_name":"atomic_shell.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10603531923","text":"import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pandas as pd\nimport random\nimport pickle\nfrom matplotlib.colors import ListedColormap\nimport plot_functions\nimport constants as c\nfrom tqdm import tqdm\nimport os\n\nNTag = 4\nprint('running with NTag =', NTag)\n\n\ndef binInSR(x,y):\n \"\"\"\n Checks if any corners of the bin are in the SR\n Assumes giving the lower left corner of the bin\n \"\"\"\n xs = x + c.xbinSize\n ys = y + c.ybinSize\n return ((0.0256 > (x - c.m_h1_0)**2 / x**2 + (y - c.m_h2_0)**2 / y**2) |\n (0.0256 > (xs - c.m_h1_0)**2 / xs**2 + (y - c.m_h2_0)**2 / y**2) |\n (0.0256 > (x - c.m_h1_0)**2 / x**2 + (ys - c.m_h2_0)**2 / ys**2) |\n (0.0256 > (xs - c.m_h1_0)**2 / xs**2 + (ys - c.m_h2_0)**2 / ys**2))\n\ndef integrate_mhh(df):\n \"\"\"\n Gets sum of m_hh in each bin\n \"\"\"\n row_list = []\n for xi in tqdm(c.xbins):\n for yi in c.ybins:\n row_list.append({\"m_h1\":xi,\"m_h2\":yi,\"pdf\":sum(df.loc[ (df[\"m_h1\"]==xi) & (df[\"m_h2\"]==yi),\"pdf\"])})\n return pd.DataFrame(row_list)\n\ndf = pd.read_pickle(f\"data_{NTag}tag.p\")\n# Now make the 3D histogram\ncoord_array = np.array(df[[\"m_h1\",\"m_h2\",\"m_hh\"]])\n\nhist3d,[xbins,ybins,mhhbins] = np.histogramdd(coord_array,[c.xbins,c.ybins,c.mhhbins])\nxv,yv,zv = 
np.meshgrid(xbins[:-1],ybins[:-1],mhhbins[:-1],indexing='ij')\n\ndata_df = pd.DataFrame()\ndata_df[\"m_h1\"] = xv.flatten()\ndata_df[\"m_h2\"] = yv.flatten()\ndata_df[\"m_hh\"] = zv.flatten()\ndata_df[\"pdf\"] = hist3d.flatten()\n\npickle.dump(np.array(data_df[[\"m_h1\",\"m_h2\",\"m_hh\"]]), open(\"3mnn_X.p\", 'wb'))\npickle.dump(np.array(data_df[\"pdf\"]), open(\"3mnn_Y.p\", 'wb'))\n\n# Filter out the SR bins if needed\nif NTag == 4:\n data_df = data_df.loc[~binInSR(data_df[\"m_h1\"],data_df[\"m_h2\"])]\n\n# bin into histogram, save\nfmp = integrate_mhh(data_df)\npickle.dump(fmp, open(f\"fmp_{NTag}b.p\", 'wb'))\n\n\n# plot initial massplane\nfig = plt.figure()\nax = fig.add_subplot(111)\nxmesh = np.array(np.array(fmp[\"m_h1\"]).reshape((len(c.xbins),len(c.ybins))).T)\nymesh = np.array(np.array(fmp[\"m_h2\"]).reshape((len(c.xbins),len(c.ybins))).T)\nhmesh = np.array(np.array(fmp[\"pdf\"]).reshape((len(c.xbins),len(c.ybins))).T)\nax.pcolormesh(xmesh,ymesh,hmesh, shading='auto')\nplot_functions.plotSR()\nplt.xlabel(\"$m_{h1}$\")\nplt.ylabel(\"$m_{h2}$\")\nplt.savefig(f\"kde_2d_fullmassplane_{NTag}b_init.png\")\n\n# do kde to get new density\nif NTag == 2:\n x = xmesh.flatten()\n y = ymesh.flatten()\n h = hmesh.flatten()\nelif NTag == 4:\n mask_indices = ~binInSR(xmesh,ymesh)\n x = xmesh[mask_indices].flatten()\n y = ymesh[mask_indices].flatten()\n h = hmesh[mask_indices].flatten()\n\nprint('fitting polynomial without signal region,', len(x), 'pts instead of', len(xmesh.flatten()))\n#kde = stats.gaussian_kde([x, y], weights=h)#, bw_method=0.02)\n\ndef polyfit2d(x, y, z, kx=3, ky=3, order=None):\n '''\n Two dimensional polynomial fitting by least squares.\n Fits the functional form f(x,y) = z.\n\n Notes\n -----\n Resultant fit can be plotted with:\n np.polynomial.polynomial.polygrid2d(x, y, soln.reshape((kx+1, ky+1)))\n\n Parameters\n ----------\n x, y: array-like, 1d\n x and y coordinates.\n z: np.ndarray, 2d\n Surface to fit.\n kx, ky: int, default is 3\n Polynomial order in x and y, respectively.\n order: int or None, default is None\n If None, all coefficients up to maxiumum kx, ky, ie. 
up to and including x^kx*y^ky, are considered.\n        If int, coefficients up to a maximum of kx+ky <= order are considered.\n\n    Returns\n    -------\n    Return parameters from np.linalg.lstsq.\n\n    soln: np.ndarray\n        Array of polynomial coefficients.\n    residuals: np.ndarray\n    rank: int\n    s: np.ndarray\n\n    '''\n\n    # grid coords\n    x, y = np.meshgrid(x, y)\n    # coefficient array, up to x^kx, y^ky\n    coeffs = np.ones((kx+1, ky+1))\n\n    # solve array\n    a = np.zeros((coeffs.size, x.size))\n\n    # for each coefficient produce array x^i, y^j\n    for index, (j, i) in enumerate(np.ndindex(coeffs.shape)):\n        # do not include powers greater than order\n        if order is not None and i + j > order:\n            arr = np.zeros_like(x)\n        else:\n            arr = coeffs[i, j] * x**i * y**j\n        a[index] = arr.ravel()\n\n    # do leastsq fitting and return leastsq result\n    print('doing lstsq function')\n    return np.linalg.lstsq(a.T, np.ravel(z), rcond=None)\n\nkx, ky = 3, 3  # polynomial orders, reused below when reshaping the coefficients\nsoln, _, _, _ = polyfit2d(x, y, h, kx=kx, ky=ky)\n\ndef smooth(coords):\n    # evaluate the fitted surface at (m_h1, m_h2) points; the (y, x) argument\n    # order matches the (j, i) coefficient layout used in polyfit2d above\n    cx = np.asarray(coords[0])\n    cy = np.asarray(coords[1])\n    return np.polynomial.polynomial.polyval2d(cy, cx, soln.reshape((kx + 1, ky + 1)))\n\n# quick sanity-check plot of the fitted surface on the fit points\nfitted_surf = smooth([x, y])\nplt.scatter(x, y, c=fitted_surf)\nplt.show()\n\n#print(kde.factor)\n\n# evaluate the fitted polynomial density (kde version kept for reference)\n#new_density = kde([data_df[\"m_h1\"], data_df[\"m_h2\"]])\nnew_density = smooth([data_df[\"m_h1\"], data_df[\"m_h2\"]])\nprint('done the long part')\n\nnew_density -= min(new_density)\nnew_density *= max(data_df[\"pdf\"])/max(new_density)\n# save smoothed output file\npickle.dump(np.array(new_density), open(f\"3mnn_Y_poly_smoothed_{NTag}b.p\", 'wb'))\n\n# get smoothed output for here\nnew_density = smooth([fmp[\"m_h1\"], fmp[\"m_h2\"]])\nnew_density -= min(new_density)\nnew_density *= max(fmp[\"pdf\"])/max(new_density)\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nxmesh = np.array(fmp[\"m_h1\"]).reshape((len(c.xbins),len(c.ybins))).T\nymesh = np.array(fmp[\"m_h2\"]).reshape((len(c.xbins),len(c.ybins))).T\nhmesh = np.array(new_density).reshape((len(c.xbins),len(c.ybins))).T\nax.pcolormesh(xmesh,ymesh,hmesh, shading='auto')\nplot_functions.plotSR()\nplt.xlabel(\"$m_{h1}$\")\nplt.ylabel(\"$m_{h2}$\")\nplt.savefig(f\"poly_2d_fullmassplane_{NTag}b_final.png\")\n\npickle.dump(xmesh, open(f\"xmesh_poly_2d_{NTag}b.p\", 'wb'))\npickle.dump(ymesh, open(f\"ymesh_poly_2d_{NTag}b.p\", 'wb'))\npickle.dump(hmesh, open(f\"hmesh_poly_2d_{NTag}b.p\", 'wb'))\n\nplt.cla(); plt.clf()\nplt.scatter(fmp[\"pdf\"], new_density)\nplt.plot(fmp[\"pdf\"],fmp[\"pdf\"], 'k')\nplt.xlabel(\"original pdf\")\nplt.ylabel(\"pdf after poly smoothing\")\nplt.savefig(f\"poly_2d_scatter_{NTag}b.png\")\n","repo_name":"callum-mccracken/backgroundnnregression","sub_path":"polyfit.py","file_name":"polyfit.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"18764192926","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\nimport random\n\n\n# Definiert die Wände als Sprites\n \nclass Wall(pygame.sprite.Sprite):\n    def __init__(self, x, y, width, height):\n        super().__init__()\n        self.image = pygame.Surface((width, height))\n        self.rect = self.image.get_rect()\n        self.rect.topleft = (x, y)\n        self.image.set_alpha(0)\n        self.mask = pygame.mask.from_surface(self.image)\n\n\n# Definiert den steuerbaren Tetromino\n \nclass Tetromino (pygame.sprite.Sprite):\n    def __init__(self, x, y, shape):\n        super().__init__()\n        self.shape = shape\n        # Laden der Bilder\n        self.image = pygame.image.load(\"graphics/\" + str(shape) + \".png\")\n        \n        # Anwenden der Maske\n        self.mask = pygame.mask.from_surface(self.image) 
\n        \n        # Set up the rect attribute\n        self.rect = self.image.get_rect()\n        self.rect.topleft = (x,y)\n\n        # Create a transparent surface to display the mask\n        self.mask_surface = pygame.Surface(self.image.get_size(), pygame.SRCALPHA, 32)\n        self.mask_surface.fill((255, 255, 255, 100))\n        \n        # Blit the tetromino's mask onto the surface\n        self.mask_surface.blit(self.image, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)\n        \n        self.is_moving_left = False\n        self.is_moving_right = False\n        self.is_turning_right = False\n\n    def can_move_down(self, wall_group, move_group):\n        # Create a temporary copy of the tetromino one step further down\n        temp_tetromino = Tetromino(self.rect.x, self.rect.y + 20, self.shape)\n        temp_tetromino.mask = pygame.mask.from_surface(temp_tetromino.image)\n\n        # Check whether the temporary tetromino collides\n        for wall in wall_group:\n            if pygame.sprite.collide_mask(temp_tetromino, wall):\n                return False\n        for tetromino in move_group:\n            if tetromino != self:\n                if pygame.sprite.collide_mask(temp_tetromino, tetromino):\n                    return False\n\n        return True\n    \n    def update(self, wall_group):\n        keys = pygame.key.get_pressed()\n        if keys[K_RIGHT] and self.rect.topright[0] < 259:\n            if not self.is_moving_right:\n                self.rect.x = self.rect.x + 20\n                self.is_moving_right = True\n        else:\n            self.is_moving_right = False\n            \n        if keys[K_LEFT] and self.rect.topleft[0] > 61:\n            if not self.is_moving_left:\n                self.rect.x = self.rect.x - 20\n                self.is_moving_left = True\n        else:\n            self.is_moving_left = False\n\n        if keys[K_UP]:\n            self.rect.y = self.rect.y - 1\n\n        elif keys[K_DOWN] and self.can_move_down(wall_group, move_group):\n            self.rect.y = self.rect.y + 20\n\n        if keys[K_e]:\n            if not self.is_turning_right:\n                self.image = pygame.transform.rotate(self.image, 90)\n                self.mask = pygame.mask.from_surface(self.image) \n                self.is_turning_right = True\n        else:\n            self.is_turning_right = False\n\n\n# Standard stuff\n\npygame.init()\nscreen = pygame.display.set_mode((400,500))\npygame.display.set_caption(\"Tetris\")\nclock = pygame.time.Clock()\n\n# Set the background to background.jpg\n\nbackground_surface = pygame.image.load(\"graphics/background.jpg\")\n\n# Initialize the walls and add them to wall_group\n \nground = Wall(60, 440, 200, 20)\nleft_wall = Wall(40, 40, 20, 400)\nright_wall = Wall(260, 40, 20, 400)\n\nwall_group = pygame.sprite.Group()\nwall_group.add(ground, left_wall, right_wall) \n\n# Initialize the tetromino and add it to move_group\n\n\ntetromino_list = [\"i\",\"o\",\"l\",\"j\",\"s\",\"z\",\"t\"]\nmove_group = pygame.sprite.Group()\n\n\n\n# count for slowly lowering the tetromino\n\ncount = 0\n\n\ndef collision():\n    move_group.sprites()[0].rect.y = move_group.sprites()[0].rect.y - 20\n    wall_group.add(move_group.sprites()[0])\n    move_group.empty()\n    \nwhile True:\n    #If the last tetromino has been placed, a new one is chosen\n    #and pushed into move_group; the list is refilled once it is empty\n    if len(move_group) == 0:\n        if len(tetromino_list) == 0:\n            tetromino_list = [\"i\",\"o\",\"l\",\"j\",\"s\",\"z\",\"t\"]\n        random_pop = random.randint(0,len(tetromino_list)-1)\n        pop = tetromino_list.pop(random_pop)\n        print(pop)\n        new_tetromino = Tetromino(140,40,pop)\n        move_group.add(new_tetromino)\n        print(tetromino_list)\n        \n    #Counter\n    count = count + 1\n\n    # Allow closing the window via X\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            exit()\n    \n    \n\n    \n    # Updates and draws\n    
screen.blit(background_surface,(0,0))\n    wall_group.draw(screen)\n    move_group.update(wall_group)\n    move_group.draw(screen)\n\n    \n    \n    for wall in wall_group:\n        if pygame.sprite.collide_mask(move_group.sprites()[0],wall):\n            move_group.sprites()[0].rect.y = move_group.sprites()[0].rect.y - 20\n            wall_group.add(move_group.sprites()[0])\n            move_group.empty()\n            break\n        \n    # Every 60 frames the tetromino moves down one step\n    if count % 60 == 0:\n        \n        if move_group.sprites()[0].can_move_down(wall_group, move_group):\n            move_group.sprites()[0].rect.y = move_group.sprites()[0].rect.y + 20\n        else:\n            wall_group.add(move_group.sprites()[0])\n            move_group.empty()\n        \n\n    # Standard stuff\n    pygame.display.update()\n    clock.tick(60)\n","repo_name":"LeonOhneH/Tetris","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21416971547","text":"from fastapi import FastAPI\nimport uvicorn\n\nfrom app.schemas import Data\nfrom app import functions as func\n\napp = FastAPI(title=\"Web-приложение для определения заполненных форм\")\n\n\n@app.post(\"/get_form\")\nasync def get_form(data: Data):\n    # split the string with the list of fields into key-value pairs\n    form_field_val_dict = func.separate_fields(data.fields_values)\n    # determine the field types\n    form_field_type_dict = func.get_field_type(form_field_val_dict)\n    # look for a matching template in the database\n    template_name = func.get_template_name(form_field_type_dict)\n    if len(template_name) > 0:\n        return template_name\n    else:\n        # if there is no template, send back the fields and types of the received form\n        form_field_type_str = func.compose_str(form_field_type_dict)\n        return form_field_type_str\n\n\n# for debugging\nif __name__ == '__main__':\n    uvicorn.run(app, host=\"0.0.0.0\", port=8002)\n","repo_name":"anshexa/completed_forms_app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"70555556624","text":"from datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\n\nfrom django.urls import reverse\nfrom registration.backends.simple.views import RegistrationView\n\nfrom rango.models import Category,Page\nfrom rango.form import CategoryForm,PageForm\nfrom rango.webhose_search import run_query\n\n\ndef get_server_side_cookie(request,cookie,default_val=None):\n    # check via request.session.get() whether the cookie exists; the session stores the sessionid cookie\n    val=request.session.get(cookie)\n    if not val:\n        val=default_val\n    return val\n\ndef visitor_cookie_handler(request):\n    # get or create the 'visits' cookie; values fetched with request.COOKIES.get are always strings\n    visits=int(get_server_side_cookie(request,'visits','1'))\n    last_visit_cookie = get_server_side_cookie(request,'last_visit',str(datetime.now()))\n    # convert last_visit into a datetime\n    last_visit_time=datetime.strptime(last_visit_cookie[:-7],'%Y-%m-%d %H:%M:%S')\n\n    # visits within 10 seconds of the last one do not increase the count\n    if(datetime.now()-last_visit_time).seconds>=10:\n        visits=visits+1\n        request.session['last_visit']=str(datetime.now())\n    else:\n        request.session['last_visit']=last_visit_cookie\n\n    request.session['visits']=visits\n\n\ndef index(request):\n\n    # the context dictionary has to be defined before assigning into it\n    context_dict = {}\n    # query the database for the top 5 categories and put them into the context\n    category_list=Category.objects.order_by('-views')[:5]\n    # build the context dictionary used to render the template\n    context_dict['category_list'] =category_list\n\n    
page_list=Page.objects.order_by('-views')[:5]\n    context_dict['page_list']=page_list\n\n    # call the cookie-handling helper\n    visitor_cookie_handler(request)\n    context_dict['visits']=request.session['visits']\n\n    # render builds a response object\n    response = render(request, 'rango/index.html', context_dict)\n\n    # return the response object, updating the target cookies\n    return response\n\ndef show_category(request,category_name_slug):\n    context_dict={}\n    category = Category.objects.get(slug=category_name_slug)\n    context_dict[\"category\"]=category\n    try:\n        pages=Page.objects.filter(category=category)\n        context_dict[\"pages\"]=pages\n    except Page.DoesNotExist:\n        context_dict['pages']=None\n\n    return render(request,'rango/category.html',context_dict)\n\ndef about(request):\n    # HTML can be returned to the browser directly, which parses it; Python simply treats the HTML as a string.\n    # return HttpResponse(\"
      \"+\"

      关于

      \"+\"
      \")\n return render(request,'rango/about.html',{})\n\n\n@login_required\ndef add_category(request):\n #创建一个CategoryForm对象\n form = CategoryForm()\n #HTTP GET请求 获取指定资源的表述。即HTTP GET请求用于获取特定的资源,例如一个网页、一张图像或一个文件。\n #HTTP POST请求 向服务器中提交数据,一般还会存入数据库。\n #判断是不是HTTP POST请求,即是不是提交数据过来的,如果不是的话,则是首次访问页面过来添加数据的\n if request.method == \"POST\":\n #把数据放到CategoryForm中,生成一个新CategoryForm对象,名字还是form\n form=CategoryForm(request.POST)\n\n #表单数据有效吗?\n if form.is_valid():\n #调用save函数,会生成一个Category对象,并且将表单数据存到对象里,commit=True会把新分类对象存入数据库\n cat=form.save(commit=True)#通过表单创建一个对象\n #调用index()函数,必须要有请求对象作为参数,把用户带到首页\n print(cat)\n return index(request)\n\n else:\n #表单数据有错误,在服务器的终端(控制台)里打印出来\n print(form.errors)\n return render(request, 'rango/add_category.html', {\"form\": form})\n\n return render(request,'rango/add_category.html',{\"form\":form})\n\n@login_required\ndef add_page(request,category_name_slug):\n\n try:\n category=Category.objects.get(slug=category_name_slug)\n except Category.DoesNotExist:\n category=None\n\n form=PageForm()\n if request.method == \"POST\":\n form = PageForm(request.POST)\n\n if form.is_valid():\n ##Pageform调用save函数,会生成一个Page对象,并且将表单数据���到对象里,commit=False不会把表单数据存入数据库\n page=form.save(commit=False)\n page.category=category\n page.views=0\n page.save()\n return show_category(request,category_name_slug)\n else:\n print(form.errors)\n #context_dict = {\"form\": form, \"category\": category}\n #return render(request,\"rango/add_page.html\",context_dict)\n\n context_dict={\"form\":form,\"category\":category}\n return render(request,\"rango/add_page.html\",context_dict)\n\n\n#RangoRegistrationView类继承了RegistrationView,并且重写了get_success_url方法\nclass RangoRegistrationView(RegistrationView):\n def get_success_url(self, user):\n return reverse('index')\n\ndef search(request):\n result_list=[]\n query_str=\"\"\n if request.method == \"POST\":\n query_str=request.POST['query'].strip()\n if query_str:\n result_list=run_query(query_str)\n\n return render(request,\"rango/search.html\",{\"result_list\":result_list,\"query_str\":query_str})\n\n@login_required\ndef like_category(request):\n cat_id = None\n print(\"怎么回事儿\")\n\n if request.method == \"GET\":\n cat_id = request.GET['category_id']\n likes = 0\n if cat_id:\n cat =Category.objects.get(id=int(cat_id))\n if cat:\n likes = cat.likes+1\n cat.likes = likes\n cat.save()\n return HttpResponse(likes)\n\ndef get_category_list(max_results=0,starts_with=\"\"):\n cat_list = []\n if starts_with:\n cat_list = Category.objects.filter(name__istartswith=starts_with)\n\n if max_results>0:\n if len(cat_list)>max_results:\n cat_list=cat_list[:max_results]\n return cat_list\n\n\ndef suggest_category(request):\n cat_list = []\n starts_with = \"\"\n\n if request.method == \"GET\":\n starts_with = request.GET['suggestion']\n cat_list=get_category_list(8,starts_with)\n #render生成一个response对象,这个响应对象是经过数据渲染过的html代码。\n # 然后return返回给向这个函数发送请求的对象,可能是浏览器,可能是ajax请求\n return render(request,'rango/cats.html',{'cats':cat_list})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ZhaoShuaifei123/Tango_with_Django","sub_path":"rango/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28816443170","text":"from turtle import Screen\nimport time\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\n# screen setup\nscreen = Screen()\nscreen.setup(width=600, height = 
600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Snake Game\")\nscreen.tracer(0)\nscreen.listen()\n\n\n#call snake and food\n\nsnake = Snake()\nfood = Food()\nscoreboard = Scoreboard()\ngame_is_on = True\n## restart game function\ndef restart():\n    global game_is_on  # update the module-level flag, not a new local variable\n    game_is_on = True\n    scoreboard.restart()\n    snake.restart()\n#listen for keys\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.left, \"Left\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.right, \"Right\")\n# screen.onkey(restart, \"space\")\n\n#movement\n\nwhile game_is_on:\n    #move each segment together\n    screen.update()\n    time.sleep(0.1)\n    snake.move()\n\n    #detect collision with food\n    if snake.head.distance(food) < 15:\n        food.refresh()\n        scoreboard.add_point()\n        snake.extend()\n    # detect collision with wall\n    if snake.head.xcor() >290 or snake.head.xcor() < -290 or snake.head.ycor() > 290 or snake.head.ycor() < -290:\n        scoreboard.game_over()\n        game_is_on = False\n        # scoreboard.start_again()\n    \n\n\n\n    #detect collision with self\n    # for part in snake.segments:\n\n    #     if snake.head.xcor() == snake.part.xcor() or snake.head.ycor() == snake.part.ycor():\n    #         scoreboard.game_over()\n    #         game_is_on = False\n    for segment in snake.segments[1:]:\n        if snake.head.distance(segment) < 7:\n            game_is_on = False\n            scoreboard.game_over()\n\n\n\n\n\n\n\n\n\nscreen.exitonclick()","repo_name":"Xconina/snake_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31683364613","text":"# import libraries\nimport os\nimport sys\nimport re\nimport pickle\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score, recall_score, precision_score, classification_report, accuracy_score\nfrom sklearn.model_selection import GridSearchCV\nfrom nltk.corpus import stopwords\n\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\n# download nltk-content\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('stopwords')\n\ndef load_data(database_filepath):\n    \"\"\"\n    Loads the data stored in the sqlite database into a DataFrame.\n    \n    Input:\n        database_filepath: str - Path to the sqlite database from which the data should be loaded\n    \n    Output:\n        X: pd.Series - Messages\n        y: pd.DataFrame - Category labels\n        category_names: list - Names of the label columns\n    \"\"\"\n    \n    engine = create_engine(f\"sqlite:///{database_filepath}\") \n    df = pd.read_sql_table(f\"Data_Table\", engine)\n    \n    # dropping the 'child_alone'-category, since it has just zeros and the 'original'-column since we're only interested in the english text\n    df.drop(['child_alone', 'original'], axis=1, inplace=True)\n    \n    # It seems that if a category has a nan-value, \n    # all of the other columns are nans as well, hence they can be dropped\n    df.dropna(inplace=True)\n    \n    # Split into X and y\n    X = df['message']\n    y = df.iloc[:,3:]\n    category_names = y.columns\n    \n    \n    return X, y, category_names\n    \ndef tokenize(text):\n    \"\"\"\n    Processes the text by replacing any URLs, tokenizing, lemmatizing and removing stop words.\n    \n    Input:\n        text: str - Raw input text\n    \n    Output:\n        clean_tokens: list - List of tokens containing the processed text\n    \n    
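    Example (illustrative, not from the original source):\n        tokenize(\"Visit http://example.com today\") -> ['Visit', 'urlplaceholder', 'today']\n    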
\"\"\"\n \n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n stop_words = stopwords.words(\"english\")\n \n # replacing urls\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n # tokenizing the text\n tokens = word_tokenize(text)\n\n # lemmatizing and removing stop words\n clean_tokens = [WordNetLemmatizer().lemmatize(w) for w in tokens if w not in stop_words]\n \n return clean_tokens\n\n\ndef build_model():\n \"\"\"\n Builds the pipeline and performs a GridSearch over specified parameters to improve the \n model's performance.\n \n Input:\n None\n \n Output:\n model: sklearn - List of tokens containing the processed text\n \n \"\"\"\n \n \n # defining the pipeline\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n # defining the parameter for the grid search\n parameters = {'clf__estimator__max_features': [\"auto\", \"log2\"],\n 'clf__estimator__n_estimators': [10, 50]}\n\n # perform the grid search\n cv = GridSearchCV(pipeline, param_grid=parameters, verbose=2, n_jobs=-1)\n\n #return the model\n return cv\n\ndef evaluate_model(model, X_test, y_test, category_names):\n \"\"\"\n Evaluating and printing a models performance on a given test-set by calculating the models\n accuracy, precision and recall.\n \n Input:\n model: sklearn model - model which should be evaluated\n X_test: np.array - Input data of the test set\n y_test: np.array - Label data of the test set\n category_names: list of str - List of the category names\n \n Output:\n None\n \n \"\"\"\n \n y_pred = model.predict(X_test)\n for i, col in enumerate(category_names):\n print(f\"{col}: \\n\")\n print(classification_report(y_test.values[:,i] , y_pred[:,i]))\n\n\n\ndef save_model(model, model_filepath):\n \"\"\"\n Saving the best model from the grid search to a pickle-file.\n \n Input:\n model: sklearn model - model which should be saved\n model_filepath: str - Filepath of the pickle-file to which the \n model should be saved\n \n Output:\n None\n \"\"\" \n \n pickle.dump(model.best_estimator_, open(model_filepath, 'wb'))\n\n\ndef main():\n print(sys.argv[1])\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\npath = \"/home/workspace/data/DisasterResponse.db\" \n\nif __name__ == '__main__':\n #test-call python train_classifier.py /home/workspace/data/DisasterResponse.db model.pkl \n main()","repo_name":"ChaosMcChief/DataScience_Nanodegree_p2-Disaster_Response","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24178173010","text":"import os\r\nfrom flask import Flask, request, render_template, jsonify, make_response\r\n\r\nfrom app.models import db, Menu\r\nfrom app.webhook import handler\r\n\r\nLIFF_ID = os.environ['LIFF_ID']\r\n\r\napp = Flask(__name__, template_folder='liff/templates', static_folder='liff/static')\r\napp.config.from_object('app.config.Config')\r\ndb.init_app(app)\r\n\r\n@app.route('/')\r\ndef hello():\r\n return 'Hi'\r\n\r\n\r\n@app.route('/webhook', methods=['POST'])\r\ndef webhook_handler():\r\n data = request.get_json()\r\n # events内にWebhookイベントのリストが格納されている\r\n for event_json in data['events']:\r\n handler.handle(event_json)\r\n return '200 OK'\r\n\r\n\r\n@app.route('/liff', methods=['GET'])\r\ndef liff_origin():\r\n return render_template('liff.html', LIFF_ID=LIFF_ID)\r\n\r\n\r\n@app.route('/liff/menu/', methods=['GET'])\r\n@app.route('/liff/new-menu/', methods=['GET'])\r\ndef menu_modifier(num):\r\n # response = make_response(render_template('menu.html', LIFF_ID=LIFF_ID))\r\n # response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n # response.headers[\"Pragma\"] = \"no-cache\"\r\n # response.headers[\"Expires\"] = \"0\"\r\n # response.headers['Cache-Control'] = 'public, max-age=0'\r\n return render_template('menu.html', LIFF_ID=LIFF_ID)\r\n\r\n\r\n@app.route('/liff/menu/', methods=['PUT'])\r\ndef update_menu(menu_id):\r\n category = request.json['category']\r\n description = request.json['description']\r\n cycle = request.json['cycle']\r\n\r\n target_menu = Menu.query.get(menu_id)\r\n\r\n if target_menu is None:\r\n return 'Failed'\r\n target_menu.category = category\r\n target_menu.description = description\r\n target_menu.cycle = cycle\r\n db.session.commit()\r\n return str(menu_id)\r\n\r\n\r\n@app.route('/liff/new-menu/', methods=['POST'])\r\ndef post_new_menu(date_int):\r\n category = request.json['category']\r\n description = request.json['description']\r\n cycle = request.json['cycle']\r\n new_menu = Menu(date_int, category, description, cycle)\r\n db.session.add(new_menu)\r\n db.session.commit()\r\n menu_id = new_menu.menuid\r\n return str(menu_id)\r\n\r\n\r\n@app.route('/liff/menu//ajax', methods=['GET'])\r\ndef fetch_menu_status(menu_id):\r\n menu = Menu.query.get(menu_id)\r\n\r\n if menu is None:\r\n return jsonify({'message': 'メニューが見つかりませんでした'})\r\n\r\n response = {\r\n 'message': 'Success',\r\n 'date': menu.date,\r\n 'category': menu.category,\r\n 'description': menu.description,\r\n 'cycle': menu.cycle\r\n }\r\n return jsonify(response)\r\n\r\n\r\n\r\n@app.route('/liff/id')\r\ndef get_liff_id():\r\n return jsonify({'LIFFID': LIFF_ID})\r\n\r\n\r\n@app.route('/create')\r\ndef create():\r\n db.create_all()\r\n return 'Hi'\r\n","repo_name":"kcabo/TiM","sub_path":"app/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"40311173691","text":"# -- coding: utf-8 --\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .forms import *\nfrom django.db import transaction\nimport nlpaug\nimport random\nimport string\nimport nlpaug.augmenter.word as naw\nimport nlpaug.augmenter.char as nac\nimport nltk\nfrom textaugment import EDA, Wordnet\nimport emoji\nfrom .models import Parent, Positive, Negative\nimport decimal\n\npositive_options = [\n \"Random word swap\",\n \"Random word delete\",\n \"Random word insert\",\n \"Synonym Augmentation\",\n \"OCR Augmentation\",\n \"KeyBoard Augmentation\",\n \"Random Char insert\",\n \"Random Char swap\",\n \"Random Char delete\",\n]\nnegative_options = [\n \"Text to emoji\",\n \"Antonym of text\",\n \"Insert sentence\",\n \"Special character insertion\",\n \"Swap in the sentence\",\n \"Sentence insertion\",\n]\n\n@transaction.atomic\ndef my_form_post(request):\n if request.method == 'POST':\n text = request.POST.get('text')\n result = []\n pos_logics = request.POST.getlist('pos-logic')\n neg_logics = request.POST.getlist('neg-logic')\n\n # parent for the input text\n parent, _ = Parent.objects.get_or_create(sentence=text)\n\n if pos_logics:\n t = EDA()\n last_positive = Positive.objects.filter(parent_id=parent.id).last()\n counter = round(last_positive.positive_id, 1) if last_positive else 1.0\n new_records = []\n\n for logic in pos_logics:\n logic_res = apply_pos_logic(logic, text, t)\n counter+=0.1\n new_records.append(Positive(sentence=logic_res, parent_id=parent.id, positive_id=counter))\n result.append([logic, logic_res])\n Positive.objects.bulk_create(new_records)\n\n elif neg_logics:\n t = EDA()\n words = text.split(\" \")\n half_txt = \" \".join(words[:int(len(words) / 2)])\n rem_txt = \" \".join(words[int(len(words) / 2):])\n n = int(len(words) / 2)\n \n last_negative = Negative.objects.filter(parent_id=parent.id).last()\n counter = round(last_negative.negative_id, 1) if last_negative else 1.0\n new_records = []\n for logic in neg_logics:\n logic_res = evaluate_negative_augmentation(text, logic, t, half_txt, rem_txt, n, words)\n if logic_res:\n counter+=0.1\n new_records.append(Negative(sentence=logic_res, parent_id=parent.id, negative_id=counter))\n result.append([logic, logic_res])\n Negative.objects.bulk_create(new_records)\n\n return render(request, 'index.html', {\"input_text\":text, \"result\":result, \"positive_options\": positive_options, \"negative_options\": negative_options})\n return render(request, 'index.html', {\"input_text\":\"\", \"result\":[], \"positive_options\": positive_options, \"negative_options\": negative_options})\n\n\n# helper functions are below\n\ndef evaluate_negative_augmentation(text, neg_logic, t, half_txt, rem_txt, n, words):\n # 0. replace with emojis\n if neg_logic == \"Text to emoji\":\n return text_to_emoji(text)\n # 1. make antonym of whole text\n elif neg_logic == \"Antonym of text\":\n return naw.AntonymAug().augment(text, n=1)\n # 2. insert n words in the half sentence, where n = half of size of sentence\n elif neg_logic == \"Insert sentence\":\n try:\n rand_index = random.randint(0, n)\n return t.random_insertion(sentence=words[rand_index], n=n) + \" \" + rem_txt\n except:\n pass\n # 3. make antonym of whole text and insert a special character at any position\n elif neg_logic == \"Special character insertion\":\n return get_with_special_char(text)\n # 4. 
 swap half of the sentence\n    elif neg_logic == \"Swap in the sentence\":\n        return t.random_swap(half_txt) + \" \" + rem_txt\n    # 5. insert one random word in half text\n    elif neg_logic == \"Sentence insertion\":\n        return t.random_insertion(half_txt) + \" \" + rem_txt\n\n\ndef get_with_special_char(text):\n    \"\"\"\n    replace char in text\n    \"\"\"\n    # get random indexes to be replaced with special characters which will be 35% of sentence but not more than 15 chars\n    indexes = random.sample(range(0, len(text)), min(round(len(text) * 35 / 100), 15))\n    for index in indexes:\n        text = text[:index] + random.choice(string.punctuation) + text[index + 1:]\n\n    return text\n\n\ndef text_to_emoji(text):\n    \"\"\"\n    Replaces words with possible emojis.\n    \"\"\"\n    text = text.replace(\",\", \"\").replace(\".\", \"\")\n    new_sentence = \" \".join([\":\" + s + \":\" for s in text.split(\" \")])\n    emojized = emoji.emojize(new_sentence, use_aliases=True).split(\" \")\n\n    sent = []\n    for each in emojized:\n        if each in emoji.UNICODE_EMOJI['en']:\n            sent.append(each)\n        else:\n            sent.append(each.replace(\":\", \"\"))\n    return \" \".join(sent)\n\n\ndef apply_pos_logic(logic, text, t):\n    if logic == \"Random word swap\":\n        return t.random_swap(text)\n    elif logic == \"Random word delete\":\n        return t.random_deletion(text, p=0.3)\n    elif logic == \"Random word insert\":\n        return t.random_insertion(text)\n    elif logic == \"Synonym Augmentation\":\n        return naw.SynonymAug(aug_src='wordnet').augment(text, n=1)\n    elif logic == \"OCR Augmentation\":\n        return nac.OcrAug().augment(text, n=1)\n    elif logic == \"KeyBoard Augmentation\":\n        return nac.KeyboardAug().augment(text, n=1)\n    elif logic == \"Random Char insert\":\n        return nac.RandomCharAug('insert').augment(text, n=1)\n    elif logic == \"Random Char swap\":\n        return nac.RandomCharAug('swap').augment(text, n=1)\n    elif logic == \"Random Char delete\":\n        return nac.RandomCharAug('delete').augment(text, n=1)\n\n\n","repo_name":"smartbrainsakshi/augmenter","sub_path":"rl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"7862190588","text":"def xor(x, y):\n    ans = \"\"\n    for i in range(len(x)):\n        if x[i] == \"0\" and y[i] == \"1\" or x[i] == \"1\" and y[i] == \"0\":\n            ans += \"1\"\n        else:\n            ans += \"0\"\n    return ans\n\ndef convert2Bin(texto):\n    return \" \".join(f\"{ord(i):08b}\" for i in texto)\n\ndef criptografar(chave, chaveBinaria, arquivoBinario):\n\n    # ensure the file can be split into equal-sized parts\n    chaveBinaria = chaveBinaria.replace(\" \", \"\")\n    arquivoBinario = arquivoBinario.replace(\" \", \"\")\n    if len(chave) == 8 and len(arquivoBinario) % len(chaveBinaria) != 0:\n        arquivoBinario += '00000000'\n    elif len(chave) == 16 and len(arquivoBinario) % len(chaveBinaria) != 0:\n        arquivoBinario += '0000000000000000'\n\n    # split the file into key-sized blocks\n    listaArquivoBinario = []\n    for i in range(0, len(arquivoBinario), len(chaveBinaria)):\n        listaArquivoBinario.append(arquivoBinario[i : i+len(chaveBinaria)])\n\n    # XOR the key with each file block\n    resultado = ''\n    for i in range(len(listaArquivoBinario)):\n        resultado += xor(listaArquivoBinario[i], chaveBinaria)\n    return resultado\n\ndef decriptografar(arquivo, chaveBinaria):\n    \n    # strip spaces from the key\n    chaveBinaria = chaveBinaria.replace(\" \", \"\")\n\n    # split the file into key-sized blocks\n    listaArquivoBinario = []\n    for i in range(0, 
len(arquivo), len(chaveBinaria)):\n        listaArquivoBinario.append(arquivo[i : i+len(chaveBinaria)])\n\n    # XOR the key with each file block\n    aux = ''\n    for i in range(len(listaArquivoBinario)):\n        aux += xor(listaArquivoBinario[i], chaveBinaria)\n\n    # convert the binary back to a string\n    resultado = '' \n    for i in range(0, len(aux), 8):\n        resultado += chr(int(aux[i : i+8], 2))\n    return resultado","repo_name":"samuel4oliveira/criptografia-decriptografia","sub_path":"funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"15250256686","text":"# assumed imports, inferred from the usage below\nimport glob\n\nimport cv2\nimport numpy as np\nimport tifffile as tif\nfrom numpy import savetxt\nfrom scipy import signal\n\ndef normalize_image(img, L):\n    _min, _max = img.min(), img.max()\n    img = ((img-_min)/(_max-_min) * (L-1)).astype(np.uint8)\n    return img\n\ndef sample_patches(img, patch_size, num_copies, upscale):\n    high_resolution_img = img\n    high_resolution_img = high_resolution_img.astype(float)\n    m, n = high_resolution_img.shape\n    # generate low resolution counterparts\n    low_resolution_img = cv2.resize(\\\n        high_resolution_img, (m//upscale, n//upscale),interpolation=cv2.INTER_CUBIC)\n    low_resolution_img = cv2.resize(\\\n        low_resolution_img, (m, n), interpolation=cv2.INTER_CUBIC)\n    low_resolution_img = low_resolution_img.astype(float)\n\n    x = np.arange(m-2*patch_size)+patch_size\n    y = np.arange(n-2*patch_size)+patch_size\n    np.random.shuffle(x)\n    np.random.shuffle(y)\n\n    X, Y = np.meshgrid(x,y)\n    xrow, ycol = X.reshape(-1), Y.reshape(-1)\n    if num_copies < len(xrow):\n        xrow = xrow[:num_copies]\n        ycol = ycol[:num_copies]\n    else:\n        num_copies = len(xrow)\n\n    # initialize output\n    x_high = np.zeros((patch_size**2, num_copies))\n    x_low = np.zeros((4*patch_size**2, num_copies))\n\n    # compute the first and second order gradients\n    hf1 = np.array([-1,0,1]).reshape(1,-1)\n    vf1 = hf1.T\n    hf2 = np.array([1,0,-2,0,1]).reshape(1,-1)\n    vf2 = hf2.T\n\n    # get low_resolution_img features\n    low_resolution_feature1 = signal.convolve2d(low_resolution_img, hf1, 'same')\n    low_resolution_feature2 = signal.convolve2d(low_resolution_img, vf1, 'same')\n    low_resolution_feature3 = signal.convolve2d(low_resolution_img, hf2, 'same')\n    low_resolution_feature4 = signal.convolve2d(low_resolution_img, vf2, 'same')\n\n    # collect patches from sample\n    for i in np.arange(num_copies):\n        row, col = xrow[i], ycol[i]\n        Hpatch = high_resolution_img[row:row+patch_size, col:col+patch_size].reshape(-1)\n        Lpatch1 = low_resolution_feature1[row:row+patch_size, col:col+patch_size].reshape(-1)\n        Lpatch2 = low_resolution_feature2[row:row+patch_size, col:col+patch_size].reshape(-1)\n        Lpatch3 = low_resolution_feature3[row:row+patch_size, col:col+patch_size].reshape(-1)\n        Lpatch4 = low_resolution_feature4[row:row+patch_size, col:col+patch_size].reshape(-1)\n        Lpatch = np.concatenate([Lpatch1,Lpatch2,Lpatch3,Lpatch4],axis=0)\n        x_high[:,i] = Hpatch-np.mean(Hpatch)\n        x_low[:,i] = Lpatch\n\n    return x_high, x_low\n\ndef rnd_smp_patch(img_path, type, patch_size, num_patches, upscale):\n    # get all training images name\n    img_list = glob.glob(img_path+type) # type = '*.tif'\n    # get total number of images being considered\n    img_num = len(img_list)\n    # initialize number of copies for each image\n    # depends on its size\n    num_copies_img = np.zeros(img_num)\n\n    # read images and determine number of copies for each image\n    # this number is proportional to total number of patches\n    for i in np.arange(img_num):\n        img = tif.imread(img_list[i])\n        num_copies_img[i] = np.prod(img.shape)\n    num_copies_img = 
np.floor(num_copies_img*num_patches/np.sum(num_copies_img)).astype(int)\n\n    # initialize output\n    X_high = []\n    X_low = []\n\n    for i in np.arange(img_num):\n        num_copies = num_copies_img[i]\n        img = tif.imread(img_list[i])\n        img = normalize_image(img, 256)\n        x_high, x_low = sample_patches(img, patch_size, num_copies, upscale)\n        X_high.append(x_high)\n        X_low.append(x_low)\n\n    # assemble a numpy ndarray\n    X_high = np.concatenate(X_high, axis=1)\n    X_low = np.concatenate(X_low, axis=1)\n\n    # save data\n    save_path = 'Training/rnd_patches'+str(patch_size)+'_'+str(upscale)+'_'\n    savetxt(save_path+'X_high.csv', X_high, delimiter=' ')\n    savetxt(save_path+'X_low.csv', X_low, delimiter=' ')\n\n    return X_high, X_low\n\ndef patch_pruning(X_high, X_low, threshold):\n    variances = np.var(X_high, axis=0)\n    idx = variances > threshold\n    X_high = X_high[:,idx]\n    X_low = X_low[:,idx]\n    return X_high, X_low\n","repo_name":"tanpeng1995/289FinalProject","sub_path":"python_code/rnd_smp_patch.py","file_name":"rnd_smp_patch.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"23586208917","text":"class NegativeValueError(Exception):\r\n    def __init__(self, value):\r\n        self.value = value\r\n\r\ntry:\r\n    num = int(input(\"Enter an integer: \"))\r\n    \r\n    if num < 0:\r\n        raise NegativeValueError(num) \r\n    \r\n    print(f\"The entered integer is: {num}\")\r\nexcept NegativeValueError as e:\r\n    print(f\"Error: Negative value ({e.value}) is not allowed.\")\r\nexcept ValueError:\r\n    print(\"Error: Invalid input. Please enter a valid integer.\")\r\n","repo_name":"SyntaxErrorBad/GeekHub","sub_path":"HT_04/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"39672024393","text":"from reinforcement_learning.utils.plot import plot_rewards\nfrom reinforcement_learning.algorithm.dqn.general import test, train, env_agent_config, get_args\n\ndef main():\n    cfg = get_args()\n    # training\n    env, agent = env_agent_config(cfg)\n    res_dic = train(cfg, env, agent)\n\n    plot_rewards(res_dic['rewards'], cfg)\n    # testing\n    res_dic = test(cfg, env, agent)\n    plot_rewards(res_dic['rewards'], cfg)\n\n\nif __name__ == '__main__':\n    main()\n    # print(envs.registry.all())","repo_name":"Gxs16/Learn-Optimization","sub_path":"reinforcement_learning/algorithm/dqn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"5146358511","text":"# WORKS BETTER WITH THE ENGLISH ALPHABET! THE GREEK ALPHABET HAS VERY FEW EXAMPLES. 
#\n\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom src import *\nfrom src.utilities import rmse, mae\nfrom src.utilities.get_omniglot_dataset import get_omniglot_dataset\nfrom src.utilities.plot_utils import plot_images\nfrom src.utilities.vae_in_tensorflow import vae\n\n\ndef omniglot(latent_dim=64, epochs=100, batch_size='250', learning_rate=0.01, language='English'):\n if language.lower() == 'greek':\n output_images_path = output_img_base_path + 'vaes_in_tensorflow/omniglot_greek'\n logdir = 'tensorflow_logs/omniglot_greek_vae'\n save_path = 'save/omniglot_greek_vae'\n alphabet = 20\n else:\n output_images_path = output_img_base_path + 'vaes_in_tensorflow/omniglot_english'\n logdir = 'tensorflow_logs/omniglot_english_vae'\n save_path = 'save/omniglot_english_vae'\n alphabet = 31\n\n if not os.path.exists(output_images_path):\n os.makedirs(output_images_path)\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # LOAD OMNIGLOT DATASET #\n X_train, y_train = get_omniglot_dataset(\n omniglot_dataset_path + '/chardata.mat',\n train_or_test='train',\n alphabet=alphabet,\n binarize=True\n )\n X_test, y_test = get_omniglot_dataset(\n omniglot_dataset_path + '/chardata.mat',\n train_or_test='test',\n alphabet=alphabet,\n binarize=True\n )\n\n X_merged = np.concatenate((X_train, X_test), axis=0)\n y_merged = np.concatenate((y_train, y_test), axis=0)\n\n #####\n\n N = X_merged.shape[0]\n input_dim = 784 # D\n # M1: number of neurons in the encoder\n # M2: number of neurons in the decoder\n hidden_encoder_dim = 400 # M1\n hidden_decoder_dim = hidden_encoder_dim # M2\n # latent_dim = Z_dim\n if batch_size == 'N':\n batch_size = N\n else:\n batch_size = int(batch_size)\n\n #####\n\n fig = plot_images(X_merged, y_merged, categories=list(range(1, 11)), title='Original Data')\n fig.savefig(f'{output_images_path}/original_data_characters_1-10.png', bbox_inches='tight')\n plt.close()\n fig = plot_images(X_merged, y_merged, categories=list(range(11, 21)), title='Original Data')\n fig.savefig(f'{output_images_path}/original_data_characters_11-20.png', bbox_inches='tight')\n plt.close()\n if language.lower() == 'greek':\n fig = plot_images(X_merged, y_merged, categories=list(range(21, 25)), title='Original Data')\n fig.savefig(f'{output_images_path}/original_data_characters_21-24.png', bbox_inches='tight')\n else:\n fig = plot_images(X_merged, y_merged, categories=list(range(21, 27)), title='Original Data')\n fig.savefig(f'{output_images_path}/original_data_characters_21-26.png', bbox_inches='tight')\n plt.close()\n\n #####\n\n x, loss_summ, apply_updates, summary_op, saver, elbo, x_recon_samples = vae(\n batch_size,\n input_dim,\n hidden_encoder_dim,\n hidden_decoder_dim,\n latent_dim,\n learning_rate=learning_rate\n )\n\n cur_samples = None\n batch_labels = None\n cur_elbo = None\n X_recon = np.zeros((N, input_dim))\n\n start_time = time.time()\n with tf.compat.v1.Session() as sess:\n summary_writer = tf.compat.v1.summary.FileWriter(logdir, graph=sess.graph)\n if os.path.isfile(save_path + '/model.ckpt'):\n print('Restoring saved parameters')\n saver.restore(sess, save_path + '/model.ckpt')\n else:\n print('Initializing parameters')\n sess.run(tf.compat.v1.global_variables_initializer())\n\n print()\n\n for epoch in range(1, epochs + 1):\n iterations = int(N / batch_size)\n for i in range(1, iterations + 1):\n start_index = (i - 1) * batch_size\n end_index = i * batch_size\n\n batch_data = X_merged[start_index:end_index, :]\n 
batch_labels = y_merged[start_index:end_index]\n\n feed_dict = {x: batch_data}\n loss_str, _, summary_str, cur_elbo, cur_samples = sess.run(\n [loss_summ, apply_updates, summary_op, elbo, x_recon_samples],\n feed_dict=feed_dict\n )\n\n X_recon[start_index:end_index] = cur_samples\n\n summary_writer.add_summary(loss_str, epoch)\n summary_writer.add_summary(summary_str, epoch)\n\n print(f'Epoch {epoch} | Loss (ELBO): {cur_elbo}')\n\n if epoch % 10 == 0 or epoch == 1:\n fig = plot_images(\n cur_samples,\n batch_labels,\n categories=list(range(1, 11)), title=f'Epoch {str(epoch).zfill(3)}'\n )\n fig.savefig(\n f'{output_images_path}/epoch_{str(epoch).zfill(3)}_characters_1-10.png',\n bbox_inches='tight'\n )\n plt.close()\n fig = plot_images(\n cur_samples,\n batch_labels,\n categories=list(range(11, 21)), title=f'Epoch {str(epoch).zfill(3)}'\n )\n fig.savefig(\n f'{output_images_path}/epoch_{str(epoch).zfill(3)}_characters_11-20.png',\n bbox_inches='tight'\n )\n plt.close()\n if language.lower() == 'greek':\n fig = plot_images(\n cur_samples,\n batch_labels,\n categories=list(range(21, 25)), title=f'Epoch {str(epoch).zfill(3)}'\n )\n fig.savefig(\n f'{output_images_path}/epoch_{str(epoch).zfill(3)}_characters_21-24.png',\n bbox_inches='tight'\n )\n else:\n fig = plot_images(\n cur_samples,\n batch_labels,\n categories=list(range(21, 27)),\n title=f'Epoch {str(epoch).zfill(3)}'\n )\n fig.savefig(\n f'{output_images_path}/epoch_{str(epoch).zfill(3)}_characters_21-26.png',\n bbox_inches='tight'\n )\n plt.close()\n\n if epoch % 2 == 0:\n saver.save(sess, save_path + '/model.ckpt')\n elapsed_time = time.time() - start_time\n\n print(f'training time: {elapsed_time} secs')\n print()\n\n error1 = rmse(X_merged, X_recon)\n print(f'root mean squared error: {error1}')\n\n error2 = mae(X_merged, X_recon)\n print(f'mean absolute error: {error2}')\n\n # TENSORBOARD\n # Open a console and run 'tensorboard --logdir=../tensorflow_logs/omniglot_greek_vae' OR\n # 'tensorboard --logdir=../tensorflow_logs/omniglot_english_vae'.\n # Then open your browser and navigate to -> http://localhost:6006\n\n\nif __name__ == '__main__':\n omniglot()\n","repo_name":"chriskormaris/vae-master-thesis","sub_path":"src/vaes_in_tensorflow/omniglot.py","file_name":"omniglot.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"70741295184","text":"from decimal import Decimal, getcontext\nfrom copy import deepcopy\n\nimport math\n\nfrom vector import Vector, MyDecimal\nfrom plane import Plane\n\ngetcontext().prec = 30\n\nclass Parametrization(object):\n\n BASEPT_AND_DIR_VECTORS_MUST_BE_IN_SAME_DIM = (\n 'The basepoint and direction vectors should all live in the same '\n 'dimension')\n\n def __init__(self, basepoint, direction_vectors):\n\n self.basepoint = basepoint\n self.direction_vectors = direction_vectors\n self.dimension = self.basepoint.dimension\n\n try:\n for v in direction_vectors:\n assert v.dimension == self.dimension\n\n except AssertionError:\n raise Exception(self.BASEPT_AND_DIR_VECTORS_MUST_BE_IN_SAME_DIM)\n\n def __str__(self):\n\n output = ''\n for coord in range(self.dimension):\n output += 'x_{} = {} '.format(coord + 1,\n round(self.basepoint[coord], 3))\n for free_var, vector in enumerate(self.direction_vectors):\n output += '+ {} t_{}'.format(round(vector[coord], 3),\n free_var + 1)\n output += '\\n'\n return output\n\n\nclass LinearSystem(object):\n\n planes = None # type: List[Plane]\n dimension = None # type: 
int\n\n ALL_PLANES_MUST_BE_IN_SAME_DIM_MSG = 'All planes in the system should live in the same dimension'\n NO_SOLUTIONS_MSG = 'No solutions'\n INF_SOLUTIONS_MSG = 'Infinitely many solutions'\n\n\n def __init__(self, planes):\n '''\n\n :param planes:\n :type planes: List[Plane]\n '''\n try:\n d = planes[0].dimension\n for p in planes:\n assert p.dimension == d\n\n self.planes = planes\n self.dimension = d\n\n except AssertionError:\n raise Exception(self.ALL_PLANES_MUST_BE_IN_SAME_DIM_MSG)\n\n\n def swap_rows(self, row1, row2):\n tmp = self[row1]\n self[row1] = self[row2]\n self[row2] = tmp\n\n def multiply_coefficient_and_row(self, coefficient, row):\n self[row] = self[row] * coefficient\n\n def add_multiple_times_row_to_row(self, coefficient, row_to_add, row_to_be_added_to):\n self[row_to_be_added_to] = \\\n self[row_to_add] * Decimal(coefficient) + self[row_to_be_added_to]\n\n def indices_of_first_nonzero_terms_in_each_row(self):\n num_equations = len(self)\n num_variables = self.dimension\n\n indices = [-1] * num_equations\n\n for i,p in enumerate(self.planes):\n try:\n indices[i] = p.get_nonzero_index()\n except Exception as e:\n if str(e) == Plane.NO_NONZERO_ELTS_FOUND_MSG:\n continue\n else:\n raise e\n\n return indices\n\n\n def __len__(self):\n return len(self.planes)\n\n\n def __getitem__(self, i):\n '''\n\n :param i:\n :type i: int\n :return:\n :rtype: Plane\n '''\n return self.planes[i]\n\n\n def __setitem__(self, i, x):\n '''\n\n :param i:\n :type i: int\n :param x:\n :type x: Plane\n :return:\n :rtype: None\n '''\n try:\n assert x.dimension == self.dimension\n self.planes[i] = x\n\n except AssertionError:\n raise Exception(self.ALL_PLANES_MUST_BE_IN_SAME_DIM_MSG)\n\n\n def __str__(self):\n ret = 'Linear System:\\n'\n temp = ['Equation {}: {}'.format(i+1,p) for i,p in enumerate(self.planes)]\n ret += '\\n'.join(temp)\n return ret\n\n def find_plane(self, func, start = 0):\n for i in range(start, len(self.planes)):\n if func(self.planes[i]):\n return i\n return None\n\n # x y z\n # 1 1 1 N\n # -1 1 -1 Y\n # 1 -1 -1 N\n # -1 -1 1 Y\n\n def find_and_swap(self, variable_index, current_index):\n '''\n\n :param system:\n :type system: LinearSystem\n :param variable_index:\n :type variable_index: int\n :param current_index:\n :type current_index: int\n :param nonzero_index:\n :type nonzero_index: int\n :return:\n :rtype: None\n '''\n for i in range(current_index, len(self.planes)):\n first_zero_index = self[i].get_nonzero_index()\n if first_zero_index >=0 and first_zero_index <= variable_index:\n self.swap_rows(current_index, i)\n return True\n\n return False\n\n def clear_variables(self, plane_index, variable_index, nonzero_index):\n for i in range(plane_index):\n ith_nonzero_index = self[i].get_nonzero_index()\n if (ith_nonzero_index == nonzero_index):\n row_to_add = i\n row_to_add_to = plane_index\n coefficient = (self[row_to_add_to][nonzero_index] / self[row_to_add][ith_nonzero_index]).copy_abs()\n if self[row_to_add_to][nonzero_index] * self[row_to_add][ith_nonzero_index] > 0:\n coefficient = coefficient * -1\n self.add_multiple_times_row_to_row(coefficient, row_to_add=row_to_add, row_to_be_added_to=row_to_add_to)\n\n\n @property\n def compute_triangular_form(self):\n system = deepcopy(self) # type: LinearSystem\n plane_index = 0\n variable_index = 0\n while plane_index < len(system.planes):\n nonzero_index = system[plane_index].get_nonzero_index()\n if nonzero_index < 0:\n if not system.find_and_swap(variable_index, plane_index):\n variable_index = variable_index + 1\n 
plane_index = plane_index + 1\n elif nonzero_index > variable_index:\n if not system.find_and_swap(variable_index, plane_index):\n variable_index = variable_index + 1\n plane_index = plane_index + 1\n elif nonzero_index == variable_index:\n variable_index = variable_index + 1\n plane_index = plane_index + 1\n else:\n system.clear_variables(plane_index, variable_index, nonzero_index)\n\n return system\n\n # @property\n # def compute_triangular_form2(self):\n # system = deepcopy(self)\n #\n # current_index = 0\n # while current_index < len(system.planes):\n # current_zero_index = system[current_index].get_nonzero_index()\n # if current_zero_index > current_index:\n # # find the best fit\n # maxindex = current_index\n # min = current_zero_index\n # for j in range(current_index+1, len(system.planes)):\n # if (system[j].get_nonzero_index() < min):\n # min = system[j].get_nonzero_index()\n # maxindex = j\n #\n # system.swap_rows(current_index, maxindex)\n # current_index = current_index + 1\n # else:\n # if not current_zero_index == current_index:\n # row_to_add_index = None\n # for j in range(0, current_index):\n # if system[j].get_nonzero_index() == current_zero_index:\n # row_to_add_index = j\n # break\n #\n # if row_to_add_index is not None:\n # current_factor = system[current_index][current_zero_index]\n # target_factor = system[row_to_add_index][current_zero_index]\n # coefficient = (current_factor / target_factor).copy_abs()\n # if target_factor * current_factor > 0:\n # coefficient = coefficient * -1\n #\n # system.add_multiple_times_row_to_row(coefficient, row_to_add_index, current_index)\n # else:\n # current_index = current_index + 1\n # else:\n # current_index = current_index + 1\n #\n # return system\n\n def compute_rref(self):\n '''\n reduce pivot variable coefficient to 1\n ensure pivot variable is alone in column\n :return: LinearSystem\n :rtype: LinearSystem\n '''\n tf = self.compute_triangular_form\n\n for i, x in enumerate(tf.indices_of_first_nonzero_terms_in_each_row()):\n if x >= 0:\n tf.reduce_pivot_variable_to_one(i, x)\n tf.clear_rows_above(i, x)\n\n return tf\n\n def reduce_pivot_variable_to_one(self, i, x):\n self.multiply_coefficient_and_row(Decimal('1') / self[i][x], i)\n\n def clear_rows_above(self, i, x):\n for j in range(0, i):\n if self[j][x] != 0:\n coefficient = (self[j][x] / self[i][x]).copy_abs()\n if self[j][x] * coefficient > 0:\n coefficient = coefficient * -1\n self.add_multiple_times_row_to_row(coefficient=coefficient, row_to_add=i, row_to_be_added_to=j)\n\n def has_unique_solution(self):\n for x in self.planes:\n if x.get_nonzero_index() < 0 and not MyDecimal(x.constant_term).is_near_zero():\n return self.NO_SOLUTIONS_MSG\n\n if not len([x for x in self.indices_of_first_nonzero_terms_in_each_row() if x >= 0]) == self.dimension:\n return self.INF_SOLUTIONS_MSG\n\n return True\n\n def to_parametrization(self):\n plane_index = 0\n coefficient_index = 0\n basepoint = ['0']*self.dimension\n pivots = self.indices_of_first_nonzero_terms_in_each_row()\n number_of_direction_vectors = self.dimension - len(['0' for x in pivots if x >= 0])\n direction_vectors = [['0' for x in range(self.dimension)] for x in range(number_of_direction_vectors)]\n\n for i in range(0, len(self.planes)):\n pivot = self[i].get_nonzero_index()\n if pivot < 0:\n break\n basepoint[pivot] = self[i].constant_term\n\n for i in range(0, len(self.planes)):\n pivot = self[i].get_nonzero_index()\n if pivot < 0:\n break\n else:\n index = 0\n for j in range(pivot+1, self.dimension):\n coefficient = 
self[i][j]\n if not MyDecimal(coefficient).is_near_zero():\n direction_vectors[index][pivot] = coefficient * Decimal('-1')\n if j not in pivots:\n index = index + 1\n\n index = 0\n for i in range(self.dimension):\n if i not in pivots:\n direction_vectors[index][i] = '1'\n index = index + 1\n\n return Parametrization(Vector(basepoint), map(lambda x: Vector(x), direction_vectors))\n\n# def dot_product(vector_one, vector_two):\n# return sum([x*y for x,y in zip(vector_one, vector_two)])\n\n# def get_row(matrix, row):\n# return matrix[row]\n\n\n\n\n\n\n\n\n\n","repo_name":"cromgit/data-analyst","sub_path":"udacity-linear-algebra/linsys.py","file_name":"linsys.py","file_ext":"py","file_size_in_byte":11013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18022909772","text":"from lcsscaseapi.types import Judge, JudgeRuling\nimport pytest\n\n\ndef test_jr_constructor():\n # test that missing fields set to None\n # test that judge_id and case_id fields are mandatory\n\n jr = JudgeRuling(case_id=\"X1111\", judge_id=35)\n\n assert jr.vote == None\n assert jr.author == None\n assert jr.id == None\n\n jr = JudgeRuling(case_id=\"X1111\", judge_id=35, vote = JudgeRuling.DISSENTING, author=False, id = 10)\n\n assert jr.case == \"X1111\"\n assert jr.judge == 35\n assert jr.vote == JudgeRuling.DISSENTING\n assert jr.author == False\n assert jr.id == 10\n\n with pytest.raises(Exception):\n jr = JudgeRuling()\n with pytest.raises(Exception):\n jr = JudgeRuling(case_id=\"X1111\")\n with pytest.raises(Exception):\n jr = JudgeRuling(judge_id=22)\n\ndef test_jr_constructor_invalid_vote():\n # votes can only be concurring or dissenting\n with pytest.raises(Exception, match=\"vote must be Concurring, Dissenting or None\"):\n JudgeRuling(case_id=\"W1212\", judge_id=22, vote=\"Hello\")\n\ndef test_jr_vote_setter():\n # again, votes can only be set to concurring or dissenting\n jr = JudgeRuling(case_id=\"W1212\", judge_id=22, vote = JudgeRuling.CONCURRING)\n jr.vote = None\n assert jr.vote == None\n \n jr.vote = JudgeRuling.DISSENTING\n assert jr.vote == JudgeRuling.DISSENTING\n \n with pytest.raises(Exception, match=\"vote must be Concurring, Dissenting or None\"):\n jr.vote = \"Agree\"\n\ndef test_jr_eq():\n jr1 = JudgeRuling(case_id=\"W1212\", judge_id=22, vote = JudgeRuling.PARTIAL)\n jr2 = JudgeRuling(case_id=\"W1212\", judge_id=22, vote = JudgeRuling.PARTIAL, author = True)\n\n assert jr1 != jr2\n jr1.author = True\n assert jr1 == jr2\n\ndef test_jr_hash():\n jr1 = JudgeRuling(case_id=\"W1212\", judge_id=22, vote = JudgeRuling.CONCURRING)\n jr2 = JudgeRuling(case_id=\"W1212\", judge_id=22, vote = JudgeRuling.CONCURRING, author = True)\n\n assert jr1.__hash__() != jr2.__hash__()\n\n s = set()\n s.add(jr2)\n assert jr1 not in s\n\n jr1.author = True\n assert jr1.__hash__() == jr2.__hash__()\n assert jr1 in s\n\ndef test_jr_str():\n jr = JudgeRuling(case_id=\"V1234\", judge_id=8, vote = JudgeRuling.CONCURRING, author = False)\n assert str(jr) == r'{\"author\": false, \"case\": \"V1234\", \"id\": null, \"judge\": 8, \"vote\": \"Concurring\"}'\n\ndef test_jr_repr():\n jr = JudgeRuling(case_id=\"V1234\", judge_id=22, vote = None, author = True)\n assert jr.__repr__() == \"JudgeRuling Object: \" + str(jr)\n\ndef test_jr_to_json_dict():\n jr = JudgeRuling(case_id=\"X3462\", judge_id=8, id=12, author = False)\n json_dict = {\n \"case\": \"X3462\",\n \"judge\": 8,\n \"id\": 12,\n \"author\": False,\n \"vote\": None\n }\n assert jr.to_json_dict() == 
json_dict\n\ndef test_jr_from_json_dict():\n    json_dict = {\n        \"case\": \"T5673\",\n        \"judge\": 103,\n        \"id\": None,\n        \"author\": True,\n        \"vote\": JudgeRuling.CONCURRING\n    }\n    jr = JudgeRuling(case_id = \"T5673\", judge_id = 103, author = True, vote = JudgeRuling.CONCURRING)\n\n    assert JudgeRuling.from_json_dict(json_dict) == jr\n\ndef test_jr_from_json_dict_missing_fields():\n    # check that trying to create a jr with no `case` or `judge` field fails\n    # missing any other field should not pose an issue\n\n    json_dict = {\n        \"case\": \"T5673\",\n        \"judge\": 103,\n        \"id\": None,\n    }\n    jr = JudgeRuling(case_id = \"T5673\", judge_id = 103)\n    assert JudgeRuling.from_json_dict(json_dict) == jr\n\n    json_dict = {\n        \"case\": \"T5673\",\n        \"id\": 10,\n    }\n    with pytest.raises(Exception, match = \"Cannot have JudgeRuling without 'case' field or without 'judge' field\"):\n        JudgeRuling.from_json_dict(json_dict)\n\n    json_dict = {\n        \"judge\": 11,\n        \"vote\": JudgeRuling.DISSENTING,\n        \"id\": 10\n    }\n    with pytest.raises(Exception, match = \"Cannot have JudgeRuling without 'case' field or without 'judge' field\"):\n        JudgeRuling.from_json_dict(json_dict) \n\ndef test_jr_from_json_dict_incorrect_vote():\n    json_dict = {\n        \"case\": \"T5673\",\n        \"judge\": 103,\n        \"id\": 5,\n        \"vote\": \"hello\"\n    }\n    with pytest.raises(Exception, match = \"vote must be Concurring, Dissenting or None\"):\n        JudgeRuling.from_json_dict(json_dict) \n\ndef test_jr_from_json_dict_extra_fields():\n    json_dict = {\n        \"case\": \"T5673\",\n        \"judge\": 103,\n        \"id\": None,\n        \"irrelevant\": True\n    }\n    jr = JudgeRuling(case_id = \"T5673\", judge_id = 103)\n    assert JudgeRuling.from_json_dict(json_dict) == jr","repo_name":"SKAshwin/caseapi-wrapper","sub_path":"tests/test_types_judgeruling.py","file_name":"test_types_judgeruling.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"69962611022","text":"import unittest.mock as mock\nimport sys\nimport os\n\nsys.path.append(os.path.realpath(os.path.dirname(__file__) + \"/..\"))\nimport src.deploy as deploy  # noqa: E402\n\n\ndef test_check_status():\n    payload = {\"deployment_status\": {\"state\": \"pending\"}}\n    assert deploy.check_status(payload)\n\n\ndef test_restart_webservice():\n    m = mock.MagicMock()\n    with mock.patch(\"subprocess.Popen\", m):\n        d = deploy.restart_webservice()\n    assert d is None\n    m.assert_called_once_with([\"webservice\", \"restart\"])\n","repo_name":"AntiCompositeNumber/anticompositetools","sub_path":"tests/test_deploy.py","file_name":"test_deploy.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"70741295184","text":"# Task 8: determine whether k pieces can be broken off an n × m chocolate bar, given that one straight break between the pieces is allowed (i.e. the bar is broken into two rectangles).\n# *Example:*\n\n# 3 2 4 -> yes\n# 3 2 1 -> no\n\nnum_horizontal = int(input(\"Введите число долек по горизонтали: \"))\nnum_vertical = int(input(\"Введите число долек по вертикали: \"))\nnum_break_off = int(input(\"Введите число долек, которые нужно отломить: \"))\n\n# one straight break removes a whole number of rows or columns,\n# so k must be a positive multiple of one side and smaller than the whole bar\nif 0 < num_break_off < num_horizontal * num_vertical and (num_break_off % num_horizontal == 0 or num_break_off % num_vertical == 0):\n    print(f\"{num_horizontal}; {num_vertical}; {num_break_off} --> yes\")\nelse:\n    print(f\"{num_horizontal}; {num_vertical}; {num_break_off} 
--> no\")\n","repo_name":"CurtisEllese/python_course","sub_path":"task08.py","file_name":"task08.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12537507358","text":"from os import path\nfrom verify_cert import load_certificate,verify_certificate\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.backends import default_backend\nfrom base64 import b64decode\nfrom verify_sig import verify_signature\nfrom RSA.rsa_key_pair import load_private_key\nfrom cipher import create_cipher\nfrom digital_sig import get_OAEP_padding\n\ndef search_certificate(userid:str):\n cert_name = userid.lower()+'_cert.pem'\n dir = path.join(path.abspath('.'),'keystore')\n filepath = path.join(dir,cert_name)\n print(f'Searching for file in filepath {filepath}')\n if path.isfile(filepath):\n return filepath\n else:\n return None \n\ndef get_user_id_from_message(file:list[str]):\n while True:\n userid=''\n for line in file:\n if '-----BEGIN ID-----' in line:\n pass\n elif '-----END ID-----' in line:\n return userid.strip('\\n')\n else:\n userid = userid + line\n break \n\n\n \ndef get_EKEY(file:list[str]):\n i = iter(file)\n ekey = '' \n for m in i:\n if '-----BEGIN EKEY-----' in m:\n ekey = i.__next__()\n for n in i:\n if '-----END EKEY-----' in n:\n return ekey\n else:\n ekey = ekey + n \n\n\ndef get_message(file:list[str]):\n i = iter(file) \n for m in i:\n if '-----BEGIN MESSAGE-----' in m:\n message = i.__next__()\n for n in i:\n if '-----END MESSAGE-----' in n:\n return message\n else:\n message = message + n\n\ndef get_signature(file:list[str]):\n i = iter(file) \n sig = ''\n for m in i:\n if '-----BEGIN SIGNATURE-----' in m:\n #sig = sig + m\n sig = i.__next__()\n for n in i:\n if '-----END SIGNATURE-----' in n:\n #sig = sig + n\n return sig\n else:\n sig = sig + n \n\ndef get_eiv(file:list[str]):\n i = iter(file) \n for m in i:\n if '-----BEGIN EIV-----' in m:\n #eiv = eiv + m\n eiv = i.__next__()\n for n in i:\n if '-----END EIV-----' in n:\n #eiv = eiv + n\n return eiv\n else:\n eiv = eiv + n \n \ndef grab_up_to_signature(file):\n data = ''\n with open(file,'r') as r:\n while True:\n line = r.readline()\n if '-----BEGIN SIGNATURE-----' in line:\n break\n else:\n data = data + line \n return data\n \n \n\nif __name__ == '__main__':\n filename = input('Enter filename to load:')\n #filename = 'mess_to_richard_from_nigel.txt'\n if path.isfile(filename):\n with open(filename,'r') as r:\n file = r.readlines()\n print('Getting user id.')\n userid = get_user_id_from_message(file)\n if userid == None:\n print(f'Message was not properly formatted. 
Cannot get userid.')\n exit()\n print(f'Searching for {userid} certificate')\n cert_path = search_certificate(userid)\n #print(cert_path)\n print('Loading certificate.')\n certificate = load_certificate(cert_path)\n verify_certificate(certificate)\n print('Getting public key provided in certificate')\n ku = certificate.public_key()\n print('Getting encrypted message')\n message = get_message(file)\n #print(f'Message = #{message}#')\n print('Getting encrypted key')\n ekey = get_EKEY(file)\n #print(f'EKEY = #{ekey}#')\n print('Getting encrypted iv')\n eiv = get_eiv(file)\n #print(f'EIV = #{eiv}#')\n d = grab_up_to_signature(filename)\n #print(f\"D = \\n{d}\")\n print('Creating digest.')\n myhash = hashes.SHA256()\n hasher = hashes.Hash(myhash,default_backend())\n hasher.update(d.encode())\n digest = hasher.finalize()\n #print(f'Digest = \\n{digest}')\n print('Getting signature.')\n sig = get_signature(file)\n #print(f'Before decode, sig = \\n{sig}')\n sig = b64decode(sig)\n print('Verifying signature.')\n if verify_signature(digest=digest,ku=ku,sig=sig):\n pkey = load_private_key()\n decrypted_ekey = pkey.decrypt(b64decode(ekey),padding=get_OAEP_padding())\n decrypted_iv = pkey.decrypt(b64decode(eiv),padding=get_OAEP_padding())\n AES_CIPHER = create_cipher(decrypted_ekey,decrypted_iv,default_backend())\n decryptor = AES_CIPHER.decryptor()\n decrypted_message = decryptor.update(b64decode(message)) + decryptor.finalize()\n print(f'------Decrypted message------\\n{decrypted_message.decode()}')\n else:\n print('Signature is not valid and means the data has been modified...')\n\n \n\n\n\n\n","repo_name":"richimorales13/secure_message_exchange_system","sub_path":"receiving_app.py","file_name":"receiving_app.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2354679142","text":"total = 5\nfearData = \"2 3 1 2 2\"\n\nresult = 0 # 총 그룹의 수\ncount = 0 # 현재 그룹에 포함된 모함가의 수\ndata = list(map(int,fearData.split()))\n\n\nfor i in data: # 공포도를 낮은 것 부터 하나씩 확인하며\n count += 1 # 현재 그룹에 해당 모험가를 포함시키기\n if count >= i: # 현재 그룹에 포함된 모험가의 수가 현재 공포도 이상이라면 그룹 결성\n result += 1 # 총 그룹 수 증가\n count = 0 # 현재 그룹에 포함된 모험가 초기화\n\nprint(result)\n","repo_name":"mmvv11/PythonAlgorithmStudy","sub_path":"그리디/[모범답안] 모험가 길드.py","file_name":"[모범답안] 모험가 길드.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43850586855","text":"from flask import render_template, request, \\\n jsonify, redirect, flash, \\\n send_from_directory, url_for, send_file, session, make_response, g, Response\nfrom suppliers import supp\nfrom suppliers.models import Supplier, Prefin, Documents, Tn, Supp_payment \nfrom app_main.models import *\nfrom app_main import db, app\nfrom .forms import *\nfrom .models import *\n\nfrom customers import cust\n\nfrom sqlalchemy import exc\nfrom sqlalchemy import desc, or_, and_\n\nimport datetime\nfrom datetime import timedelta\nimport json\n\n@cust.route('/index')\ndef index():\n return \"Privet customers\"\n\n@cust.route('/customers', methods=['POST', 'GET'])\ndef customers_payments():\n customers = Customer.query.all()\n invoices = Invoicecust.query.all()\n formName = CustomerForm()\n formName.name.choices = [(g.id, g.name) for g in customers]\n today = datetime.datetime.today()\n\n\n\n \n \n if formName.is_submitted():\n customer = Customer.query.get(formName.name.data)\n print(customer.id)\n\n all_payments = 
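# (note on the adventurer-guild snippet above: its comment says the fear levels
#  are checked from the lowest, but `data` is consumed in input order; the
#  standard greedy solution sorts first, e.g.
#  data = sorted(map(int, fearData.split())),
#  which guarantees a maximal group count. The sample "2 3 1 2 2" happens to
#  give 2 either way.)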
Invoice_payment_c.query.filter(Invoice_payment_c.customer_id==customer.id).\\\n order_by(Invoice_payment_c.date.desc()).all()\n \n invoices_failed = Invoicecust.query.filter(and_(Invoicecust.customer_id==customer.id, Invoicecust.invoice_deadline_payment', methods=['POST', 'GET'])\ndef remove_payments_c(id):\n id = request.args.get('id')\n payment = Invoice_payment_c.query.get(int(id))\n if payment:\n db.session.delete(payment)\n db.session.commit()\n return jsonify({'success_remove':'Запись удалена'})\n else:\n return jsonify({'faile':'failed'})\n\n\n\n ","repo_name":"dmitriyignatiev/intranet_main","sub_path":"customers/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74246377742","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User, Permission\nfrom django.core.exceptions import PermissionDenied\n\nfrom models import Example\n\n\nclass CodeExamples(TestCase):\n def test_readme_example(self):\n 'test the code example in README.md'\n \n permission = Permission.objects.get(\n codename='example_can_get_id')\n \n user = User.objects.create(\n username='user')\n user_without_permissions = User.objects.create(\n username='without_permissions')\n user.user_permissions.add(permission)\n \n assert user.has_perm('app.example_can_get_id')\n \n '''\n Basic Example\n '''\n \n \"\"\"\n \n \n Then, instance it and use it. When you try to access a locked\n method, modelpermissions will throw an exception.\n \"\"\"\n \n model = Example.objects.create(name='hidden-name')\n \n try:\n model.get_id()\n assert False #remove this\n except PermissionDenied:\n 'django.core.exceptions.PermissionDenied was thrown'\n \n \"But now let's unlock this model instance and try again\"\n model.unlock(user)\n \n model.get_id()\n 'no exception thrown'\n \n \n '''\n Checks example\n '''\n \n model.lock()\n \n \"You can use the above idiom, or you may want to check whether a method is locked beforehand.\"\n \n from modelpermissions import checks\n \n get_id_locked = checks.is_locked(model, 'get_id')\n \n assert get_id_locked #remove this\n","repo_name":"fabiosantoscode/django-model-permissions","sub_path":"testproject/app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9697695940","text":"with open(\"Day1/Day1Inputs.txt\",'r') as f:\n inputs = [line.strip() for line in f.read().split(\"\\n\")]\n\ncalorieCount = []\ncountPerElf = 0\n\nfor i in inputs:\n if i == inputs[-1]:\n calorieCount.append(countPerElf + int(i))\n elif i == \"\":\n calorieCount.append(countPerElf)\n countPerElf = 0\n \n else:\n countPerElf += int(i)\n\n\n#Part 1\nprint(\"The most calories one elf carries is:\", max(calorieCount), \" Which is held by Elf #\", calorieCount.index(max(calorieCount)) + 1, \"\\n\")\n\n#Part 2\ntop3ElvesCount = []\nfor i in range(0,3):\n top3ElvesCount.append(max(calorieCount))\n calorieCount.remove(max(calorieCount))\nprint(\"The top 3 Elves hold\", sum(top3ElvesCount), 
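# (aside on Day1.py above: the append-and-remove loop mutates calorieCount; an
#  equivalent non-destructive form is
#  sum(sorted(calorieCount, reverse=True)[:3]))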
\"calories.\")","repo_name":"PhilGomez228/AdventOfCode2022","sub_path":"Day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"6479428300","text":"\"\"\"\n\nProblem statement: User need to return the Minimum time taken by k painters to paint the given number of boards under the condition that each painter can paint continous \n section of boards and each painter take 1 unit of time to paint 1 unit of board\n\nSolution: This Problem could be solved using binary search in which\n Search Space would be betwween maximum size of the borad and sum of all the board size\n start will be initialised as maximum size of the board\n end will be initialised as sum of all the board size\n mid would be calculated as (start+end)/2.\n Using painterhelp function we would find the number of painters needed to paint (mid) number of boards\n If painters needed to paint 'mid' number of boards is greater than k then we would make search space to be between mid and end\n If painters needed to paint 'mid' number of boards is less than k then we would make search space to be between start and mid\n Maximum unit painted by the painter would then be multiplied by time given to get the minimum answer\n\n\"\"\"\n\ndef painterhelp(board,total,b):\n temp = 0\n currentpainters = 1\n\n for i in range(0,b): \n temp += board[i] \n\n if (temp > total):\n temp = board[i]\n currentpainters += 1\n\n return currentpainters #painters required to paint (total) unit of boards\n\ndef painterdivide(board,p,b): \n maxi=0;\n total=0; \n for i in range(0, b):\n if board[i]> maxi :\n maxi=board[i]\n total+=board[i]\n start=maxi #Start intialised as the maximum size of the board\n end=total # end initialised as sum of all the size of the boards\n\n while(start p): \n start = mid+1 \n else:\n end=mid\n return start #Maximum units of the board that could be painted by each painter\n\nb = int(input(\"Enter number of boards : \")) #Taking how many boards to be painted as the input\np= int(input(\"Enter number of painters available : \")) #Taking input Number of painters \nt= int(input(\"Enter the time taken by each painter: \")) #Taking input time taken by each painter\nprint(\"Enter the size of each board : \")\nboard = [] #Intialise board array with empty list\nfor i in range(0, b): \n ele = int(input()) \n \n board.append(ele) # Input the size of the boards\nanswer = painterdivide(board, p, b)\nanswer*=t; #Minimum time to paint all the boards\nprint(answer)\n\n\n\n\"\"\"\n\nTestCaes\nInput-1\n 2 2 5\n 1 10\nOutput-1\n 50\n\nInput-2\n 9 3 5\n 1 2 3 4 5 6 7 8 9\nOutput-1\n 85\n \nTime Complexity : O(N*log(sum(boards))\nSpace Complexity : O(1)\n\n\"\"\"\n","repo_name":"Algo-Phantoms/Algo-Tree","sub_path":"Code/Python/Painter's_Partition.py","file_name":"Painter's_Partition.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"47"} +{"seq_id":"41240695329","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport pandas as pd\n\ncourse_name = ['Data Science', 'Machine Learning', 'Big Data', 'Data Engineer']\nduration = [2, 3, 6, 4]\n\ndf = pd.DataFrame(data={'course_name': course_name, 'duration': duration})\n\n# Print the data present in the second row using iloc\nsecond_row = df.iloc[1]\nprint(second_row)\n\n\n# In Pandas, the loc and iloc are both indexer functions used to access data in a DataFrame. 
However, they have different methods of indexing and selecting data:\n# \n# loc: The loc indexer is label-based and is used to select data based on labels or boolean arrays along both the row and column axes.\n# Syntax: df.loc[row_indexer, column_indexer]\n# \n# iloc: The iloc indexer is integer-based and is used to select data based on integer positions along both the row and column axes.\n# Syntax: df.iloc[row_indexer, column_indexer]\n\n# In[10]:\n\n\nimport pandas as pd\n\ncourse_name = ['Data Science', 'Machine Learning', 'Big Data', 'Data Engineer']\nduration = [2, 3, 6, 4]\n\ndf = pd.DataFrame(data={'course_name': course_name, 'duration': duration})\n\n# Define the reindex order\nreindex = [3, 0, 1, 2]\n\n# Reindex the DataFrame\nnew_df = df.reindex(reindex)\n\n# Print the new_df\nprint(new_df)\n\n# Access row using loc\nprint(new_df.loc[2])\n\n# Access row using iloc\nprint(new_df.iloc[2])\n\n\n# The difference in the outputs arises because loc and iloc use different indexing methods:\n# \n# new_df.loc[2] accesses the row with index label 2. In this case, after reindexing, the row with index label 2 corresponds to the course \"Big Data\" with a duration of 6. So, new_df.loc[2] returns the row with the label 2, which includes the values 'Big Data' and 6.\n# \n# new_df.iloc[2] accesses the row with index position 2. After reindexing, the row with index position 2 corresponds to the course \"Machine Learning\" with a duration of 3. So, new_df.iloc[2] returns the row with the index position 2, which includes the values 'Machine Learning' and 3.\n# \n# The difference is due to the fact that loc uses label-based indexing, whereas iloc uses integer-based indexing. Therefore, loc and iloc may return different results depending on the index labels and positions.\n# \n# In summary, the difference in the outputs is because loc and iloc access the data based on different indexing methods: label-based indexing for loc and integer-based indexing for iloc.\n\n# In[12]:\n\n\nimport pandas as pd\nimport numpy as np\n\ncolumns = ['column_1', 'column_2', 'column_3', 'column_4', 'column_5', 'column_6']\nindices = [1, 2, 3, 4, 5, 6]\n\n# Creating a DataFrame\ndf1 = pd.DataFrame(np.random.rand(6, 6), columns=columns, index=indices)\n\n# Calculate the mean of each column\ncolumn_means = df1.mean()\nprint(\"Mean of each column:\")\nprint(column_means)\nprint()\n\n# Calculate the standard deviation of 'column_2'\ncolumn_2_std = df1['column_2'].std()\nprint(\"Standard deviation of 'column_2':\", column_2_std)\n\n\n# In pandas, the \"window functions\" refer to a group of operations that are applied over a sliding window of data in a DataFrame or Series. These functions allow you to perform calculations and transformations on a specified window of data, such as rolling averages, cumulative sums, and more. Window functions are particularly useful for time series data and can provide insights into trends, patterns, and rolling statistics.\n# \n# There are several types of window functions available in pandas, including:\n# \n# Rolling Functions: These functions operate on a specified window of consecutive rows in the data. The window size can be defined using a fixed number of rows or a time-based offset. 
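# (two small self-contained illustrations for the notes here: with the first df
#  and its default RangeIndex, df.loc[1] selects by *label* and df.iloc[1] by
#  *position*, the same row there ('Machine Learning', 3); they diverge once
#  new_df is reindexed. For the window functions:
#  s = pd.Series([1, 2, 3, 4, 5])
#  s.rolling(window=3).mean()   -> NaN, NaN, 2.0, 3.0, 4.0
#  s.expanding().mean()         -> 1.0, 1.5, 2.0, 2.5, 3.0)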
Some common rolling functions include:\n# \n# rolling(): Provides access to various rolling window computations, such as mean, sum, min, max, etc.\n# rolling().apply(): Applies a custom function to the rolling window.\n# Expanding Functions: These functions calculate statistics that accumulate over time as more data points become available. The window size expands with each new row. Some common expanding functions include:\n# \n# expanding(): Provides access to various expanding window computations, such as mean, sum, min, max, etc.\n# expanding().apply(): Applies a custom function to the expanding window.\n# EWM (Exponentially Weighted Moving) Functions: These functions assign weights to each data point based on its age or position within the window. Recent data points have higher weights than older data points, allowing for calculations that give more importance to recent observations. Some common EWM functions include:\n# \n# ewm(): Provides access to various exponentially weighted moving window computations, such as mean, sum, min, max, etc.\n# ewm().apply(): Applies a custom function to the exponentially weighted moving window.\n# These window functions can be applied to both DataFrame columns and Series objects in pandas, providing powerful tools for analyzing and transforming data over specified windows. They offer flexibility in handling time series data and enable various calculations and aggregations to be performed efficiently.\n\n# In[23]:\n\n\nimport pandas as pd\nfrom datetime import datetime\n\n# Get the current date and time\ncurrent_date = datetime.now()\n\n# Extract the month and year\ncurrent_month = current_date.month\ncurrent_year = current_date.year\n\n# Create a pandas datetime object\npandas_datetime = pd.to_datetime(f\"{current_year}-{current_month}\")\n\n# Print the current month and year\nprint(\"Current Month:\", pandas_datetime.strftime(\"%B\"))\nprint(\"Current Year:\", current_year)\n\n\n# In[25]:\n\n\nimport pandas as pd\n\n# Prompt the user to enter the dates\ndate1 = input(\"Enter the first date (YYYY-MM-DD): \")\ndate2 = input(\"Enter the second date (YYYY-MM-DD): \")\n\n# Convert the input dates to Pandas datetime objects\ndate1 = pd.to_datetime(date1)\ndate2 = pd.to_datetime(date2)\n\n# Calculate the time difference\ntime_diff = date2 - date1\n\n# Extract the difference in days, hours, and minutes\ndays_diff = time_diff.days\nhours_diff = time_diff.seconds // 3600\nminutes_diff = (time_diff.seconds % 3600) // 60\n\n# Display the result\nprint(\"Time difference: {} days, {} hours, {} minutes.\".format(days_diff, hours_diff, minutes_diff))\n\n\n# In[29]:\n\n\nimport pandas as pd\n\n# Prompt the user to enter the file path\nfile_path = input(\"Enter the file path: \")\n\n# Prompt the user to enter the column name\ncolumn_name = input(\"Enter the column name: \")\n\n# Prompt the user to enter the category order\ncategory_order = input(\"Enter the category order (comma-separated): \").split(\",\")\n\n# Read the CSV file\ndf = pd.read_csv(file_path)\n\n# Convert the specified column to categorical data type with specified category order\ndf[column_name] = pd.Categorical(df[column_name], categories=category_order, ordered=True)\n\n# Sort the data based on the specified column\nsorted_data = df.sort_values(by=column_name)\n\n# Display the sorted data\nprint(sorted_data)\n\n\n# In[ ]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Prompt the user to enter the file path\nfile_path = input(\"Enter the file path: \")\n\n# Read the CSV file\ndf = 
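# (aside on the categorical sort built here: passing categories=... gives each
#  value a position, so sort_values follows that order rather than alphabetical
#  order; ordered=True additionally allows comparisons such as < between
#  values, e.g.
#  pd.Categorical(['b', 'a', 'c'], categories=['c', 'b', 'a'], ordered=True))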
pd.read_csv(file_path)\n\n# Convert the 'Date' column to datetime data type\ndf['Date'] = pd.to_datetime(df['Date'])\n\n# Group the data by product category and date, and calculate the sum of sales\ngrouped_data = df.groupby(['Product Category', 'Date'])['Sales'].sum().reset_index()\n\n# Pivot the data to have product categories as columns and dates as index\npivot_data = grouped_data.pivot(index='Date', columns='Product Category', values='Sales')\n\n# Plot the stacked bar chart\npivot_data.plot(kind='bar', stacked=True)\n\n# Set the labels and title\nplt.xlabel('Date')\nplt.ylabel('Sales')\nplt.title('Sales of Product Categories over Time')\n\n# Show the chart\nplt.show()\n\n\n# In[ ]:\n\n\nimport pandas as pd\nfrom tabulate import tabulate\n\n# Prompt the user to enter the file path\nfile_path = input(\"Enter the file path of the CSV file containing the student data: \")\n\n# Read the CSV file into a DataFrame\ndf = pd.read_csv(file_path)\n\n# Calculate the mean, median, and mode of the test scores\nmean_score = df['Test Score'].mean()\nmedian_score = df['Test Score'].median()\nmode_scores = df['Test Score'].mode()\n\n# Format the mode scores as a string\nmode_scores_str = ', '.join(map(str, mode_scores))\n\n# Create a table to display the results\ntable = [['Statistic', 'Value'],\n ['Mean', mean_score],\n ['Median', median_score],\n ['Mode', mode_scores_str]]\n\n# Display the table\nprint(tabulate(table, headers='firstrow', tablefmt='grid'))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"JawadAbdi/Data-Science","sub_path":"25 Feb Assignment.py","file_name":"25 Feb Assignment.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41787908575","text":"\"\"\"\"\nCP1404 Read numbers from file and add them together\n\n\"\"\"\n\nINPUT_FILE = \"numbers.txt\"\nin_file = open(INPUT_FILE, 'r')\ntotal = 0\nfor line in in_file:\n total += int(line)\nprint(\"Total is:\", str(total))\nin_file.close()","repo_name":"David-Roy-JCU/cp1404practicals","sub_path":"prac_02/07_numbers.py","file_name":"07_numbers.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2565739564","text":"#encoding:gbk\nimport sys\n\nfout = open('newaclresult.txt', 'ab+')\n\ndef is_white_list(tempdn):\n all_white =[\n \"CN=Domain Admins,\",\n \"CN=Account Operators,\",\n \"CN=Administrators,CN=Builtin,\",\n \"CN=Cert Publishers,\",\n \"CN=Enterprise Admins,\",\n \"CN=Enterprise Key Admins,\",\n \"CN=Exchange Enterprise Servers,\",\n \"CN=Exchange Recipient Administrators,\",\n \"CN=Exchange Servers,\",\n \"CN=Exchange Trusted Subsystem,\",\n \"CN=Exchange Windows Permissions,\",\n \"CN=Key Admins,CN=Users,\",\n \"CN=Organization Management,\",\n \"CN=Terminal Server License Servers,\",\n \"CN=Exchange Organization Administrators,\",\n ]\n for item in all_white:\n if tempdn.startswith(item):\n return True\n return False\n\nif len(sys.argv) != 3:\n print('aclsidmap.py sid.txt aclresult.txt')\n sys.exit(0)\n\nmapsid = {}\ntmp_sid, tmp_user = '', ''\nfor line in open(sys.argv[1], 'rb').readlines():\n line = line.decode('gbk').strip('\\r\\n').strip()\n if line.startswith('dn:'):\n tmp_sid, tmp_user = '', ''\n tmp_user = line.replace('dn:', '')\n if line.startswith('>objectSid: '):\n tmp_sid = line.replace('>objectSid: ', '')\n #if line.startswith('>sAMAccountName: '):\n # tmp_user = line.replace('>sAMAccountName: ', '')\n if line == '' and 
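# (aside on is_white_list above: str.startswith accepts a tuple, so the loop
#  collapses to
#  return tempdn.startswith(tuple(all_white)))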
tmp_sid != '' and tmp_user != '':\n mapsid[tmp_sid] = tmp_user\n #print(tmp_sid, tmp_user)\n \nfor line in open(sys.argv[2], 'rb').readlines():\n line = line.decode('utf8').strip('\\r\\n')\n if line.startswith(' S-'):\n tmp = line.strip(' ')\n if tmp in mapsid.keys():\n tempdn = mapsid[tmp]\n if is_white_list(tempdn):\n continue\n else:\n fout.write((\" \"+mapsid[tmp]+\"\\r\\n\").encode('utf8'))\n else:\n fout.write((\" \"+tmp+\"\\r\\n\").encode('utf8'))\n else:\n if is_white_list(line.strip(' ')):\n continue\n else:\n fout.write((line+\"\\r\\n\").encode('utf8'))\n ","repo_name":"DeepBlueTeam/adaclscan","sub_path":"aclsidmap_adfind.py","file_name":"aclsidmap_adfind.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"29509086218","text":"from django.shortcuts import render\nfrom django.contrib import messages\nfrom .forms import EquipoForm\nfrom .forms import JugadorForm\nfrom equipos.models import Equipo, Competencia, Jugador\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\n\ndef equipo_listar(request):\n equipo = Equipo.objects.all()\n return render(request, 'equipos/equipo_listar.html', {'equipo':equipo})\n\ndef jugador_listar(request):\n jugador = Jugador.objects.all()\n return render(request, 'equipos/jugador_listar.html', {'jugador':jugador})\n\n@login_required\ndef equipo_nuevo(request):\n if request.method == \"POST\":\n formulario = EquipoForm(request.POST)\n if formulario.is_valid():\n equipo = Equipo.objects.create(nombre=formulario.cleaned_data['nombre'], liga = formulario.cleaned_data['liga'])\n for jugador_id in request.POST.getlist('jugadores'):\n competicion = Competencia(jugador_id=jugador_id, equipo_id = equipo.id)\n competicion.save()\n return redirect('equipo_listar')\n else:\n formulario = EquipoForm()\n return render(request, 'equipos/equipo_editar.html', {'formulario': formulario})\n\n@login_required\ndef jugador_nuevo(request):\n if request.method == \"POST\":\n formulario = JugadorForm(request.POST)\n if formulario.is_valid():\n jugador = formulario.save(commit=False)\n jugador.save()\n return redirect('jugador_listar')\n else:\n formulario = JugadorForm()\n return render(request, 'equipos/jugador_editar.html', {'formulario': formulario})\n\n@login_required\ndef jugador_editar(request, pk):\n jugador = get_object_or_404(Jugador, pk=pk)\n if request.method == \"POST\":\n formulario = JugadorForm(request.POST, instance=jugador)\n if formulario.is_valid():\n jugador = formulario.save(commit=False)\n jugador.save()\n return redirect('jugador_detalle', pk=jugador.pk)\n else:\n formulario = JugadorForm(instance=jugador)\n return render(request, 'equipos/jugador_editar.html', {'formulario': formulario})\n\n@login_required\ndef equipo_editar(request, pk):\n equipo = get_object_or_404(Equipo, pk=pk)\n if request.method == \"POST\":\n formulario = EquipoForm(request.POST, instance = equipo)\n if formulario.is_valid:\n equipo = formulario.save(commit=False)\n equipo.save()\n return redirect('equipo_detalle', pk=equipo.pk)\n else:\n formulario = EquipoForm(instance=equipo)\n return render(request, 'equipos/equipo_editar.html', {'formulario':formulario})\n\ndef equipo_detalle(request, pk):\n equipo = get_object_or_404(Equipo, pk=pk)\n return render(request, 'equipos/equipo_detalle.html', {'equipo': equipo})\n\ndef 
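# (aside on equipo_nuevo above: the Competencia rows are inserted one query at
#  a time; Django's bulk_create does it in one, e.g.
#  Competencia.objects.bulk_create(
#      [Competencia(jugador_id=j, equipo_id=equipo.id)
#       for j in request.POST.getlist('jugadores')]))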
jugador_detalle(request, pk):\n jugador = get_object_or_404(Jugador, pk=pk)\n return render(request, 'equipos/jugador_detalle.html', {'jugador': jugador})\n\n@login_required\ndef equipo_remover(request, pk):\n equipo = get_object_or_404(Equipo, pk=pk)\n equipo.delete()\n return redirect('equipo_listar')\n\n@login_required\ndef jugador_remover(request, pk):\n jugador = get_object_or_404(Jugador, pk=pk)\n jugador.delete()\n return redirect('jugador_listar')\n","repo_name":"Josephe23/torneo","sub_path":"equipos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16485020825","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk \nimport adafruit_dht as DHT\nimport gpiozero\nimport adafruit_dht\nimport board\nimport time\n\n\nclass GraugeApp(tk.Tk):\n \n \n def __init__(self):\n tk.Tk.__init__(self)\n tk.Tk.wm_title(self, \"JOHNS MAN CAVE\")\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=\"true\")\n container.grid_rowconfigure(0,weight=1)\n container.grid_columnconfigure(0,weight=1)\n\n self.frames = {}\n\n for F in (StartPage, NextPage):\n frame = F(container,self)\n self.frames[F] = frame\n\n frame.grid(row=0,column=0, sticky=\"nsew\")\n self.show_frame(StartPage)\n\n def show_frame(self,cont):\n frame= self.frames[cont]\n frame.tkraise()\n\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n\n \n self.tempSen = adafruit_dht.DHT11(board.D17)\n self.relay = gpiozero.LED(18)\n #self.actualTemp = IntVar()\n #self.actualTemp.set(self.tempSen.temperature)\n print(self.tempSen.temperature,\"starting Temp\")\n self.pack(fill=BOTH, expand=1)\n self.quitButton = Button(self, text=\"QUIT\", command=self.client_exit)\n self.quitButton.grid(row=1,column=1)\n self.relayButton = Button(self, text=\"relay\", command=self.toggleRelay)\n self.relayButton.grid(row=1, column=2)\n self.lable = Label(self,text=\"TEMP:\")\n self.lable.grid(row=1, column=3)\n self.lableTemp = Label(self, text=\"start\")\n self.lableTemp.grid(row=1, column=4)\n self.temp = 0 \n self.onUpdate()\n \n def onUpdate(self):\n try:\n self.temp = self.tempSen.temperature\n except RuntimeError as i:\n print(\"HANDELED\", i)\n\n self.lableTemp.configure(text=self.temp)\n print(self.temp,\"still trying\")\n self.after(2000, self.onUpdate)\n\n def client_exit(self):\n exit()\n\n def toggleRelay(self):\n if self.relay.value == 0:\n self.relay.on()\n else: self.relay.off()\n \n def tempSend(self):\n return self.tempSen.temperature\n\n\nclass NextPage(tk.Frame):\n def __init__(self,parent,controller):\n tk.Frame.__init__(self,parent)\n pass\n\n\napp = GraugeApp()\n\napp.mainloop()\n","repo_name":"JohnSparham/garageApp","sub_path":"tk2.py","file_name":"tk2.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20971316675","text":"#!/usr/bin/python3\n\"\"\"post requests \"\"\"\n\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = 'http://0.0.0.0:5000/search_user'\n if len(sys.argv) > 1:\n letter = sys.argv[1]\n else:\n letter = \"\"\n\n payload = {'q': letter}\n r = requests.post(url, data=payload)\n try:\n json = r.json()\n # print(json)\n if len(json) > 0:\n print(\"[{}] {}\".format(json.get('id'), json.get('name')))\n else:\n print(\"No result\")\n except ValueError:\n print(\"Not a 
valid JSON\")\n","repo_name":"engemp/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/8-json_api.py","file_name":"8-json_api.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19686104618","text":"from palindrome.doublebase import DoubleBasePalindrome\nfrom random import randint\nimport unittest\n\n\nclass PalindromeTests(unittest.TestCase):\n\n def test_sum_one_million_palindromes(self):\n \"\"\"Sum 1 million double base palindromes\"\"\"\n palindromes = DoubleBasePalindrome()\n result = palindromes.sum_palindrome_numbers()\n self.assertEqual(872187, result)\n\n def test_repr_quality(self):\n \"\"\"Try eval on the output of the repr to create another\n object\"\"\"\n palindromes = DoubleBasePalindrome()\n obj = eval(str(palindromes))\n self.assertEqual(palindromes, obj)\n\n def test_bad_instantiation(self):\n \"\"\"Initialize the object with a negative number\"\"\"\n with self.assertRaises(ValueError):\n palindromes = DoubleBasePalindrome(max_number=-1)\n\n def test_generate_zero(self):\n \"\"\"Generate the palindrome number: zero.\n This is a corner case for the algorithm\"\"\"\n palindromes = DoubleBasePalindrome(max_number=0)\n zero = next(palindromes.palindrome_numbers_generator())\n self.assertEqual(0, zero)\n\n def test_generate_eleven(self):\n \"\"\"Generate the palindrome number: eleven.\n Correctly increment length of palindrome numbers\"\"\"\n palindromes = DoubleBasePalindrome(max_number=11)\n eleven = 0\n for x in palindromes.palindrome_numbers_generator():\n eleven = x\n self.assertEqual(11, eleven)\n\n def test_generate_random_palindrome_number_even_length(self):\n \"\"\"Generate a palindrome number of even length.\"\"\"\n palindromes = DoubleBasePalindrome()\n symmetric_part = str(randint(1, 99999))\n expected = symmetric_part + symmetric_part[::-1]\n number = palindromes.build_palindrome(symmetric_part,\n rtype=lambda x: str(x))\n self.assertEqual(expected, number)\n\n def test_generate_random_palindrome_number_odd_length(self):\n \"\"\"Generate a palindrome number of odd length.\"\"\"\n palindromes = DoubleBasePalindrome()\n symmetric_part = str(randint(1, 99999))\n center_part = str(randint(1, 99999))\n expected = symmetric_part + center_part + symmetric_part[::-1]\n number = palindromes.build_palindrome(symmetric_part,\n center=center_part,\n rtype=lambda x: str(x))\n self.assertEqual(expected, number)\n\n def test_exception_handler(self):\n \"\"\"Raise desired exception\"\"\"\n palindromes = DoubleBasePalindrome(max_number=3)\n number = 4\n exc = ValueError\n with self.assertRaises(exc):\n palindromes.exception_handler(str(number), exc)\n\n def test_double_base_palindrome_generator(self):\n \"\"\"Generate double base palindrome numbers\"\"\"\n palindromes = DoubleBasePalindrome(max_number=9009)\n res = [1, 3, 5, 7, 9, 33, 99, 313, 585, 717, 7447, 9009]\n for x, y in zip(palindromes.double_base_palindrome_generator(), res):\n self.assertEqual(x, y)\n","repo_name":"jam182/palindrome","sub_path":"palindrome/tests/test_palindrome.py","file_name":"test_palindrome.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73524002737","text":"__author__ = 'Cameron Seebach'\n\nimport pyglet\n\nfrom hackus.console import Console\nfrom hackus.system import System\n\nwindow = pyglet.window.Window(800, 
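# (illustration of build_palindrome as exercised by the tests above:
#  symmetric part '123'              -> '123321'   (even length)
#  symmetric part '123', center '9'  -> '1239321'  (odd length))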
600)\n\npyglet.font.add_file('Inconsolata.ttf')\n\nconsole = Console(window, \"Inconsolata\")\n\nsystem = System(console)\n\nwindow.push_handlers(console)\nconsole.schedule()\n\nsystem.boot()\n\npyglet.app.run()\n","repo_name":"cseebach/hackus","sub_path":"hackus/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17885437450","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt \n \ndef VesselDetection(image,orgImage):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) \n Clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8,8))\n ClaheImage = Clahe.apply(image)\n #get kernel ellipse shape\n kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n\n #Morphological Transformation\n #open erosion and dilation\n op = cv2.morphologyEx(ClaheImage, cv2.MORPH_OPEN, kernel)\n #close dilation and erosion\n cl = cv2.morphologyEx(op, cv2.MORPH_CLOSE, kernel)\n\n MorphologyImage = Clahe.apply(cv2.subtract(cl, ClaheImage))\n BloodVessels = NoiseSolving(MorphologyImage,orgImage) \n return BloodVessels\n\ndef NoiseSolving(image,orgImage):\n thresholdValue , thresholdImage = cv2.threshold(image,15,255,cv2.THRESH_BINARY)\t\n mask = np.ones(image.shape , dtype=\"uint8\")\n contours, hierarchy = cv2.findContours(thresholdImage, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n if cv2.contourArea(c) <= 1: \n cv2.drawContours(mask, [c], -1, 0, -1)\n im = cv2.bitwise_and(image, image, mask=mask)\n thresholdValue ,solveNoise = cv2.threshold(im,15,255,cv2.THRESH_BINARY)\t\t\t\n contours, hierarchy = cv2.findContours(solveNoise,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\t\n for c in contours: \n if cv2.contourArea(c) < 5 :\n cv2.drawContours(mask, [c], -1, 0, -1)\t\n blood_vessels = cv2.bitwise_and(solveNoise,solveNoise,mask=mask)\t\n i = 0\n j = 0\n for gr, fin in zip(orgImage, blood_vessels):\n for g, f in zip(gr, fin):\n if(f == 255):\n orgImage[i][j] = 0 \n \n j = j + 1\n j = 0\n i = i + 1 \n \n return orgImage\t\n\ndef GaussianBlurfilter(img, sigmaX): \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n #mask\n height, width, depth = img.shape \n x = int(width/2)\n y = int(height/2) \n circle_img = np.zeros((height, width), np.uint8)\n cv2.circle(circle_img, (x,y), min(x,y), 1, thickness=-1)\n #bit wise and\n img = cv2.bitwise_and(img, img, mask= circle_img )\n #convert to gray\n grayImage = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n #extract non zero index\n x, y = np.nonzero(grayImage)\n #crop\n img=img[x.min():x.max(), y.min():y.max(),:]\n #resize\n if img.shape[0]<1000:\n img = cv2.resize(img, (2000,1500))\n #gaussian blur and weight\n img=cv2.addWeighted(img,4,cv2.GaussianBlur( img , (0,0),sigmaX) ,-4 ,30)\n return img \t\n ","repo_name":"ibrahimAlabdullah/ocular-disease-classification","sub_path":"mySite/blogs/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31768766922","text":"#! 
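# (aside on NoiseSolving above: the final per-pixel loop that zeroes orgImage
#  wherever blood_vessels == 255 can be vectorized with a boolean mask:
#  orgImage[blood_vessels == 255] = 0)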
/usr/local/bin/python3.7\nimport os\nimport sys\nimport time\nimport urllib.parse\nfrom pathlib import Path\nfrom datetime import datetime\nfrom http.client import RemoteDisconnected, IncompleteRead\nfrom multiprocessing import Process, Manager\nfrom ssl import SSLEOFError\nfrom urllib.error import HTTPError\nfrom urllib.request import urlretrieve\nfrom PIL import Image\n\nimport requests\nimport mimetypes\nimport mysql.connector\nfrom newspaper import ArticleException\nimport ib_textcategorization\nimport pandas as pd\n\nimport crawler\nimport newsscraper\nimport textsummarization_baru\nimport news_bot_story\n\nwith open(\"status.txt\") as s:\n first_line = s.readline().rstrip()\n if first_line == \"0\":\n exit()\n\nwith open(\"database.txt\") as f:\n props = [line.rstrip() for line in f]\n\nmydb = mysql.connector.connect(\n host=props[0],\n user=props[1],\n passwd=props[2],\n database=props[3]\n)\nbase_path_img = props[4]\nbase_path_log = props[5]\nTIMEOUT = 3000\ntimenow = \"{}-{}-{}\".format(datetime.now().day,datetime.now().month,datetime.now().year)\ndaily_log = pd.DataFrame(columns=['category','headline','content','probabilities','top3'])\n\ndef translate_category(input:str):\n if input == \"Makanan\":\n return \"Food\"\n\n elif input == \"Hiburan\":\n return \"Entertainment\"\n\n elif input == \"Edukasi\":\n return \"Education\"\n\n elif input == \"Bisnis\":\n return \"Business\"\n\n elif input == \"Travel\":\n return \"Travel\"\n\n elif input == \"Berita\":\n return \"News\"\n\n elif input == \"Lain-Lain\":\n return \"Others\"\n\n elif input == \"Tren\":\n return \"Trending\"\n\n elif input == \"Kesehatan\":\n return \"Health\"\n\n elif input == \"Teknologi\":\n return \"Technology\"\n\n elif input == \"Gaya Hidup\":\n return \"Lifestyle\"\n\n elif input == \"Olahraga\":\n return \"Sports\"\n\n elif input == \"Selebriti\":\n return \"Celebrity\"\n\n elif input == \"Sains\":\n return \"Science\"\n\n elif input == \"Lowongan Pekerjaan\":\n return \"Jobs\"\n\n elif input == \"COVID-19\":\n return \"COVID-19\"\n\ndef retrieve_post_tuple(url: str, post_list: list, unique_id: int, f_pin: str, privacy_flag: int):\n try:\n path = str(Path(sys.argv[0]).parent) + str(os.sep)\n global daily_log\n text_block, title, image_src = crawler.crawl_article(url)\n data = [title + \" \" + text_block]\n category = ib_textcategorization.classify(data)\n\n dataset = pd.DataFrame(data={'category': [category[1]], 'headline': [title], 'content': [text_block],\n 'probabilities': [category[2]],\n 'top3': [category[0]]},columns=['category','headline','content','probabilities','top3'])\n daily_log = daily_log.append(dataset)\n # cat_str = \"\"\n # cat_str = \",\".join(list(map(lambda x: x, category)))\n # y = 0\n # for cat in category:\n # if y == 0:\n # cat_str = \"('\" + cat + \"',\"\n # elif y != 0 and y != len(category) - 1:\n # cat_str = cat_str +\"'\" + cat +\"'\"+ \",\"\n # elif y == len(category) - 1:\n # cat_str = cat_str + \"'\" + cat +\"')\"\n # y = y + 1\n\n datasetAll = pd.DataFrame(data={'category': category[1], 'headline': [title], 'content': [text_block]})\n datasetAll.to_csv(path+'dataset-ib.csv', mode='a', header=False, index=False)\n title_id = textsummarization_baru.translate(title)\n title_2 = \"(en){}(/en)||(id){}(/id)\".format(title,title_id)\n summary = textsummarization_baru.summarize_text_en(text_block.replace(\"\\n\", \" \"), 1.0, 512, 'auto')\n summary_id = textsummarization_baru.summarize_text_id(text_block.replace(\"\\n\", \" \"), 1.0, 512, 'auto')\n summary_2 = 
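# (aside on translate_category above: the elif chain is a plain mapping; a
#  hypothetical equivalent, e.g.
#  CATEGORY_MAP = {"Makanan": "Food", "Berita": "News", "Olahraga": "Sports"}
#  with CATEGORY_MAP.get(name) returning None for unknown labels, matching the
#  chain's implicit fall-through)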
\"(en){}(/en)||(id){}(/id)\".format(summary,summary_id)\n if summary == \"[Error] Error in summarizing article.\" or summary == \"[Cannot summarize the article]\":\n summary_2 = \"-\"\n curtime_milli = int(round(time.time() * 1000))\n img_filename = \"\"\n\n # base_path_img = \"/apps/indonesiabisa/server/image\"\n image_total = \"\"\n if image_src:\n n = 0\n if type(image_src) == list:\n for image in image_src:\n isUnicode = False\n isWebp = False\n extension = os.path.splitext(os.path.basename(image.split(\"?\")[0]))[-1]\n if extension == \".webp\".casefold():\n extension = \".jpg\"\n isWebp = True\n if len(extension) == 0:\n response = requests.get(image)\n content_type = response.headers['content-type']\n new_extension = mimetypes.guess_extension(content_type)\n if str(new_extension).casefold() == \".jpg\" or str(new_extension).casefold() == \".jpeg\":\n extension = str(new_extension)\n else:\n extension = \".jpg\"\n img_filename = \"APST-\" + f_pin + \"-\" + format(curtime_milli, 'X') + \"-\" + str(n) + \"-\" + str(unique_id) + \\\n extension\n full_filename = os.path.join(base_path_img, img_filename)\n for x in image:\n if ord(x) > 127:\n isUnicode = True\n if isUnicode:\n image = urllib.parse.quote(image, safe=\":/\")\n urlretrieve(image, full_filename)\n if isWebp:\n webImage = Image.open(full_filename).convert(\"RGB\")\n webImage.save(full_filename,\"jpeg\")\n # image_total = image_total + img_filename\n n = n + 1\n elif type(image_src) == str:\n isUnicode = False\n isWebp = False\n extension = os.path.splitext(os.path.basename(image_src.split(\"?\")[0]))[-1]\n if extension == \".webp\".casefold():\n extension = \".jpg\"\n isWebp = True\n if len(extension) == 0:\n response = requests.get(image_src)\n content_type = response.headers['content-type']\n new_extension = mimetypes.guess_extension(content_type)\n if str(new_extension).casefold() == \".jpg\" or str(new_extension).casefold() == \".jpeg\":\n extension = str(new_extension)\n else:\n extension = \".jpg\"\n img_filename = \"APST-\" + f_pin + \"-\" + format(curtime_milli, 'X') + \"-\" + str(n) + \"-\" + str(\n unique_id) + \\\n extension\n full_filename = os.path.join(base_path_img, img_filename)\n for x in image_src:\n if ord(x) > 127:\n isUnicode = True\n if isUnicode:\n image_src = urllib.parse.quote(image_src, safe=\":/\")\n urlretrieve(image_src, full_filename)\n if isWebp:\n webImage = Image.open(full_filename).convert(\"RGB\")\n webImage.save(full_filename,\"jpeg\")\n\n post_id = f_pin + str(curtime_milli) + str(unique_id)\n if category:\n post_values = (\n post_id, f_pin, urllib.parse.quote_plus(title_2), urllib.parse.quote_plus(summary_2), curtime_milli,\n privacy_flag,\n img_filename,\n img_filename, curtime_milli, url, 1, curtime_milli,category[0])\n post_list.append(post_values)\n print(\"Success in fetching \" + url)\n except (HTTPError, RemoteDisconnected,ArticleException,IncompleteRead,SSLEOFError):\n print(\"Error in fetching \" + url + \" :\\n\" + str(sys.exc_info()[0]))\n pass\n\n\ndef get_post_news(row: list):\n is_active = row[6]\n if is_active is not 1:\n return\n\n auto_post_id = row[0]\n f_pin = row[1]\n domain = row[2]\n category_id = row[3]\n last_update = row[4]\n privacy = \"3\"\n\n category = \"\"\n if category_id == 4:\n category = \"news\"\n elif category_id == 10:\n category = \"sport\"\n elif category_id == 11:\n category = \"technology\"\n\n latest = datetime.now()\n earliest = last_update\n\n last_update_ins = int(round(datetime.now().timestamp()))\n news = newsscraper.fetch_news_list(domain, 
category, latest, earliest)\n print(\"Fetching \" + str(len(news)) + \" link(s)...\")\n if len(news) > 0:\n with Manager() as manager:\n post_tuple_list = manager.list()\n news_processes = []\n uid = 1\n for link in reversed(news):\n print(\"Checking url \" + link)\n query_check = \"SELECT * from AUTO_POST_LINKS where F_PIN = '\" + f_pin + \"' and URL = '\" + link + \"' limit 1\"\n select_cursor.execute(query_check)\n check_post_result = select_cursor.fetchall()\n if len(check_post_result) == 0:\n p = Process(target=retrieve_post_tuple, args=(link, post_tuple_list, uid, f_pin, privacy))\n uid = uid + 1\n p.start()\n news_processes.append(p)\n # for p in news_processes:\n # p.join()\n #timeout\n start = time.time()\n while time.time() - start <= TIMEOUT:\n if not any(p.is_alive() for p in news_processes):\n # All the processes are done, break now.\n break\n\n time.sleep(30) # Just to avoid hogging the CPU\n else:\n # We only enter this if we didn't 'break' above.\n print(\"timed out, killing all processes\")\n for p in news_processes:\n p.terminate()\n p.join()\n\n print(\"Posting links: \" + str(len(post_tuple_list)))\n query = \"replace into POST(POST_ID, F_PIN, TITLE, DESCRIPTION, CREATED_DATE, PRIVACY, THUMB_ID, FILE_ID, LAST_UPDATE, LINK, FILE_TYPE, SCORE) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for element in post_tuple_list:\n select_cursor.execute(\n query,\n (element[0],element[1],element[2],element[3],element[4],element[5],element[6],element[7],element[8],element[9],element[10],element[11],))\n # mydb.commit()\n print(\"Links posted: \" + str(len(post_tuple_list)))\n\n post_url_tuples = [(post[1], post[9]) for post in post_tuple_list]\n query_cat = \"replace into AUTO_POST_LINKS(F_PIN,URL) values (%s,%s)\"\n for element in post_url_tuples:\n select_cursor.executemany(\n query_cat,\n (element,))\n # mydb.commit()\n\n post_cat_tuples = [(post[0], post[12]) for post in post_tuple_list]\n print(post_cat_tuples)\n # for pid in post_id_list:\n # post_cat_tuples.append((pid, category_id))\n query_cat = \"replace into CONTENT_CATEGORY(POST_ID,CATEGORY) SELECT %s,ID from CATEGORY where CODE = %s\"\n for element in post_cat_tuples:\n for category in element[1]:\n print(\"Category : {}\".format(category))\n translated_category = translate_category(category)\n print(\"Category Translate : {}\".format(translated_category))\n select_cursor.execute(\n query_cat,\n (element[0],translated_category))\n print(\"Success Insert to Content Category\")\n # mydb.commit()\n\n print(post_tuple_list)\n if len(post_tuple_list) > 0:\n select_cursor.execute(\n \"update AUTO_POST set LAST_UPDATE = from_unixtime(\" + str(last_update_ins) + \") where ID = \" + str(\n auto_post_id))\n print(str(last_update_ins))\n mydb.commit()\n pass\n\n\nselect_cursor = mydb.cursor()\nselect_cursor.execute(\"SELECT * FROM AUTO_POST where IS_ACTIVE = 1\")\nauto_post_result = select_cursor.fetchall()\n\nrow_processes = []\nif(len(auto_post_result) > 0):\n TIMEOUT = min(300, 3000 / len(auto_post_result))\n\nfor q_row in auto_post_result:\n get_post_news(q_row)\nnews_bot_story.news_bot_story()\ndaily_log.to_csv(base_path_log + 'dataset-ib-{}.csv'.format(timenow),mode='w',header=True,index=False)\n# p = Process(target=get_post_news, args=(q_row,))\n# uid = uid + 1\n# p.start()\n# row_processes.append(p)\n# for p in row_processes:\n# p.join(1200)\n# if p.is_alive():\n# p.terminate()\n# 
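# (aside on the image handling in retrieve_post_tuple above:
#  Image.open(f).convert("RGB").save(f, "jpeg") is what turns a downloaded
#  .webp into the .jpg its new filename promises, and guess_extension is
#  trusted only when it yields .jpg/.jpeg, falling back to ".jpg" otherwise)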
p.join()\n","repo_name":"kevhnmay94/TextCategorization","sub_path":"auto_posting.py","file_name":"auto_posting.py","file_ext":"py","file_size_in_byte":12881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"11556268390","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# Load external module: MagNet\nimport sys, os\nproject_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(project_dir)\n\nfrom externals.MagNet.setup_cifar import CIFAR\nfrom externals.MagNet.utils import prepare_data\nfrom externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n\nimport numpy as np\nimport os\n\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Lambda\nfrom keras.activations import softmax\n\nclass ClassifierWrapper:\n def __init__(self, model):\n \"\"\"\n Keras classifier wrapper.\n Note that the wrapped classifier should spit logits as output.\n \"\"\"\n layer_id = len(model.layers)-2\n self.model = Model(inputs=model.layers[0].input, outputs=model.layers[layer_id].output)\n self.softmax = Sequential()\n self.softmax.add(Lambda(lambda X: softmax(X, axis=1), input_shape=(10,)))\n\n def classify(self, X, option=\"logit\", T=1):\n if option == \"logit\":\n return self.model.predict(X)\n if option == \"prob\":\n logits = self.model.predict(X)/T\n return self.softmax.predict(logits)\n\n def print(self):\n return \"Classifier:\"+self.path.split(\"/\")[-1]\n\nclass MagNetDetector:\n def __init__(self, model, detector_name):\n classifier = ClassifierWrapper(model)\n\n autoencoder_model_fpath = os.path.join(project_dir, \"downloads/MagNet/defensive_models/CIFAR\")\n\n reformer = SimpleReformer(autoencoder_model_fpath)\n id_reformer = IdReformer()\n\n # Note: we may swap the two.\n reconstructor = id_reformer\n prober = reformer\n # reconstructor = reformer\n # prober = id_reformer\n\n eb_detector = AEDetector(autoencoder_model_fpath, p=1)\n db_detector_I = DBDetector(reconstructor, prober, classifier, T=10)\n db_detector_II = DBDetector(reconstructor, prober, classifier, T=40)\n\n detector_dict = dict()\n detector_dict[\"db-I\"] = db_detector_I\n detector_dict[\"db-II\"] = db_detector_II\n detector_dict['eb'] = eb_detector\n\n self.operator = Operator(CIFAR(), classifier, detector_dict, reformer)\n\n def train(self, X=None, Y=None, fpr=None):\n # CIFAR-10\n drop_rate={\"db-I\": 0.01, \"db-II\": 0.01, \"eb\": 0.005}\n # drop_rate={\"db-I\": fpr, \"db-II\": fpr, \"eb\": fpr}\n print(\"\\n==========================================================\")\n print(\"Drop Rate:\", drop_rate)\n self.thrs = self.operator.get_thrs(drop_rate)\n print(\"Thresholds:\", self.thrs)\n\n\n def test(self, X):\n all_pass, detector_breakdown = self.operator.filter(X, self.thrs)\n print (\"detector_breakdown\", detector_breakdown)\n ret_detection = np.array([ False if i in all_pass else True for i in range(len(X)) ])\n return ret_detection, ret_detection\n\nif __name__ == '__main__':\n magnet_detector = MagNetDetector()\n magnet_detector.train()\n\n X = magnet_detector.operator.data.test_data\n Y_detected, _ = magnet_detector.test(X)\n\n print (\"False positive rate: %f\" % 
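# (aside on ClassifierWrapper.classify above: option="prob" divides the logits
#  by the temperature T before the softmax; a larger T flattens the
#  distribution, which the divergence-based detectors (T=10, T=40) use when
#  comparing predictions on an input with predictions on its
#  autoencoder-reformed version)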
(np.sum(Y_detected)/float(len(X))))\n","repo_name":"mzweilin/EvadeML-Zoo","sub_path":"detections/magnet_cifar.py","file_name":"magnet_cifar.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"57"} +{"seq_id":"20474185981","text":"import copy\n\n\nclass Board:\n \"\"\"\n Class representing board on which the game is being played.\n For each position on board the coordinates are represented as (x, y)\n Top left position is (0, 0),\n x is incrementing as we go right,\n y is incremented as we go down,\n bottom-down position is (board.size - 1, board.size - 1)\n \"\"\"\n\n def __init__(self,\n size,\n data=None,\n crossed_vertical_lines=None,\n crossed_horizontal_lines=None,\n crossed_diagonal_lines_left=None,\n crossed_diagonal_lines_right=None,\n c_points=0,\n p_points=0):\n\n # dimension of board\n self.size = size\n\n # data is two dimensional array that contains selected by players coordinates\n if data is not None:\n self.data = data\n else:\n self.data = Board.init_data(size)\n\n # list of X coordinates in range (0, size-1) of vertical lines that were already crossed\n if crossed_vertical_lines is not None:\n self.crossed_vertical_lines = crossed_vertical_lines\n else:\n self.crossed_vertical_lines = []\n\n # list of Y coordinates in range (0, size-1) of horizontal lines that were already crossed\n if crossed_horizontal_lines is not None:\n self.crossed_horizontal_lines = crossed_horizontal_lines\n else:\n self.crossed_horizontal_lines = []\n\n # list of tuples (x,y) where each element is top left coordinate of diagonal line that was already crossed\n if crossed_diagonal_lines_left is not None:\n self.crossed_diagonal_lines_top_left = crossed_diagonal_lines_left\n else:\n self.crossed_diagonal_lines_top_left = []\n\n # list of tuples (x,y) where each element is top right coordinate of diagonal line that was already crossed\n if crossed_diagonal_lines_right is not None:\n self.crossed_diagonal_lines_top_right = crossed_diagonal_lines_right\n else:\n self.crossed_diagonal_lines_top_right = []\n\n self.computer_points = c_points\n self.player_points = p_points\n\n def __str__(self):\n res = \"\"\n for y in range(self.size):\n for x in range(self.size):\n res += str(self.data[y][x])\n res += \"\\n\"\n res = res[:-1]\n return res\n\n @classmethod\n def init_data(cls, dimension):\n \"\"\"\n Creates empty initial board NxN size\n :param dimension: Number\n :return: array[N][N] filled with zeros\n \"\"\"\n return [[0 for _ in range(dimension)] for _ in range(dimension)]\n\n @staticmethod\n def load_from_file(filename):\n \"\"\"\n Returns board prepared in text file.\n This method doesn't fills the \"crossed lines\" fields so counting points from such board may be incorrect.\n This also doesn't include players points\n TODO: need to implement method that will fill crossed lines for board\n :param filename: String\n :return:\n \"\"\"\n data = list()\n file = open(filename, 'r')\n for line in file:\n elements = line.split(\" \")\n data.append(list(map(int, elements)))\n file.close()\n return Board(len(data), data)\n\n def to_string(self):\n \"\"\"\n Returns string representation of board\n :return:\n \"\"\"\n result = \"\"\n result += (\"c_points: \" + str(self.computer_points) + \"\\n\")\n result += (\"p_points: \" + str(self.player_points) + \"\\n\")\n for y in range(self.size):\n for x in range(self.size):\n result += (str(self.data[y][x]) + \" \")\n result += \"\\n\"\n return result\n\n def insert_pos(self, 
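# (usage sketch for this Board class, assuming a 4x4 board:
#  b = Board(4)
#  pts = b.insert_pos(0, 0)   # mark visual coords (0, 0), returns points won
#  b.count_points(1, 1)       # dry run: points a move at (1, 1) would score;
#                             # the board is restored afterwards)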
x, y, val=1):\n \"\"\"\n Inserts mark at position (x, y) and returns points achieved by this move\n Also remembers crossed lines to prevent them from being taken into account in future\n Coords are visual coords - not array coords\n \"\"\"\n self.data[y][x] = val\n\n vertical_points = self.__check_vertical_lines(x)\n if vertical_points == self.size:\n self.crossed_vertical_lines.append(x)\n\n horizontal_points = self.__check_horizontal_lines(y)\n if horizontal_points == self.size:\n self.crossed_horizontal_lines.append(y)\n\n diagonal_left_points = self.__check_diagonal_left(x, y)\n if diagonal_left_points >= 2:\n self.crossed_diagonal_lines_top_left.append(self.__get_top_left_diagonal_coords(x, y))\n\n diagonal_right_points = self.__check_diagonal_right(x, y)\n if diagonal_right_points >= 2:\n self.crossed_diagonal_lines_top_right.append(self.__get_top_right_diagonal_coords(x, y))\n\n return vertical_points + horizontal_points + diagonal_left_points + diagonal_right_points\n\n def get_position(self, x, y):\n \"\"\"\n Returns \"0\" or \"1\" value of board at coords (x,y)\n Coords are in visual format - not array positions\n :param x:\n :param y:\n :return:\n \"\"\"\n return self.data[y][x]\n\n def count_points(self, x, y):\n \"\"\"\n Counts how many points one can achieve from given position.\n Returns points achived\n :param x:\n :param y:\n :return: int\n \"\"\"\n prev_val = self.get_position(x, y)\n self.data[y][x] = 1\n points = 0\n points += self.__check_vertical_lines(x)\n points += self.__check_horizontal_lines(y)\n points += self.__check_diagonal_lines(x, y)\n self.data[y][x] = prev_val\n return points\n\n def __check_vertical_lines(self, x):\n \"\"\"\n Returns tuple(x, N) if found newly crossed vertical line.\n Remembers found line.\n If nothing found, returns False\n :return: tuple(x, N) of False\n \"\"\"\n if x in self.crossed_vertical_lines:\n return 0\n\n for i in range(self.size):\n if self.data[i][x] == 0:\n return 0\n return self.size\n\n def __check_horizontal_lines(self, y):\n \"\"\"\n Returns tuple(x, N) if found newly crossed vertical line.\n Remembers found line.\n If nothing found, returns False\n :return: tuple(x, N) of False\n \"\"\"\n if y in self.crossed_horizontal_lines:\n return 0\n\n for i in range(self.size):\n if self.data[y][i] == 0:\n return 0\n return self.size\n\n def __check_diagonal_lines(self, x, y):\n \"\"\"\n Counts how many points can be achieved from diagonal lines by taking move at (x, y)\n :param x:\n :param y:\n :return:\n \"\"\"\n points = 0\n points += self.__check_diagonal_left(x, y)\n points += self.__check_diagonal_right(x, y)\n return points\n\n def __check_diagonal_left(self, x, y):\n \"\"\"\n Counts how many points can be achieved by taking move at (x, y) from crossing diagonal left-to-right\n :param x:\n :param y:\n :return: int points\n \"\"\"\n x, y = self.__get_top_left_diagonal_coords(x, y)\n if (x, y) in self.crossed_diagonal_lines_top_left:\n return 0\n count = 0\n while x < self.size and y < self.size:\n count += 1\n if self.get_position(x, y) == 0:\n return 0\n x += 1\n y += 1\n if count < 2: # 2 is minimal amount of diagonal points\n return 0\n return count\n\n def __check_diagonal_right(self, x, y):\n \"\"\"\n Counts how many points can be achieved by taking move at (x, y) from crossing diagonal right-to-left\n :param x:\n :param y:\n :return: int points\n \"\"\"\n x, y = self.__get_top_right_diagonal_coords(x, y)\n if (x, y) in self.crossed_diagonal_lines_top_right:\n return 0\n count = 0\n while x >= 0 and y < self.size:\n 
count += 1\n if self.get_position(x, y) == 0:\n return 0\n x -= 1\n y += 1\n if count < 2: # 2 is minimal amount of diagonal points\n return 0\n return count\n\n def __get_top_left_diagonal_coords(self, x, y):\n while x > 0 and y > 0:\n x -= 1\n y -= 1\n return x, y\n\n def __get_top_right_diagonal_coords(self, x, y):\n while x < (self.size - 1) and y > 0:\n x += 1\n y -= 1\n return x, y\n\n def get_available_positions(self):\n \"\"\"\n Returns list with tuples (x, y) for each available (not crossed, 0 value) position on board\n :return:\n \"\"\"\n available_positions = []\n for y in range(self.size):\n for x in range(self.size):\n if self.get_position(x, y) == 0:\n available_positions.append((x, y))\n return available_positions\n\n def copy(self):\n \"\"\"\n Returns exact value copy of a Board instance\n :return:\n \"\"\"\n return Board(self.size,\n copy.deepcopy(self.data),\n copy.deepcopy(self.crossed_vertical_lines),\n copy.deepcopy(self.crossed_horizontal_lines),\n copy.deepcopy(self.crossed_diagonal_lines_top_left),\n copy.deepcopy(self.crossed_diagonal_lines_top_right),\n self.computer_points,\n self.player_points)\n","repo_name":"drugs-4-3/Stratego---AI-player","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":9586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5489893305","text":"\nfrom os import path\n\nimport re\n\n\nclass processmod():\n\n def __init__(self, path):\n # self.filapath = os.path.join(os.path.split(os.path.realpath(__file__))[0],path)\n self.filapath = os.path.join(os.path.dirname(__file__), path)\n print(\"路径创建成功\", self.filapath)\n\n def deletecn(self):\n with open(self.filapath, 'r', encoding='utf-8') as file:\n txt = file.read()\n newtxt = self.remove_punctuation(txt)\n with open(self.filapath, 'w', encoding='utf-8') as file:\n file.write(newtxt)\n\n def remove_punctuation(self, line):\n #\\u4e00-\\u9fa5 为汉字范围\n # rule = re.compile(ur\"[^a-zA-Z0-9\\u4e00-\\u9fa5]\")\n rule = re.compile('[\\u4e00-\\u9fa5]') # 匹配汉字\n line = rule.sub('', line) # 将汉字转换为空\n print(line)\n return line\n\nif __name__ == \"__main__\":\n a = processmod('a.txt')\n a.deletecn()\n","repo_name":"fudong1111/test","sub_path":"去除汉字.py","file_name":"去除汉字.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27711305422","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory\nfrom werkzeug import secure_filename\nimport json\nimport os\nimport time\n\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\ndb = SQLAlchemy(app)\n\n# This is the path to the upload directory\napp.config['UPLOAD_FOLDER'] = 'uploads/'\n# These are the extension that we are accepting to be uploaded\napp.config['ALLOWED_EXTENSIONS'] = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\nclass Notes(db.Model):\n __tablename__ = 'notes'\n gcm_id = db.Column(db.String, primary_key=True)\n title = db.Column(db.String(100))\n tags = db.Column(db.String(100))\n\n def __init__(self,gcm_id,title,tags):\n # Initializes the fields with entered data\n self.gcm_id = gcm_id\n self.title = title\n self.tags = tags\n\nclass User(db.Model):\n __tablename__ = 'user4'\n gcm_id = db.Column(db.String, primary_key=True)\n name = db.Column(db.String(100))\n email = 
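# (note on the 去除汉字 / "remove Chinese characters" record above: it imports
#  `from os import path` but then calls os.path.join / os.path.dirname, so `os`
#  is undefined at runtime; the minimal fix is `import os`, or rename the
#  shadowing `path` parameter and use path.join(path.dirname(__file__), ...))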
\nclass User(db.Model):\n    __tablename__ = 'user4'\n    gcm_id = db.Column(db.String, primary_key=True)\n    name = db.Column(db.String(100))\n    email = db.Column(db.String(100))\n    tag_line = db.Column(db.String(200))\n    dob = db.Column(db.String(200))\n    address = db.Column(db.String(200))\n    ins_type = db.Column(db.String(200))\n    ins_name = db.Column(db.String(200))\n    subjects = db.Column(db.String(200))\n\n    def __init__(self,gcm_id,name,email,tag_line,address,dob,ins_type,ins_name,subjects):\n        # Initializes the fields with entered data\n        self.gcm_id = gcm_id\n        self.name = name\n        self.email = email\n        self.tag_line = tag_line\n        self.address = address\n        self.dob = dob\n        self.ins_type = ins_type\n        self.ins_name = ins_name\n        self.subjects = subjects\n\ndef db_init():\n    db.create_all()\n    comment = User(\"a1\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\")\n    db.session.add(comment)\n    db.session.commit()\n\n@app.route('/view_all')\ndef foo():\n    arr = []\n    for u in db.session.query(User).all():\n        arr.append(str(u.__dict__))\n    return str(arr)\n\n@app.route('/add')\ndef add_user():\n    gcm_id = request.args.get('gcm_id')\n    name = request.args.get('name') or ''\n    email = request.args.get('email') or 'aaasd'\n    tag_line = request.args.get('tag_line') or ''\n    address = request.args.get('address') or ''\n    dob = request.args.get('dob') or ''\n    ins_type = request.args.get('ins_type') or ''\n    ins_name = request.args.get('ins_name') or ''\n    subjects = request.args.get('subjects') or ''\n\n    user_object = User(gcm_id,name,email,tag_line,address,dob,ins_type,ins_name,subjects)\n    db.session.add(user_object)\n    db.session.commit()\n    return \"{'status':200,\\n\\t'response':'User has been added'}\"\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']\n\n@app.route('/test-upload')\ndef upload_test():\n    return render_template('upload.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n    gcm_id = request.args.get('gcm_id')\n    title = request.args.get('title')\n    tags = request.args.get('tags') or 'food,icecream,biology'\n\n    file = request.files['file']\n    if file and allowed_file(file.filename):\n        filename = secure_filename(file.filename)\n        filename = \"%d_%s\"%(int(time.time()),filename)\n        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n        img_path = \"http://tosc.in:5002/uploads/%s\"%filename\n\n        notes_object = Notes(gcm_id,title,tags)\n        db.session.add(notes_object)\n        db.session.commit()\n        return \"{'status':200,\\n\\t'response':'%s'}\"%img_path\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'],\n                               filename)\n\n\nif __name__ == '__main__':\n    #db_init()\n    app.debug = True\n    app.run(host='0.0.0.0',port=5002)\n","repo_name":"teamOSC/stud.in_server_backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"519273579","text":"from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces.siteroot import IPloneSiteRoot\nfrom ftw.subsite.interfaces import ILanguages\nfrom ftw.subsite.interfaces import ISubsite\nfrom zope.component import adapter\nfrom zope.component import adapts\nfrom zope.component import getMultiAdapter\nfrom zope.interface import Interface\nfrom zope.interface import implementer\nfrom zope.interface import implements\n\n\ndef translate_language(context, language_code):\n    ltool = getToolByName(context, 'portal_languages')\n    info = ltool.getAvailableLanguageInformation().get(language_code, 
None)\n if info is not None:\n return info.get(u'native', None)\n return None\n\n\n@implementer(ILanguages)\n@adapter(Interface, Interface)\ndef inherit_languages(context, request):\n parent = aq_parent(aq_inner(context))\n return getMultiAdapter((parent, request), ILanguages)\n\n\nclass SubsiteLanguages(object):\n implements(ILanguages)\n adapts(ISubsite, Interface)\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def get_current_language(self):\n language_code = self.context.force_language\n return {'url': self.context.absolute_url(),\n 'title': translate_language(self.context, language_code),\n 'code': language_code}\n\n def get_related_languages(self):\n results = []\n\n for relation in self.context.language_references:\n subsite = relation.to_object\n lang_code = subsite.force_language\n if not lang_code:\n continue\n\n results.append({\n 'url': subsite.absolute_url(),\n 'title': translate_language(self.context, lang_code),\n 'code': lang_code})\n\n if self.context.link_site_in_languagechooser:\n portal_url = getToolByName(self.context, 'portal_url')\n ltool = getToolByName(self.context, 'portal_languages')\n lang_code = ltool.getDefaultLanguage()\n\n results.append({\n 'url': portal_url(),\n 'title': translate_language(self.context, lang_code),\n 'code': lang_code})\n\n results.sort(key=lambda item: item.get('title'))\n return results\n\n\nclass PloneSiteLanguages(object):\n implements(ILanguages)\n adapts(IPloneSiteRoot, Interface)\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def get_current_language(self):\n ltool = getToolByName(self.context, 'portal_languages')\n lang_code = ltool.getDefaultLanguage()\n return {'url': self.context.absolute_url(),\n 'title': translate_language(self.context, lang_code),\n 'code': lang_code}\n\n def get_related_languages(self):\n catalog = getToolByName(self.context, 'portal_catalog')\n results = []\n\n for brain in catalog(object_provides=ISubsite.__identifier__):\n subsite = brain.getObject()\n if not subsite.link_site_in_languagechooser:\n continue\n\n lang_code = subsite.force_language\n if not lang_code:\n continue\n\n results.append({\n 'url': subsite.absolute_url(),\n 'title': translate_language(self.context, lang_code),\n 'code': lang_code})\n\n results.sort(key=lambda item: item.get('title'))\n return results\n","repo_name":"4teamwork/ftw.subsite","sub_path":"ftw/subsite/languages.py","file_name":"languages.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"20223493989","text":"import math\n\ndef init_method_param(method, data=None, k=None, cp=None):\n \"\"\"\n Get method parameters based on approximate nearest neighbor algorithm and data (shape, vector)\n\n Input: name of method as string, data as np.array ([num_items][vector_length])\n Output: method_param as directory. 
Keys corresponding to the method-specific parameters\n \"\"\"\n\n #if customp\n\n if method == \"annoy\":\n n_items, vector_length = data.shape\n mp = {}\n\n if cp['search_k']:\n mp[\"search_k\"] = cp['search_k']\n else:\n mp[\"search_k\"] = -1\n #print(\"n_trees:\" + str(cp['n_trees']))\n #print(type(cp['n_trees']))\n if cp['n_trees']:\n mp[\"n_trees\"] = int(cp['n_trees'])\n else:\n mp[\"n_trees\"] = 5 + int(round((n_items) ** 0.5 / 20))\n\n elif method == \"nearpy\":\n n_items, vector_length = data.shape\n mp = {}\n if cp['n_bits']:\n mp['n_bits'] = cp['n_bits']\n else:\n mp['n_bits'] = min(vector_length, 20)\n #mp['n_bits'] = 20\n\n if cp['hash_counts']:\n mp['hash_counts'] = cp['hash_counts']\n else:\n mp['hash_counts'] = min(vector_length, 20)\n #mp['hash_counts'] = 20\n\n elif method == \"hnsw\":\n mp = {}\n if cp['M']:\n # Reasonable value between 5-100\n M = cp['M']\n else:\n M = int(k / 1.8)\n # find auto-values depending on n_items & dim\n\n if cp['hnsw_efConstruction']:\n # Reasonable value between 100-2000\n efC = cp['hnsw_efConstruction']\n else:\n efC = max(k * 0.8, 100)\n\n if cp['post']:\n #postprocessing:\n #0 = no, 1 = some, 2 = more\n post = cp['post']\n else:\n post = 0\n\n mp[\"index_param\"] = {'M': M, 'efConstruction': efC, 'post' : post}\n\n if cp['hnsw_efSearch']:\n # Reasonable value between 100-2000\n efS = cp['hnsw_efSearch']\n else:\n efS = max(k * 0.8, 100)\n mp['query_param'] = {'efSearch': efS}\n\n elif method == \"sw-graph\":\n mp = {}\n if cp['NN']:\n # Reasonable value between 5-100\n NN = cp['NN']\n else:\n NN = int(k / 1.8)\n\n if cp['swg_efConstruction']:\n # Reasonable value between 100-2000\n efC = cp['swg_efConstruction']\n else:\n efC = max(k * 0.8, 100)\n\n mp[\"index_param\"] = {'NN': NN, 'efConstruction': efC}\n\n if cp['swg_efSearch']:\n # Reasonable value between 100-2000\n efS = cp['swg_efSearch']\n else:\n efS = max(k * 0.8, 100)\n mp['query_param'] = {'efSearch': efS}\n\n elif method == \"napp\":\n n_items, vector_length = data.shape\n mp = {}\n\n if cp['numPivot']:\n numPivot = cp['numPivot']\n else:\n numPivot = int(math.sqrt(n_items))\n\n if cp['numPivotIndex']:\n numPivotIndex = cp['numPivotIndex']\n else:\n numPivotIndex = int(math.sqrt(n_items))\n\n if cp['hashTrickDim']:\n hashTrickDim = cp['hashTrickDim']\n mp[\"index_param\"] = {'numPivot':numPivot,\n 'numPivotIndex':numPivotIndex, 'hashTrickDim': hashTrickDim}\n else:\n hashTrickDim = None\n mp[\"index_param\"] = {'numPivot':numPivot,\n 'numPivotIndex':numPivotIndex}\n\n #mp['query_param'] = None\n\n else:\n print(\"Error: b not found in method parameter file\")\n return\n\n return mp\n","repo_name":"cstelmach/multi-ann-tsne-widget","sub_path":"bakk/additional_files/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"27480578814","text":"# def factorial(n):\n# if n == 0:\n# return 1\n# else:\n# return n * factorial(n-1)\n \n \n# def fibonacci(n):\n# if n == 0:\n# return 0\n# elif n == 1:\n# return 1\n# else:\n# return fibonacci(n-1) + fibonacci(n-2)\n \n \n#give the sum of all the numbers in the list\n\n# def sum_list(l): \n\n## Binary Search using recursion\n\n# To find whether k is present in array arr\ndef binary_search(arr, left, right, k):\n if(left>right):\n return -1 # k is not present in the array\n else:\n mid = int((left+right)/2)\n if(arr[mid]==k):\n return mid\n elif(k>arr[mid]):\n # Element is present in the right half of the 
array\n            return binary_search(arr, mid+1, right, k)\n        else:\n            # Element is present in the left half of the array\n            return binary_search(arr, left, mid-1, k)\n\n# print(binary_search([1,2,3,4,5,6,7,8,9,10], 0, 9, 10))\n\n\n#give the sum of all the numbers in the list\n\ndef sum_list(l):\n    if len(l)==0:\n        return 0\n    elif len(l)==1:\n        return l[0]\n    else:\n        return l[0] + sum_list(l[1:])\n\n\nprint(sum_list([1,2,3,4,5,6,7,8,9,10]))\n","repo_name":"bjain8751/ADH_ES113","sub_path":"31jan.py","file_name":"31jan.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"}
{"seq_id":"28078624856","text":"from typing import Dict, Any, Optional\n\nfrom atcodertools.codegen.code_style_config import CodeStyleConfig\nfrom atcodertools.codegen.models.code_gen_args import CodeGenArgs\nfrom atcodertools.codegen.template_engine import render\nfrom atcodertools.fmtprediction.models.format import Pattern, SingularPattern, ParallelPattern, TwoDimensionalPattern, \\\n    Format\nfrom atcodertools.fmtprediction.models.type import Type\nfrom atcodertools.fmtprediction.models.variable import Variable\n\n\ndef _loop_header(var: Variable, for_second_index: bool):\n    if for_second_index:\n        index = var.second_index\n        loop_var = \"j\"\n    else:\n        index = var.first_index\n        loop_var = \"i\"\n\n    return \"for(int {loop_var} = 0 ; {loop_var} < {length} ; {loop_var}++){{\".format(\n        loop_var=loop_var, length=index.get_length())\n\n\nclass CppCodeGenerator:\n\n    def __init__(self, format_: Optional[Format[Variable]],\n                 config: CodeStyleConfig):\n        self._format = format_\n        self._config = config\n\n    def generate_parameters(self) -> Dict[str, Any]:\n        if self._format is None:\n            return dict(prediction_success=False)\n\n        return dict(formal_arguments=self._formal_arguments(),\n                    actual_arguments=self._actual_arguments(),\n                    input_part=self._input_part(),\n                    prediction_success=True)\n\n    def _input_part(self):\n        lines = []\n        for pattern in self._format.sequence:\n            lines += self._render_pattern(pattern)\n        return \"\\n{indent}\".format(indent=self._indent(1)).join(lines)\n\n    def _convert_type(self, type_: Type) -> str:\n        if type_ == Type.float:\n            return \"long double\"\n        elif type_ == Type.int:\n            return \"long long\"\n        elif type_ == Type.str:\n            return \"std::string\"\n        else:\n            raise NotImplementedError\n\n    def _get_declaration_type(self, var: Variable):\n        ctype = self._convert_type(var.type)\n        for _ in range(var.dim_num()):\n            ctype = 'std::vector<{}>'.format(ctype)\n        return ctype\n\n    def _actual_arguments(self) -> str:\n        \"\"\"\n        :return the string form of actual arguments e.g. \"N, K, a\"\n        \"\"\"\n        return \", \".join([\n            v.name if v.dim_num() == 0 else 'std::move({})'.format(v.name)\n            for v in self._format.all_vars()\n        ])\n\n    def _formal_arguments(self):\n        \"\"\"\n        :return the string form of formal arguments e.g. \"int N, int K, std::vector<int> a\"\n        \"\"\"\n        return \", \".join([\n            \"{decl_type} {name}\".format(\n                decl_type=self._get_declaration_type(v), name=v.name)\n            for v in self._format.all_vars()\n        ])\n\n    def _generate_declaration(self, var: Variable):\n        \"\"\"\n        :return: Create declaration part E.g. array[1..n] -> std::vector<int> array = std::vector<int>(n-1+1);\n        \"\"\"\n        if var.dim_num() == 0:\n            dims = []\n        elif var.dim_num() == 1:\n            dims = [var.first_index.get_length()]\n        elif var.dim_num() == 2:\n            dims = [\n                var.first_index.get_length(),\n                var.second_index.get_length()\n            ]\n        else:\n            raise NotImplementedError\n\n        if len(dims) == 0:\n            ctor = ''\n        elif len(dims) == 1:\n            ctor = '({})'.format(dims[0])\n        else:\n            ctor = '({})'.format(dims[-1])\n            ctype = self._convert_type(var.type)\n            for dim in dims[-2::-1]:\n                ctype = 'std::vector<{}>'.format(ctype)\n                ctor = '({}, {}{})'.format(dim, ctype, ctor)\n\n        line = \"{decl_type} {name}{constructor};\".format(\n            name=var.name,\n            decl_type=self._get_declaration_type(var),\n            constructor=ctor)\n        return line\n\n    def _input_code_for_var(self, var: Variable) -> str:\n        name = self._get_var_name(var)\n        if var.type == Type.float:\n            return 'scanf(\"%Lf\",&{name});'.format(name=name)\n        elif var.type == Type.int:\n            return 'scanf(\"%lld\",&{name});'.format(name=name)\n        elif var.type == Type.str:\n            return 'std::cin >> {name};'.format(name=name)\n        else:\n            raise NotImplementedError\n\n    @staticmethod\n    def _get_var_name(var: Variable):\n        name = var.name\n        if var.dim_num() >= 1:\n            name += \"[i]\"\n        if var.dim_num() >= 2:\n            name += \"[j]\"\n        return name\n\n    def _render_pattern(self, pattern: Pattern):\n        lines = []\n        for var in pattern.all_vars():\n            lines.append(self._generate_declaration(var))\n\n        representative_var = pattern.all_vars()[0]\n        if isinstance(pattern, SingularPattern):\n            lines.append(self._input_code_for_var(representative_var))\n        elif isinstance(pattern, ParallelPattern):\n            lines.append(_loop_header(representative_var, False))\n            for var in pattern.all_vars():\n                lines.append(\"{indent}{line}\".format(\n                    indent=self._indent(1),\n                    line=self._input_code_for_var(var)))\n            lines.append(\"}\")\n        elif isinstance(pattern, TwoDimensionalPattern):\n            lines.append(_loop_header(representative_var, False))\n            lines.append(\"{indent}{line}\".format(indent=self._indent(1),\n                                                 line=_loop_header(\n                                                     representative_var,\n                                                     True)))\n            for var in pattern.all_vars():\n                lines.append(\"{indent}{line}\".format(\n                    indent=self._indent(2),\n                    line=self._input_code_for_var(var)))\n            lines.append(\"{indent}}}\".format(indent=self._indent(1)))\n            lines.append(\"}\")\n        else:\n            raise NotImplementedError\n\n        return lines\n\n    def _indent(self, depth):\n        return self._config.indent(depth)\n\n\nclass NoPredictionResultGiven(Exception):\n    pass\n\n\ndef main(args: CodeGenArgs) -> str:\n    code_parameters = CppCodeGenerator(args.format,\n                                       args.config).generate_parameters()\n    return render(args.template,\n                  mod=args.constants.mod,\n                  yes_str=args.constants.yes_str,\n                  no_str=args.constants.no_str,\n                  **code_parameters)\n","repo_name":"whitepaperdog/wangjunrui-s-code","sub_path":"custom_code_generator.py","file_name":"custom_code_generator.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"}
{"seq_id":"15839988352","text":"\"\"\"\nRuns Default tests\nAvailable Modules: http://testinfra.readthedocs.io/en/latest/modules.html\n\n\"\"\"\nimport os\nimport testinfra.utils.ansible_runner\n\nTESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(\n    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_kibana_is_installed(host):\n    \"\"\"\n    Tests that kibana is installed\n    \"\"\"\n    kibana_package = \"kibana\"\n\n    kibana = host.package(kibana_package)\n    assert kibana.is_installed\n\n
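\n# testinfra's host.service() reads systemd state in the next check, covering both runtime status and boot enablement.\ndef 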
test_kibana_running_and_enabled(host):\n \"\"\"\n Tests that kibana is running and enabled\n \"\"\"\n kibana = host.service(\"kibana\")\n assert kibana.is_running\n assert kibana.is_enabled\n","repo_name":"joshuacherry/ansible-role-kibana","sub_path":"molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70260417460","text":"import webapp2\nimport jinja2\nimport os\nfrom collections import namedtuple\nimport datagen as dg\nimport StringIO\nimport json\n\n\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render())\n\nclass Download(webapp2.RequestHandler):\n def get(self):\n patterns = self.request.get('numberOfRecords', '')\n n_nodes = self.request.get('numberOfNodes', '')\n n_events = self.request.get('numberOfEvents', '')\n n_years = self.request.get('numberOfYears', '')\n stddev = self.request.get('stddev', 0.0)\n copies = self.request.get('copies', 1)\n events = json.loads(self.request.get('events'))\n\n patterns = int(patterns) if patterns.isdigit() else None\n n_nodes = int(n_nodes) if n_nodes.isdigit() else None\n n_events = int(n_events) if n_events.isdigit() else None\n n_years = int(n_years) if n_years.isdigit() else None\n stddev = float(stddev) if stddev.isdigit() else None\n copies = int(copies) if copies.isdigit() else None\n events = self._prepare_events(events)\n\n Args = namedtuple('DataGenArgs', 'num_of_patterns num_of_events num_of_nodes num_of_years only_points stddev copies events')\n args = Args(num_of_patterns=patterns, num_of_events=n_events, num_of_nodes=n_nodes,\n num_of_years=n_years, only_points=False, stddev=stddev, copies=copies,\n events=events)\n\n\n result = StringIO.StringIO()\n dg.run(args, result)\n\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(result.getvalue())\n\n def _prepare_events(self, events):\n for event in events:\n event_class = event['class']\n if event_class == 'Milestone':\n event['class'] = dg.Milestone\n elif event_class == 'Interval':\n event['class'] = dg.Interval\n else:\n raise Exception('Unespected class %s' %event_class)\n return events\n\napp = webapp2.WSGIApplication([('/', MainPage),\n ('/download', Download)],\n debug=True)\n","repo_name":"crispamares/eventdatagenerator","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33411407353","text":"# importing modules\nimport sys\nsys.path.append('/usr/local/lib/python3.6/dist-packages')\n\nimport face_recognition\nimport cv2\nimport numpy as np\nimport os\n\n# Folder in which image is saved is considered to be label\n\nDATA_DIR = 'data'\nTRAINING_DIR = 'training'\nTESTING_DIR = 'testing'\nTRAINING_PATH = os.path.join(DATA_DIR, TRAINING_DIR)\nTESTING_PATH = os.path.join(DATA_DIR, TESTING_DIR)\n\nnames=[]\nencodings=[]\n\n\n# TRAINING NAMES AND ENCODING ASSOCIATION USING TRANSFER LEARNING\nprint(\"Beginning Training\")\nfor filename in os.listdir(TRAINING_PATH):\n label = filename[:-4]\n FILE_PATH = os.path.join(TRAINING_PATH, filename)\n image_file = face_recognition.load_image_file(FILE_PATH)\n face_encoding = 
face_recognition.face_encodings(image_file)[0]\n names.append(label)\n encodings.append(face_encoding)\n\nprint(\"Training Complete. Starting Testing.\")\n\ncorrect_answers=0\ntotal_answers=0\nmodel_answer = []\n\n# TESTING RESULTS\nfor each_folder in os.listdir(TESTING_PATH):\n target = each_folder\n TARGET_DIR = os.path.join(TESTING_PATH,each_folder)\n for each_file in os.listdir(TARGET_DIR):\n INPUT_PATH = os.path.join(TARGET_DIR,each_file)\n input_image = face_recognition.load_image_file(INPUT_PATH)\n output_encoding = face_recognition.face_encodings(input_image)[0]\n #calculating nearest distance among all known encodings\n face_distances = face_recognition.face_distance(encodings, output_encoding)\n matches = face_recognition.compare_faces(encodings, output_encoding)\n best_match_index = np.argmin(face_distances)\n answer = \"Unknown\"\n if matches[best_match_index]:\n answer = names[best_match_index]\n total_answers += 1\n if answer == target:\n correct_answers +=1\n print(\"Answer: \" + answer + \" \"*(30-len(answer)) + \"Target: \" + target)\n\nprint(\"Accuracy: \" + str(correct_answers) + \"/\" + str(total_answers))\n\n","repo_name":"sahil3Vedi/FaceID-Digiledge","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41815101789","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n def dfs(_head):\n if _head is None:\n return; \n \n linkedList.append(_head.val)\n dfs(_head.next); \n \n linkedList = [];\n dfs(head)\n linkedList.sort(reverse=True);\n \n prev : ListNode = None\n node = ListNode(''); \n for linked in linkedList:\n node = ListNode(linked);\n node.next = prev; \n prev = node; \n \n return node\n ","repo_name":"YoujeongPark/AlgorithmStudy","sub_path":"LeetCode/148. Sort List.py","file_name":"148. 
Sort List.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27882908529","text":"# Define a function that can accept two strings as input and concatenate them and then print it in console.\r\n\r\ndef sentence(a, b):\r\n return f\"{a} {b}\"\r\n\r\nexit = 'n'\r\nwhile exit == 'n':\r\n words = input(\"Enter two words separate by whitespace: \").split(' ')\r\n \r\n a, b = words\r\n print(a)\r\n print(b)\r\n \r\n print(sentence(a, b))\r\n \r\n exit = input(\"Exit (y/n): \")\r\n ","repo_name":"gdpp/python_100_exercises","sub_path":"day5/exercise_29.py","file_name":"exercise_29.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41758802776","text":"fruit={}\nstringA = input()\nwhile stringA!=\"None\":\n lst = stringA.split() #列表被更新\n if lst[0] not in fruit: #键\n fruit[lst[0]]=[1,eval(lst[1])] #值是列表的形式\n else:\n fruit[lst[0]][0]+=1 #购买次数+1\n fruit[lst[0]][1]+=eval(lst[1]) #购买金额+1 \n stringA = input()\n# lst1=sorted(fruit.items())\nprint(sorted(fruit.items))\nlst1 = sorted(fruit ,key = lambda x:(-x[0][0],x[0][1])) \ndic = dict(lst1)\nfor k in dic:\n print(k,dic[k][0],dic[k][1])","repo_name":"troyxxf/pythonAssistant","sub_path":"陈柯燃水果.py","file_name":"陈柯燃水果.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31242460352","text":"import random\nimport threading\nimport time\n\nfrom environment.environment import Environment\nfrom environment.serverside.competition import Competition\nimport socket\n\nfrom environment.serverside.player import NetworkPlayer\n\n\nclass Server(object):\n def __init__(self, listen_addr, competition_config_path, env: Environment):\n print(\"Starting server\")\n self.comp = Competition(competition_config_path)\n self.ip, self.port = listen_addr\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind(listen_addr)\n self.socket.listen(1)\n self.connected_players = dict()\n self.connected_lock = threading.Lock()\n self.searching = list()\n self.searching_lock = threading.Lock()\n self.comp_lock = threading.Lock()\n self.active_games = dict()\n self.env = env\n self.queue_manager = threading.Thread(target=self.game_queue_manager)\n self.running = True\n\n def run(self):\n self.queue_manager.start()\n try:\n print(\"Waiting for connections\")\n while True:\n connection, client_address = self.socket.accept()\n\n command = connection.recv(4096)\n command = command.decode(\"utf-8\")\n\n if command.startswith('GET'):\n self.handle_http(connection)\n elif command.startswith(\"CONNECT\"):\n self.handle_connect(connection, command)\n else:\n print(\"Received invalid command: \", command)\n connection.close()\n\n except KeyboardInterrupt:\n print(\"Closing Server!\")\n self.running = False\n self.comp.exit()\n self.socket.close()\n exit()\n\n def handle_http(self, conn):\n s = \"\"\n for id, score in self.comp.get_leaderboard():\n s += \"%s: %.2f
      \\n\"%(id, score)\n html = \"

      Players and scores:

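\n    # NOTE: content-length should count the encoded bytes; len(html) only matches while the page is pure ASCII.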
      %s\"%(s,)\n conn.sendall((\"HTTP/1.1 200 OK\\nContent-Type: text/html\\ncontent-length: %d\\n\\n\"%(len(html,)) + html).encode('utf-8'))\n conn.close()\n\n def handle_connect(self, conn, message):\n message = message.split(\"\\n\")[0]\n split = message.split(\" \")\n id = split[1]\n print(\"Player \", id, \" connected\")\n with self.connected_lock:\n self.comp.register_player(id)\n self.connected_players[id] = NetworkPlayer(id, conn, self)\n with self.searching_lock:\n self.searching.append(id)\n\n def game_queue_manager(self):\n while self.running:\n with self.searching_lock:\n with self.comp_lock:\n while len(self.searching) >= 2:\n p1 = random.choice(self.searching)\n #print(\"Matchmaking with \", p1)\n\n p2 = self.comp.find_best_match(p1, self.searching)\n if p1 not in self.connected_players:\n print(\"Removing disconnected player from queue: \", p1)\n self.searching.remove(p1)\n break\n if p2 not in self.connected_players:\n print(\"Removing disconnected player from queue: \", p2)\n self.searching.remove(p2)\n break\n #print(self.searching, p1, p2)\n self.searching.remove(p1)\n self.searching.remove(p2)\n\n for p in [self.connected_players[p1], self.connected_players[p2]]:\n p.game_start()\n\n #print(self.connected_players)\n game = self.env.start_new_game([self.connected_players[p1], self.connected_players[p2]], self)\n self.active_games[game] = threading.Thread(target=game.play_game)\n self.active_games[game].start()\n time.sleep(0.001)\n\n def end_game(self, winners, losers, game):\n winner_ids, loser_ids = list([p.id for p in winners]), list([p.id for p in losers])\n #print(\"Game end in server: \", winner_ids, loser_ids)\n for p in winners + losers:\n p.game_end(winner_ids, loser_ids)\n\n with self.comp_lock:\n self.comp.register_win(winner_ids, loser_ids)\n del self.active_games[game]\n del game\n\n with self.searching_lock:\n self.searching += list([p.id for p in winners]) + list([p.id for p in losers])\n #print(\"Game closed on Server\")\n\n def end_game_draw(self, players, game):\n\n pids = list([p.id for p in players])\n for p in players:\n p.game_end([], pids)\n\n with self.comp_lock:\n self.comp.register_draw(pids)\n if game in self.active_games:\n del self.active_games[game]\n del game\n\n with self.searching_lock:\n self.searching += pids\n #print(\"Game closed on Server\")\n\n def deregister_player(self, p: NetworkPlayer):\n print(\"Player %s disconnected\" % p.id)\n with self.connected_lock and self.comp_lock and self.searching_lock:\n del self.connected_players[p.id]\n if p in self.searching:\n self.searching.remove(p)\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n s = Server(('localhost', 1337), \"leaderboard.json\", Environment())\n s.run()","repo_name":"Gerryflap/RL_net","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34044848487","text":"\"\"\"\n프로그래머스 - 하노이의 탑\n\"\"\"\n\n\ndef move(start, end):\n return [start, end]\n\n\ndef hanoi(n, start, end, sub, answer):\n if n == 1:\n answer.append(move(start, end))\n return\n \n hanoi(n - 1, start, sub, end, answer)\n answer.append(move(start, end))\n hanoi(n - 1, sub, end, start, answer)\n\n\ndef solution(n):\n answer = []\n hanoi(n, 1, 3, 2, answer)\n\n return answer\n\n\nif __name__ == \"__main__\":\n n = 5\n 
print(solution(n))\n","repo_name":"jonusHK/algorithm_data_structure","sub_path":"recursive/recursive_7.py","file_name":"recursive_7.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6760848302","text":"import sys\nsys.path.append(\"../../crawlers\")\n\nfrom lxml import html, etree\nimport requests, re\nimport pandas as pd\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nendpoint = \"https://edemann.dk/liste-danske-postnumre-og-byer\"\n\ndef collect(url):\n page = requests.get(url)\n tree = html.fromstring(page.content)\n main = tree.xpath(\"//*/tbody/tr\")\n data = pd.DataFrame([])\n for i in range(1, len(main)):\n base = \"//*/tbody/tr[%s]\" % str(i)\n zipcode = tree.xpath(base + \"/td[1]/text()\")[0]\n city = tree.xpath(base + \"/td[2]/text()\")[0]\n data = data.append(pd.DataFrame({'zipcode': zipcode, 'city': city}, index=[0]), ignore_index=True)\n data.to_csv('zipcodes.csv', sep=';')\n\nif __name__ == '__main__':\n collect(endpoint)\n","repo_name":"adamingwersen/AutoEncoderZone","sub_path":"datatools/crawlers/zipcodes/get_zipcodes.py","file_name":"get_zipcodes.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36047074727","text":"import boto3\nfrom aws_lambda_powertools import Logger\n\nfrom inspector.utils.flatten import flatten\n\nlogger = Logger()\n\ndef verify_budgets_exist(client, region, account_id):\n results = []\n\n paginator = client.get_paginator(\"describe_budget_notifications_for_account\")\n\n notifications_for_account = flatten([\n result.get(\"BudgetNotificationsForAccount\", [])\n for result in paginator.paginate(AccountId=account_id)\n ])\n\n account_has_notification_for_forecast = False\n account_has_notification_for_actual = False\n\n for notifications in notifications_for_account:\n for notification in notifications.get(\"Notifications\", []):\n if notification[\"NotificationType\"] == \"ACTUAL\":\n account_has_notification_for_actual = True\n elif notification[\"NotificationType\"] == \"FORECASTED\":\n account_has_notification_for_forecast = True\n\n if not account_has_notification_for_forecast:\n results.append({\n \"rule_name\": \"forecast_budget_with_notification\",\n \"report\": {\n \"message\": \"No forecast budget with an notification exists\",\n \"remedy\": \"Create a forecast budget with an associated notification\",\n \"resource_id\": account_id,\n \"region\": region\n }\n })\n\n if not account_has_notification_for_actual:\n results.append({\n \"rule_name\": \"actual_budget_with_notification\",\n \"report\": {\n \"message\": \"No actual budget with an notification exists\",\n \"remedy\": \"Create a actual budget with an associated notification\",\n \"resource_id\": account_id,\n \"region\": region\n }\n })\n\n return results\n\ndef inspect(credentials, region):\n logger.info(f\"inspecting budget resources in {region}\")\n\n results = []\n\n client = boto3.client(\n \"budgets\",\n region_name=region,\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"]\n )\n\n account_id = boto3.client(\n \"sts\",\n region_name=region,\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"]\n ).get_caller_identity().get(\"Account\")\n\n 
results.extend(verify_budgets_exist(client, region, account_id))\n\n return results\n","repo_name":"AlexChesters/horatio","sub_path":"apps/inspector/inspector/services/budgets.py","file_name":"budgets.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72953942259","text":"# Imports\n\nfrom flask import render_template, request, g, redirect, url_for, session\nfrom flask_migrate import Migrate\nfrom flask_oidc import OpenIDConnect\nfrom okta import UsersClient\nimport os\nfrom werkzeug.utils import secure_filename\nfrom models.models import *\nfrom commands import *\nfrom utils import *\n\nmigrate = Migrate(app, db)\n\n\napp.config[\"OIDC_CLIENT_SECRETS\"] = \"client_secrets.json\"\napp.config[\"OIDC_COOKIE_SECURE\"] = False\napp.config[\"OIDC_CALLBACK_ROUTE\"] = \"/oidc/callback\"\napp.config[\"OIDC_SCOPES\"] = [\"openid\", \"email\", \"profile\"]\napp.config[\"SECRET_KEY\"] = \"yfrhkjdvknkjxhvbxhjfdgkdfjgk\"\noidc = OpenIDConnect(app)\n\nokta_client = UsersClient(\"https://dev-664783.oktapreview.com\", \"00pIz5wsJSahcbI1dp-czYIeocQdBw2ffUIYMjE7lP\")\n\nUPLOAD_FOLDER = './static/uploads'\nRETRIEVE_FOLDER = '/static/uploads'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.before_request\ndef before_request():\n if oidc.user_loggedin:\n g.user = okta_client.get_user(oidc.user_getfield(\"sub\"))\n session['role_name'] = get_logged_in_user_role(g)\n else:\n g.user = None\n\n\n@app.route(\"/login\")\n@oidc.require_login\ndef login():\n return redirect(url_for(\".profile\"))\n\n\n@app.route(\"/logout\")\ndef logout():\n oidc.logout()\n return redirect(url_for(\".index\"))\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n# renders the profile page if the user is logged in.\n@app.route(\"/profile\", methods=['GET', 'POST'])\n@oidc.require_login\ndef profile():\n states = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n if request.method == 'POST':\n edit_existing_user(request.form)\n user_details = get_logged_in_user_details(g)\n return render_template(\"profile.html\", user=user_details, states=states, success='true')\n user_details = get_logged_in_user_details(g)\n return render_template(\"profile.html\", user=user_details, states=states, success='false')\n\n\n# renders the dashboard page if the user is logged in.\n@app.route(\"/dashboard\")\n@oidc.require_login\ndef dashboard():\n uploaded_files = get_uploaded_files()\n return render_template(\"dashboard.html\", uploads=uploaded_files)\n\n\n# renders the search page if the user is logged in.\n# handles search functionality in post method\n@app.route(\"/search\", methods=['GET', 'POST'])\n@oidc.require_login\ndef search():\n if request.method == 'POST':\n first_name = request.form['input_first_name']\n last_name = request.form['input_last_name']\n users = get_users_by_filter(first_name, last_name)\n return render_template(\"search.html\", users=users)\n users = get_all_users()\n return render_template(\"search.html\", users=users)\n\n\n# renders the upload page if the user is logged in.\n# handles upload functionality in post 
method\n@app.route(\"/upload\", methods=['GET', 'POST'])\n@oidc.require_login\ndef upload():\n if session['role_name'] != 'admin':\n return render_template(\"error.html\")\n if request.method == 'POST':\n if 'file' not in request.files:\n text = request.form['input_upload_text']\n add_new_file('text', '', '', text)\n return render_template(\"upload.html\", success='text_true')\n file = request.files['file']\n ext = file.filename.rsplit('.', 1)[1].lower()\n name = file.filename.rsplit('.', 0)[0]\n file_path = RETRIEVE_FOLDER + '/' + file.filename\n\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n add_new_file(ext, name, file_path, '')\n return render_template(\"upload.html\", success='upload_true')\n else:\n return render_template(\"upload.html\")\n\n\n# change role of an employee to manager\n@app.route('/assign_manager_role', methods=['POST'])\n@oidc.require_login\ndef assign_manager_role():\n email = request.get_data().decode('utf-8')\n change_manager_role(email)\n users = get_all_users()\n return render_template(\"search.html\", users=users)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=80, ssl_context=('cert.pem', 'key.pem'))\n","repo_name":"RJEMS/RJEMS-Enterprise","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"16160125245","text":"from abc import ABC, abstractmethod\nfrom typing import Any\nfrom hashlib import sha256\nimport math\n\nfrom defichain.exceptions.transactions import RawTransactionError, NotYetSupportedError, DeserializeError\nfrom defichain.networks import Network\nfrom defichain.transactions.address import Address\nfrom defichain.transactions.keys import PrivateKey, KeyError, PublicKey\nfrom defichain.transactions.utils import Converter, Calculate\nfrom defichain.transactions.constants import SIGHASH, AddressTypes\n\nfrom .txbase import TxBase\nfrom .txinput import TxBaseInput, TxInput, TxP2PKHInput, TxP2SHInput, TxP2WPKHInput, TxCoinbaseInput\nfrom .txoutput import TxBaseOutput, TxOutput, TxAddressOutput, TxDataOutput, TxDefiOutput, TxCoinbaseOutput\nfrom .witness import WitnessHash, Witness\nfrom .sign import sign_segwit_input, sign_legacy_input\n\n\nclass BaseTransaction(TxBase, ABC):\n\n def __init__(self, inputs: [], outputs: [], lockTime: int = 0):\n self._version, self._marker, self._flag, self._sigHash, self._inputs, self._outputs, self._lockTime = None, None, None, None, [], [], None\n self._signed = False\n self._coinbase = False\n self._segwit = False\n self.set_inputs(inputs)\n self.set_outputs(outputs)\n self.set_lockTime(lockTime)\n\n # Abstract Methods\n @abstractmethod\n def sign(self, network: Any, private_keys: [str]) -> \"Transaction\":\n pass\n\n @abstractmethod\n def _analyse(self):\n pass\n\n def unsigned(self) -> str:\n return Converter.bytes_to_hex(self.bytes_unsigned())\n\n def bytes_unsigned(self) -> bytes:\n # Version\n result = self.get_bytes_version()\n\n # Inputs\n result += Calculate.write_compactSize(len(self.get_inputs()), \"bytes\")\n for input in self.get_inputs():\n result += input.get_bytes_unsignedInput()\n\n # Outputs\n result += Calculate.write_compactSize(len(self.get_outputs()), \"bytes\")\n for output in self.get_outputs():\n result += output.bytes()\n\n # Coinbase Add On\n if self.is_coinbase():\n result += Converter.int_to_bytes(1, 1) # Number of Elements\n result += Converter.int_to_bytes(32, 1) # 
Length of Zeros\n result += Converter.int_to_bytes(0, 32) # Zeros\n\n # LockTime\n result += self.get_bytes_lockTime()\n return result\n\n # Calculated Information\n def get_inputsValue(self) -> \"int | None\":\n result = 0\n for input in self.get_inputs():\n if input.get_value() is None:\n return None\n result += input.get_value()\n return result\n\n def get_outputsValue(self) -> int:\n result = 0\n for outputs in self.get_outputs():\n result += outputs.get_value()\n return result\n\n def get_fee(self) -> \"int | None\":\n return self.get_inputsValue() - self.get_outputsValue() if self.get_inputsValue() is not None else None\n\n def get_weight(self) -> int:\n return 3 * len(self.bytes_unsigned()) + self.size()\n\n def get_vSize(self) -> int:\n return math.ceil(len(self.bytes_unsigned()) + (self.size() - len(self.bytes_unsigned())) / 4)\n\n def get_unspent(self, p2wpkh_for_p2sh_input: [] = None) -> [TxInput]:\n outputs = self.get_outputs()\n unspent = []\n for vout in range(len(outputs)):\n if isinstance(outputs[vout], TxAddressOutput):\n addressType = Address.from_address(outputs[vout].get_address()).get_addressType()\n\n if addressType == AddressTypes.P2PKH:\n unspent.append(TxP2PKHInput(self.get_txid(), vout, outputs[vout].get_address(),\n outputs[vout].get_value()))\n elif addressType == AddressTypes.P2SH:\n if not isinstance(p2wpkh_for_p2sh_input, list):\n raise TypeError(\"The addresses must be passed via a list\")\n if not p2wpkh_for_p2sh_input:\n raise RawTransactionError(\"To create the unspents, you need to provide the corresponding P2WPKH\"\n \" address for the P2SH address.\")\n unspent.append(TxP2SHInput(self.get_txid(), vout, p2wpkh_for_p2sh_input.pop(),\n outputs[vout].get_value()))\n elif addressType == AddressTypes.P2WPKH:\n unspent.append(TxP2WPKHInput(self.get_txid(), vout, outputs[vout].get_address(),\n outputs[vout].get_value()))\n return unspent\n\n # Get Information\n def get_version(self) -> int:\n return self._version\n\n def get_marker(self) -> int:\n return self._marker\n\n def get_flag(self) -> int:\n return self._flag\n\n def get_inputs(self) -> []:\n return self._inputs\n\n def get_outputs(self) -> []:\n return self._outputs\n\n def get_sigHash(self) -> int:\n return self._sigHash\n\n def get_witnessHash(self) -> [WitnessHash]:\n return self._witnessHash\n\n def get_witness(self) -> [Witness]:\n return self._witness\n\n def get_lockTime(self) -> int:\n return self._lockTime\n\n def get_txid(self) -> str:\n return Converter.bytes_to_hex(bytes(reversed(Calculate.dHash256(self.bytes_unsigned()))))\n\n def get_hash(self):\n return Converter.bytes_to_hex(bytes(reversed(Calculate.dHash256(self.bytes()))))\n\n def get_bytes_version(self) -> bytes:\n return Converter.int_to_bytes(self.get_version(), 4)\n\n def get_bytes_marker(self) -> bytes:\n return Converter.int_to_bytes(self.get_marker(), 1)\n\n def get_bytes_flag(self) -> bytes:\n return Converter.int_to_bytes(self.get_flag(), 1)\n\n def get_bytes_inputs(self) -> bytes:\n result = b''\n for input in self._inputs:\n result += input.bytes()\n return result\n\n def get_bytes_outputs(self) -> bytes:\n result = b''\n for output in self._outputs:\n result += output.bytes()\n return result\n\n def get_bytes_sigHash(self) -> bytes:\n return Converter.int_to_bytes(self.get_sigHash(), 4)\n\n def get_bytes_lockTime(self) -> bytes:\n return Converter.int_to_bytes(self.get_lockTime(), 4)\n\n def is_signed(self) -> bool:\n return self._signed\n\n def is_segwit(self) -> bool:\n return self._segwit\n\n def is_coinbase(self) -> 
bool:\n return self._coinbase\n\n # Set Information\n def set_version(self, version: int) -> None:\n self._version = version\n\n def set_marker(self, marker: int) -> None:\n self._marker = marker\n\n def set_flag(self, flag: int) -> None:\n self._flag = flag\n\n def set_inputs(self, inputs: []) -> None:\n self._inputs = inputs\n self._analyse()\n\n def set_outputs(self, outputs: []) -> None:\n self._outputs = outputs\n self._analyse()\n\n def set_sigHash(self, sigHash: int) -> None:\n self._sigHash = sigHash\n\n def set_witnessHash(self, witnessHash: []) -> None:\n self._witnessHash = witnessHash\n\n def set_witness(self, witness: []) -> None:\n self._witness = witness\n\n def set_lockTime(self, lockTime: int) -> None:\n self._lockTime = lockTime\n\n def set_bytes_version(self, version: bytes) -> None:\n self.set_version(Converter.bytes_to_int(version))\n\n def set_bytes_marker(self, marker: bytes) -> None:\n self.set_marker(Converter.bytes_to_int(marker))\n\n def set_bytes_flag(self, flag: bytes) -> None:\n self.set_flag(Converter.bytes_to_int(flag))\n\n def set_bytes_sigHash(self, sigHash: bytes) -> None:\n self.set_sigHash(Converter.bytes_to_int(sigHash))\n\n def set_bytes_lockTime(self, lockTime: bytes) -> None:\n self.set_lockTime(Converter.bytes_to_int(lockTime))\n\n # Append information\n def add_input(self, input: TxBaseInput) -> None:\n input.verify()\n self._analyse()\n self._inputs.append(input)\n\n def add_output(self, output: TxBaseOutput) -> None:\n output.verify()\n self._analyse()\n self._outputs.append(output)\n\n def add_witnessHash(self, witnessHash: WitnessHash) -> None:\n witnessHash.verify()\n self._witnessHash.append(witnessHash)\n\n def add_witness(self, witness: Witness) -> None:\n witness.verify()\n self._witness.append(witness)\n\n\nclass Transaction(BaseTransaction):\n\n @staticmethod\n def deserialize(network: Any, hex: str) -> \"Transaction\":\n \"\"\"\n Deserializes unsigned and signed transaction.\n\n Creates the respective transaction object.\n\n Limitations: Not all raw transactions can be deserialized for now. 
For example, it's not possible to deserialize\n multi signature transactions.\n\n :param network: (required) the corresponding network from the raw transaction\n :type network: Network\n :param hex: (required) the raw transaction as hexadecimal sting\n :type hex: str\n :return: Transaction\n \"\"\"\n try:\n tx = Transaction([], [])\n position = 0\n\n # Set Version\n version = Converter.hex_to_int(hex[position: position + 8])\n tx.set_version(version)\n position += 8\n\n # Set Marker and Flag if exists\n if hex[position: position + 4] == \"0001\":\n marker = Converter.hex_to_int(hex[position: position + 2])\n tx.set_marker(marker)\n position += 2\n flag = Converter.hex_to_int(hex[position: position + 2])\n tx.set_flag(flag)\n position += 2\n tx._segwit = True\n\n # Find number of inputs\n length_numberOfInputs = Calculate.length_compactSize(hex[position: position + 2])\n numberOfInputs = Calculate.read_compactSize(hex[position: position + length_numberOfInputs * 2])\n position += length_numberOfInputs * 2\n\n # Iterate through all inputs\n inputs = []\n for _ in range(numberOfInputs):\n length_scriptSig = Converter.hex_to_int(hex[position + 72: position + 72 + 2]) * 2\n input = TxInput.deserialize(network, hex[position: position + 82 + length_scriptSig])\n inputs.append(input)\n position += 82 + length_scriptSig\n\n # Find number of outputs\n length_numberOfOutputs = Calculate.length_compactSize(hex[position: position + 2])\n numberOfOutputs = Calculate.read_compactSize(hex[position: position + length_numberOfOutputs * 2])\n position += length_numberOfOutputs * 2\n\n # Iterate through all outputs\n outputs = []\n for _ in range(numberOfOutputs):\n scriptLengthSize = Calculate.length_compactSize(hex[position + 16: position + 18])\n length_script = Calculate.read_compactSize(hex[position + 16: position + 16 + scriptLengthSize * 2]) * 2\n\n # If transaction version is 1 or 2 there is no token id at the back of an output\n if tx.get_version() == 1 or tx.get_version() == 2:\n length_script -= 2\n\n output = TxOutput.deserialize(network, hex[position: position + 20 + length_script])\n outputs.append(output)\n position += 20 + length_script\n\n # If transaction is a segwit transaction: Iterate through all witnisses\n witnesses = []\n if tx.is_segwit:\n while hex[position: position + 2] == \"02\" or hex[position: position + 2] == \"00\":\n if hex[position: position + 2] == \"00\":\n if len(hex) - position <= 8:\n break\n position += 2\n elif hex[position: position + 2] == \"02\":\n position += 2\n length_signature = Converter.hex_to_int(hex[position: position + 2]) * 2\n length_publicKey = Converter.hex_to_int(\n hex[position + 2 + length_signature: position + 2 + length_signature + 2]) * 2\n length_witness = 2 + length_signature + 2 + length_publicKey\n witness = Witness.deserialize(network, hex[position: position + length_witness])\n witnesses.append(witness)\n position += length_witness\n tx._signed = True\n\n # Match all witnisses with the corresponding input\n if tx.is_signed():\n count_witness = 0\n count_inputs = 0\n for input in inputs:\n if isinstance(input, TxP2SHInput):\n publicKey_address = PublicKey(network,\n witnesses[count_witness].get_publicKey()).p2wpkh_address()\n script_address = Address.from_scriptPublicKey(network, input.get_scriptSig()[2:]).get_address()\n if publicKey_address == script_address:\n input.set_witness(witnesses[count_witness])\n count_witness += 1\n else:\n raise DeserializeError(\"The given p2sh input script signature does not correspond with the \"\n \"given 
witness\")\n elif isinstance(input, TxInput) and not input.get_scriptSig():\n newInput = input.to_p2wpkhInput()\n newInput.set_witness(witnesses[count_witness])\n inputs[count_inputs] = newInput\n count_witness += 1\n count_inputs += 1\n\n tx.set_inputs(inputs)\n tx.set_outputs(outputs)\n tx.set_witness(witnesses)\n\n # Coinbase Transaction\n if tx.is_coinbase():\n numberOfCoinbaseElements = Converter.hex_to_int(hex[position: position + 2])\n position += 2\n\n length_coinbase = Converter.hex_to_int(hex[position: position + 2]) * 2\n position += 2\n\n coinbaseElement = Converter.hex_to_int(hex[position: position + length_coinbase])\n position += length_coinbase\n\n # LockTime\n lockTime = Converter.hex_to_int(hex[position: position + 8])\n tx.set_lockTime(lockTime)\n position += 8\n\n return tx\n except NotYetSupportedError as e:\n print(\"This transaction can not be decoded, because the DefiTx is not yet implemented\")\n except Exception as e:\n print(\"This transaction can not be decoded, either this transaction is not supported or there is an error \"\n \"in the code.\")\n\n def __init__(self, inputs: [], outputs: [], lockTime: int = 0):\n \"\"\"\n A transaction object that holds inputs and outputs and can sign these with a provides private key.\n\n :param inputs: (required) the inputs to spend\n :type inputs: [TxInput]\n :param outputs: (required) the outputs of where to spend\n :type outputs: [TxOutput]\n :param lockTime: (optional) the time to lock the inputs after they have been spent\n :type lockTime: int\n \"\"\"\n super().__init__(inputs, outputs, lockTime)\n self._analyse()\n\n def __bytes__(self):\n # Version\n result = self.get_bytes_version()\n\n # Marker and Flag (Only when Segwit Tx and Signed)\n if self.is_segwit() and self.is_signed():\n result += self.get_bytes_marker()\n result += self.get_bytes_flag()\n\n # Inputs\n result += Calculate.write_compactSize(len(self.get_inputs()), \"bytes\")\n for input in self.get_inputs():\n result += input.bytes()\n\n # Outputs\n result += Calculate.write_compactSize(len(self.get_outputs()), \"bytes\")\n for output in self.get_outputs():\n result += output.bytes()\n\n # Witness\n if self.is_signed():\n numberOfWitnessInputs = 0\n for input in self.get_inputs():\n if isinstance(input, TxP2SHInput) or isinstance(input, TxP2WPKHInput):\n numberOfWitnessInputs += 1\n\n # Append 00 if not a segwit input -> if there are more than one non segwit inputs at the end, just append\n # the double zeros one time\n stack = []\n if numberOfWitnessInputs > 0:\n for input in self.get_inputs():\n if isinstance(input, TxP2SHInput) or isinstance(input, TxP2WPKHInput):\n for byte in stack:\n result += byte\n stack = []\n result += Converter.int_to_bytes(2, 1)\n result += input.get_witness().bytes()\n elif isinstance(input, TxP2PKHInput):\n stack.append(Converter.int_to_bytes(0, 1))\n for byte in stack:\n result += byte\n\n # Coinbase Add On\n if self.is_coinbase():\n result += Converter.int_to_bytes(1, 1) # Number of Elements\n result += Converter.int_to_bytes(32, 1) # Length of Zeros\n result += Converter.int_to_bytes(0, 32) # Zeros\n\n # LockTime\n result += self.get_bytes_lockTime()\n\n return result\n\n def sign(self, network: Any, private_keys: [str]) -> \"Transaction\":\n \"\"\"\n Signs the raw transaction with the given private keys\n\n :param network: (required) the corresponding network from the raw transaction\n :type network: Network\n :param private_keys: (required) a list of private keys. 
If there is only one private key in the list, it will\n be used to sign all inputs of the transaction. If you have inputs witch use different private keys, provide a\n separate private key for every input in the list. The private keys have to be provided in the same\n order as the inputs\n :type private_keys: [str]\n :return: Transaction\n \"\"\"\n if not isinstance(private_keys, list):\n raise RawTransactionError(\"The given private keys have to be parsed in a list: [key, key, ...]\")\n\n # Check if wif and calc hexadecimal private key and public key\n keys = []\n for key in private_keys:\n if PrivateKey.is_privateKey(network, key):\n key = PrivateKey(network, privateKey=key)\n elif PrivateKey.is_wif(network, key):\n key = PrivateKey(network, wif=key)\n else:\n raise KeyError(\"Given private key is not valid\")\n keys.append({\"private\": key.get_privateKey(), \"public\": key.get_publicKey()})\n\n index = 0\n scriptSignatures = []\n for input in self.get_inputs():\n # Check Keys for Input\n if len(private_keys) == 1:\n privKey = keys[0][\"private\"]\n pubKey = keys[0][\"public\"]\n else:\n if len(private_keys) <= index:\n raise RawTransactionError(\"The transaction could not from be signed. Not enough private keys \"\n \"were provided in the list\")\n else:\n privKey = keys[index][\"private\"]\n pubKey = keys[index][\"public\"]\n\n # Sign different Inputs\n if isinstance(input, TxP2PKHInput):\n SIGHASH_ALL = Converter.int_to_bytes(SIGHASH, 4)\n\n input.set_scriptSig(Address.from_address(input.get_address()).get_scriptPublicKey())\n h = sha256(self.bytes() + SIGHASH_ALL).digest()\n\n signature = sign_legacy_input(privKey, h, SIGHASH_ALL)\n scriptSignature = Converter.int_to_hex(int(len(signature) / 2), 1) + signature + \\\n Converter.int_to_hex(int(len(pubKey) / 2), 1) + pubKey\n input.set_scriptSig(\"\")\n scriptSignatures.append(scriptSignature)\n\n elif isinstance(input, TxP2WPKHInput) or isinstance(input, TxP2SHInput):\n witness_hash = WitnessHash(self, input)\n signature = sign_segwit_input(privKey, witness_hash.bytes_hash())\n witness = Witness(signature, pubKey)\n input.set_witness(witness)\n\n else:\n raise NotYetSupportedError()\n index += 1\n\n # Assign ScriptSignatures to P2PKH Inputs after signing\n index = 0\n for input in self.get_inputs():\n if isinstance(input, TxP2PKHInput):\n input.set_scriptSig(scriptSignatures[index])\n index += 1\n\n self._signed = True\n\n return self\n\n def _analyse(self):\n # Analyse Inputs\n signed = None\n for input in self.get_inputs():\n if isinstance(input, TxP2WPKHInput) or isinstance(input, TxP2SHInput):\n self._segwit = True\n if input.get_witness():\n signed = True\n else:\n signed = False\n elif isinstance(input, TxCoinbaseInput):\n self._coinbase = True\n self._segwit = True\n signed = True\n elif isinstance(input, TxP2PKHInput):\n if len(input.get_scriptSig()) > 50:\n signed = True\n else:\n signed = False\n self._signed = signed\n\n if not self.get_version():\n self.set_version(4)\n if self.is_segwit():\n if not self.get_marker():\n self.set_marker(0)\n if not self.get_flag():\n self.set_flag(1)\n if not self.get_sigHash():\n self.set_sigHash(SIGHASH)\n\n def verify(self) -> bool:\n pass\n\n def to_json(self) -> {}:\n json = {}\n json.update({\"txid\": self.get_txid()})\n json.update({\"hash\": self.get_hash()})\n json.update({\"size\": self.size()})\n json.update({\"vsize\": self.get_vSize()})\n json.update({\"weight\": self.get_weight()})\n json.update({\"fee\": self.get_fee()})\n json.update({\"version\": self.get_version()})\n if 
not self.get_marker() is None:\n json.update({\"marker\": self.get_marker()})\n if not self.get_flag() is None:\n json.update({\"flag\": self.get_flag()})\n inputs = []\n for input in self.get_inputs():\n inputs.append(input.to_json())\n json.update({\"inputs\": inputs})\n outputs = []\n for output in self.get_outputs():\n outputs.append(output.to_json())\n json.update({\"outputs\": outputs})\n json.update({\"lockTime\": self.get_lockTime()})\n json.update({\"serialized\": self.serialize()})\n return json\n","repo_name":"eric-volz/DefichainPython","sub_path":"defichain/transactions/rawtransactions/tx.py","file_name":"tx.py","file_ext":"py","file_size_in_byte":23102,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"57"} +{"seq_id":"26187097618","text":"\n# | 0| 1| 2| 3| 4| 5| 6| 7|\n# -------------------------\n# | 8| 9|10|11|12|13|14|15|\n# -------------------------\n# |16|17|18|19|20|21|22|23|\n# -------------------------\n# |24|25|26|27|28|29|30|31|\n# -------------------------\n# |32|33|34|35|36|37|38|39|\n# -------------------------\n# |40|41|42|43|44|45|46|47|\n# -------------------------\n# |48|49|50|51|52|53|54|55|\n# -------------------------\n# |56|57|58|59|60|61|62|63|\n\n\ntop_edges = [0, 1, 2, 3, 4, 5, 6, 7]\nbottom_edges = [56, 57, 58, 59, 60, 61, 62, 63]\nleft_edges = [0, 8, 16, 24, 32, 40, 48, 56]\nright_edges = [7, 15, 23, 31, 39, 47, 55, 63]\n\ndef create_graph(numbers):\n graph = {}\n\n for i in numbers:\n graph[i] = []\n number_is_top_edge = i in top_edges \n number_is_bottom = i in bottom_edges\n number_is_left_edge = i in left_edges \n number_is_right_edge = i in right_edges \n # traverse vertically up the board\n if not number_is_top_edge and i - 16 >= 0:\n # can traverse upwards\n working_number = i - 16\n # try to go one left\n if not number_is_left_edge:\n graph[i].append(working_number - 1)\n # try to go one right\n if not number_is_right_edge:\n graph[i].append(working_number + 1)\n pass\n # traverse down the board\n if not number_is_bottom and i + 16 <= 63:\n # can traverse down\n working_number = i + 16\n # try to go one left\n if not number_is_left_edge:\n graph[i].append(working_number - 1)\n if not number_is_right_edge:\n graph[i].append(working_number + 1)\n # traverse left 2 places\n if not number_is_left_edge and i - 1 not in left_edges:\n # go left two places\n working_number = i - 2\n # try to go up one row (-8)\n if not number_is_top_edge:\n graph[i].append(working_number - 8)\n # try to go down one row\n if not number_is_bottom:\n graph[i].append(working_number + 8)\n # traverse right 2 places\n if not number_is_right_edge and i + 1 not in right_edges:\n # can traverse right\n working_number = i + 2\n # try to go up one row (-8)\n if not number_is_top_edge:\n graph[i].append(working_number - 8)\n # try to down two rows (16)\n if working_number + 8 not in bottom_edges:\n graph[i].append(working_number + 8)\n\n return graph\n \n# visits all the nodes of a graph (connected coamponent) using BFS\ndef bfs_shortest_path(graph, start, goal):\n # keep track of explored nodes\n explored = []\n # keep track of all the paths to be checked\n queue = [[start]]\n \n # return path if start is goal\n if start == goal:\n return \"That was easy! 
# visits all the nodes of a graph (connected component) using BFS\ndef bfs_shortest_path(graph, start, goal):\n    # keep track of explored nodes\n    explored = []\n    # keep track of all the paths to be checked\n    queue = [[start]]\n    \n    # return path if start is goal\n    if start == goal:\n        return \"That was easy! Start = goal\"\n    # keeps looping until all possible paths have been checked\n    while queue:\n        # pop the first path from the queue\n        path = queue.pop(0)\n        # get the last node from the path\n        node = path[-1]\n        if node not in explored:\n            neighbours = graph[node]\n            # go through all neighbour nodes, construct a new path and\n            # push it into the queue\n            for neighbour in neighbours:\n                new_path = list(path)\n                new_path.append(neighbour)\n                queue.append(new_path)\n                # return path if neighbour is goal\n                if neighbour == goal:\n                    return len(new_path) - 1\n            \n            # mark node as explored\n            explored.append(node)\n            \n    # in case there's no path between the 2 nodes\n    return \"no connecting path.\"\n    \ndef answer(start, end):\n    numbers = list(range(0, 64))\n    graph = create_graph(numbers)\n    return bfs_shortest_path(graph, start, end)\n\ndef test_create_graph_for_top_edge():\n    numbers = [0, 1, 2, 3, 4, 5, 6, 7]\n    graph = {\n        0: [10, 17],\n        1: [11, 16, 18],\n        2: [8, 12, 17, 19],\n        3: [9, 13, 18, 20],\n        4: [10, 14, 19, 21],\n        5: [11, 15, 20, 22],\n        6: [12, 21, 23],\n        7: [13, 22]\n    }\n\n    result = create_graph(numbers)\n\n    d = {key: sorted(value) for (key, value) in result.items()}\n\n    assert d == graph\n\ndef test_corners():\n    numbers = [0, 7, 56, 63]\n    graph = {\n        0: [10, 17],\n        7: [13, 22],\n        56: [41, 50],\n        63: [46, 53]\n    }\n\n    result = create_graph(numbers)\n    d = {key: sorted(value) for (key, value) in result.items()}\n    assert d == graph\n\ndef test_left_edge():\n    numbers = [8, 40]\n    graph = {\n        8: [2, 18, 25],\n        40: [25, 34, 50, 57]\n    }\n\n    result = create_graph(numbers)\n    d = {key: sorted(value) for (key, value) in result.items()}\n    assert d == graph\n\ndef test_right_edge():\n    numbers = [31, 55]\n    graph = {\n        31: [14, 21, 37, 46],\n        55: [38, 45, 61]\n    }\n    result = create_graph(numbers)\n    d = {key: sorted(value) for (key, value) in result.items()}\n    assert d == graph\n\ndef test_middle():\n    numbers = [26, 44]\n    graph = {\n        26: [9, 11, 16, 20, 32, 36, 41, 43],\n        44: [27, 29, 34, 38, 50, 54, 59, 61]\n    }\n\n    result = create_graph(numbers)\n    d = {key: sorted(value) for (key, value) in result.items()}\n    assert d == graph\n\ndef test_get_shortest_path():\n    result = answer(19, 36)\n    assert result == 1\n","repo_name":"j23schoen/chess_moves","sub_path":"test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"13424381932","text":"pound = int(input(\"Please enter how much the item weighs in lbs: \"))\nconvert = input(\"Please enter the weight you want to convert to:\\nkg\\nstone\\nounce\\ntonne\\nImperial tonne\\ngram\\nUS ton\")\n\ndef kg(pound):\n    kg = pound * 0.45\n    return str(kg)\n\ndef stone(pound):\n    stone = pound * 0.07\n    return str(stone)\n\ndef ounce(pound):\n    ounce = pound * 16\n    return str(ounce)\n\ndef tonne(pound):\n    tonne = pound * 0.000453\n    return str(tonne)\n\ndef impton(pound):\n    impton = pound * 0.000446\n    return str(impton)\n\ndef gram(pound):\n    gram = pound * 453.59\n    return str(gram)\n\ndef uston(pound):\n    uston = pound * 0.0005\n    return str(uston)\n\n
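# Editorial sketch (not part of the original script): the same first-letter dispatch\n# as the if-chain below, but table-driven; it assumes the conversion functions above.\ndef convert_pounds(pound, convert):\n    units = {'k': (kg, 'kg'), 's': (stone, 'st'), 'o': (ounce, 'ounces'),\n             't': (tonne, 'tonne'), 'i': (impton, 'Imperial tonne'),\n             'g': (gram, 'grams'), 'u': (uston, 'US ton')}\n    func, label = units.get(convert[:1].lower(), (None, None))\n    if func is None:\n        return 'Please enter one of the weights listed above to convert to'\n    return f'{pound} lbs converted to {convert} is: ' + func(pound) + ' ' + label\n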
\nif convert.startswith('k'):\n    print(f\"{pound} lbs converted to {convert} is: \" + kg(pound) + \" kg\")\nelif convert.startswith('s'):\n    print(f\"{pound} lbs converted to {convert} is: \" + stone(pound) + \" st\")\nelif convert.startswith('o'):\n    print(f\"{pound} lbs converted to {convert} is: \" + ounce(pound) + \" ounces\")\nelif convert.startswith('t'):\n    print(f\"{pound} lbs converted to {convert} is: \" + tonne(pound) + \" tonne\")\nelif convert.startswith('i'):\n    print(f\"{pound} lbs converted to {convert} is: \" + impton(pound) + \" Imperial tonne\")\nelif convert.startswith('g'):\n    print(f\"{pound} lbs converted to {convert} is: \" + gram(pound) + \" grams\")\nelif convert.startswith('u'):\n    print(f\"{pound} lbs converted to {convert} is: \" + uston(pound) + \" US ton\")\nelse:\n    print(\"Please enter one of the weights listed above to convert to\")\n    \n    \n","repo_name":"Chrisgarlick/Python_Projects","sub_path":"My_Projects/Weight_converter.py","file_name":"Weight_converter.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"74529094897","text":"# ./turn_templates.py\n\nfrom extensions.annoy_ltm.helpers import replace_all\n\ndef get_turn_templates(state, is_instruct, logger):\n\n    logger(f\"state['turn_template']: {state['turn_template']}\", 5)\n    \n    # Building the turn templates\n    if 'turn_template' not in state or state['turn_template'] == '':\n        if is_instruct:\n            template = '<|user|>\\n<|user-message|>\\n<|bot|>\\n<|bot-message|>\\n'\n        else:\n            template = '<|user|>: <|user-message|>\\n<|bot|>: <|bot-message|>\\n'\n    else:\n        template = state['turn_template'].replace(r'\\n', '\\n')\n\n    replacements = {\n        '<|user|>': state['name1'].strip(),\n        '<|bot|>': state['name2'].strip(),\n    }\n    logger(f\"turn_template replacements: {replacements}\", 5)\n\n    user_turn = replace_all(template.split('<|bot|>')[0], replacements)\n    bot_turn = replace_all('<|bot|>' + template.split('<|bot|>')[1], replacements)\n    user_turn_stripped = replace_all(user_turn.split('<|user-message|>')[0], replacements)\n    bot_turn_stripped = replace_all(bot_turn.split('<|bot-message|>')[0], replacements)\n\n    logger(f\"turn_templates:\\nuser_turn:{user_turn}\\nbot_turn:{bot_turn}\\nuser_turn_stripped:{user_turn_stripped}\\nbot_turn_stripped:{bot_turn_stripped}\", 5)\n\n    return user_turn, bot_turn, user_turn_stripped, bot_turn_stripped\n\ndef apply_turn_templates_to_rows(rows, state, logger):\n    is_instruct = state['mode'] == 'instruct'\n    user_turn, bot_turn, user_turn_stripped, bot_turn_stripped = get_turn_templates(state, is_instruct, logger=logger)\n    output_rows = []\n    for i, row in enumerate(rows):\n        if row[0] not in ['', '<|BEGIN-VISIBLE-CHAT|>']:\n            user_row = replace_all(user_turn, {'<|user-message|>': row[0].strip(), '<|round|>': str(i)})\n        else:\n            user_row = row[0]\n        bot_row = bot_turn.replace('<|bot-message|>', row[1].strip())\n        output_rows.append((user_row, bot_row))\n\n    return output_rows","repo_name":"YenRaven/annoy_ltm","sub_path":"turn_templates.py","file_name":"turn_templates.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"57"}
+{"seq_id":"25567361908","text":"\"\"\"\nOrdinary Conv Block and Residual Block\n\"\"\"\n\nimport torch.nn as nn\n\n\ndef default_conv(in_channels, out_channels, kernel_size):\n    \"\"\"\n    Ordinary Conv Block\n    Args:\n        in_channels (int) : number of input channels,\n        out_channels (int) : number of output channels,\n        kernel_size (int) : size of the kernel\n\n    Returns:\n        nn.Conv2d (nn.Module): Convolutional layer\n    \"\"\"\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size,\n        padding=1, bias=True)\n\n\nclass ResBlock(nn.Module):\n    \"\"\"\n    Residual Block\n    Args:\n        n_feats (int) : number of features\n        res_scale (float) : scale of the residual\n    \"\"\"\n\n    def __init__(self, n_feats, res_scale=0.1):\n        
super(ResBlock, self).__init__()\n mid_feats = int(n_feats / 4)\n self.body = nn.Sequential(\n nn.Conv2d(n_feats, mid_feats,\n kernel_size=1, padding=0, stride=1),\n nn.PReLU(),\n nn.Conv2d(mid_feats, mid_feats,\n kernel_size=3, padding=1, stride=1),\n nn.PReLU(),\n nn.Conv2d(mid_feats, n_feats,\n kernel_size=1, padding=0, stride=1)\n )\n self.res_scale = res_scale\n\n def forward(self, x):\n \"\"\"\n Forward pass\n Args:\n x (torch.Tensor) : input tensor\n Returns:\n res (torch.Tensor) : output tensor\n\n \"\"\"\n res = self.body(x).mul(self.res_scale)\n res += x\n\n return res\n","repo_name":"orange-fritters/fast-mri","sub_path":"utils/models/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"57"} +{"seq_id":"6372307448","text":"from gradientone import InstrumentDataHandler\nfrom gradientone import query_to_dict\nfrom gradientone import render_json_cached\nfrom gradientone import author_creation\nfrom onedb import TestResultsDB\nfrom onedb import TestResultsDB_key\nfrom onedb import TestDB\nfrom onedb import TestDB_key\nfrom onedb import ConfigDB\nfrom onedb import ConfigDB_key\nfrom onedb import company_key\nfrom onedb import StateDB\nfrom onedb import BlobberDB\nfrom onedb import agilentU2000data\nimport itertools\nimport jinja2\nimport json\nimport logging\nimport os\nimport webapp2\nfrom google.appengine.api import memcache\nfrom google.appengine.api import oauth\nfrom google.appengine.api import users\nfrom google.appengine.api import search\nfrom google.appengine.ext import db\nimport appengine_config\nimport datetime\nfrom string import maketrans\nimport docs\nimport searchconfig\n\n\nclass Handler(InstrumentDataHandler):\n def post(self, company_nickname=\"\", testplan_name=\"\", config_name=\"\", stop_tse=\"\"):\n test_complete_content = json.loads(self.request.body)\n test_plan = test_complete_content['test_plan']\n i_settings = test_complete_content['i_settings']\n start_tse = test_complete_content['start_tse']\n hardware_name = test_complete_content['hardware_name']\n logging.debug(\"TEST_CONTENT: %s\" % test_complete_content)\n a = agilentU2000data(\n parent = company_key(), \n key_name = str(start_tse),\n config_name=config_name,\n company_nickname = company_nickname,\n hardware_name=hardware_name,\n i_settings=str(i_settings),\n test_results_data= str(test_complete_content['cha']),\n start_tse=int(start_tse),\n ) \n a.put()\n if test_plan == 'True': \n stop_time = datetime.datetime.now()\n key = testplan_name+'U2001A'+config_name\n prior_key = db.Key.from_path('StateDB', key, parent = company_key())\n prior_event = db.get(prior_key)\n prior_event.stop_time = stop_time\n prior_event.put() \n config_to_update = ConfigDB.get(ConfigDB_key(config_name))\n config_to_update.commence_test = False\n config_to_update.active_testplan_name = None\n config_to_update.put()\n q = db.Query(StateDB)\n q.filter('company_nickname =', company_nickname).filter('testplan_name =', testplan_name)\n q.filter('start_time =', None).filter('widget =', 'U2001A').order('order')\n next_state = q.get()\n to_save = []\n key = 'TestResultsDB'+testplan_name+config_name\n test_plan = True\n trace = False\n r = TestResultsDB(parent = company_key(), testplan_name=testplan_name, key_name = key,\n company_nickname = company_nickname, \n config_name=config_name,\n hardware_name=hardware_name,\n test_plan = test_plan,\n test_complete_bool = True,\n test_complete = int(stop_tse),\n trace = trace, 
\n                 start_tse=test_complete_content['start_tse'],\n                 u2000_result = str(test_complete_content['cha']),\n                 )\n            to_save.append(r)\n            db.put(to_save)\n            if next_state is None:\n                key = db.Key.from_path('TestDB', testplan_name, parent = company_key())\n                testplan = db.get(key)\n                testplan.stop_time = stop_time\n                testplan.put()\n            else:\n                start_time = datetime.datetime.now()\n                next_state.start_time = start_time\n                next_config_name = next_state.name\n                next_state.put()\n                config_to_update = ConfigDB.get(ConfigDB_key(next_config_name))\n                config_to_update.commence_test = True\n                config_to_update.active_testplan_name = testplan_name\n                config_to_update.put()\n\n        else:\n            test_plan = False\n            trace = True\n            config_name = test_complete_content['config_name']\n            hardware_name = test_complete_content['hardware_name']\n            to_save = []\n            key = testplan_name + str(test_complete_content['start_tse'])\n            logging.debug(\"TRACE COMPLETE - KEY: %s\" % key)\n            logging.debug(\"TRACE COMPLETE - CONTENT: %s\" % test_complete_content)\n            r = TestResultsDB(parent = company_key(), testplan_name=testplan_name, key_name = key,\n                 company_nickname = company_nickname,\n                 config_name=config_name,\n                 hardware_name=hardware_name,\n                 test_plan = test_plan,\n                 test_complete_bool = True,\n                 test_complete = int(stop_tse),\n                 trace = trace,\n                 start_tse=test_complete_content['start_tse'],\n                 u2000_result = str(test_complete_content['cha']),\n                 )\n            to_save.append(r)\n            db.put(to_save)\n        # Testplan or not, index the results\n        blobber_key = db.Key.from_path('BlobberDB', testplan_name, parent = company_key())\n        the_blob = BlobberDB.get(blobber_key)\n        logging.debug(\"THE BLOB!!! %s\" % the_blob)\n        blob_key = the_blob.b_key\n        pass_fail_type = i_settings['pass_fail_type']\n        if pass_fail_type == 'N/A':\n            max_value = 0.0\n            min_value = 0.0\n        else:\n            max_value = float(i_settings['max_value'])\n            min_value = float(i_settings['min_value'])\n        fields = [\n            search.DateField(name='start_datetime',\n                             value=datetime.datetime.fromtimestamp(\n                                 int(test_complete_content['start_tse'])/1000\n                             )),\n            search.TextField(name=docs.U2000.START_TSE, value=str(test_complete_content['start_tse'])),\n            search.NumberField(name=docs.U2000.CORRECTION_FREQ, value=float(i_settings['correction_frequency'])),\n            search.NumberField(name=docs.U2000.MAX_VALUE, value=max_value),\n            search.NumberField(name=docs.U2000.MIN_VALUE, value=min_value),\n            search.NumberField(name=docs.U2000.OFFSET, value=float(i_settings['offset'])),\n            search.TextField(name=docs.U2000.PASS_FAIL_TYPE, value=i_settings['pass_fail_type']),\n            search.TextField(name=docs.U2000.TEST_PLAN, value=test_complete_content['test_plan']),\n            search.TextField(name=docs.U2000.PASS_FAIL, value=i_settings['pass_fail']),\n            search.TextField(name=docs.U2000.HARDWARE_NAME, value=hardware_name),\n            search.NumberField(name=docs.U2000.DATA, value=test_complete_content['cha']),\n            search.TextField(name=docs.U2000.INSTRUMENT_TYPE, value='U2000'),\n            search.TextField(name=docs.U2000.CONFIG_NAME, value=config_name),\n            search.TextField(name=docs.U2000.TESTPLAN_NAME, value=test_complete_content['active_testplan_name']),\n            ]\n        d = search.Document(doc_id=blob_key, fields=fields)\n        try:\n            add_result = search.Index(name=searchconfig.U2000_INDEX_NAME).put(d)\n            logging.debug(\"INDEXED IN %s\" % searchconfig.U2000_INDEX_NAME)\n        except search.Error:\n            logging.exception(\"Search error adding document\")\n        #memcache.set(key, to_save)\n\n\nclass UpdateResults(InstrumentDataHandler):\n    \"\"\"Called from testops.js to update results with pass_fail data\"\"\"\n
    def post(self):\n        results_data = json.loads(self.request.body)\n        config_name = results_data['config_name']\n        trace_name = results_data['trace_name']\n        start_tse = results_data['start_tse']\n        pass_fail = results_data['pass_fail']\n        min_pass = results_data['min_pass_value']\n        max_pass = results_data['max_pass_value']\n        key = trace_name + str(start_tse) + pass_fail\n        cached_result = memcache.get(key)\n        key_name = trace_name + str(start_tse)\n        test_key = db.Key.from_path('TestResultsDB', key_name, parent=company_key())\n        # if not cached_result:\n        #     test_results = TestResultsDB.get(test_key)\n        #     test_results.pass_fail = pass_fail\n        #     test_results.min_pass = float(min_pass)\n        #     test_results.max_pass = float(max_pass)\n        #     test_results.put()\n        #     memcache.set(key, test_results)\n\n\n\n","repo_name":"williamvennard/frogger","sub_path":"AppEngine/u2000_testcomplete.py","file_name":"u2000_testcomplete.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"19871206276","text":"# Using the built-in sort()\ndef sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n    cur = head\n    node_list = []\n    while cur:\n        node_list.append(cur.val)\n        cur = cur.next\n\n    node_list.sort()\n\n    cur = head\n    for val in node_list:\n        cur.val = val\n        cur = cur.next\n\n    return head\n\n# Using merge sort\ndef mergeTwoList(self, list1, list2) -> ListNode:\n    if list1 and list2:\n        if list1.val > list2.val:\n            list1, list2 = list2, list1\n        list1.next = self.mergeTwoList(list1.next, list2)\n\n    return list1 or list2\n\ndef sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n    if not (head and head.next):\n        return head\n\n    # Runner technique (find the middle node)\n    half, slow, fast = None, head, head\n    while fast and fast.next:\n        half = slow\n        slow = slow.next\n        fast = fast.next.next\n    half.next = None\n\n    # Recursively split both halves\n    list1 = self.sortList(head)\n    list2 = self.sortList(slow)\n\n    # Merge while sorting\n    return self.mergeTwoList(list1, list2)\n","repo_name":"codesquad-backend-study/algorithm-study","sub_path":"leetcode/ayaan/week24/L148.py","file_name":"L148.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"33289886605","text":"from typing import Union\n\nimport sbca_wrapper.error as indy_error\nfrom fastapi import APIRouter, HTTPException\n\nfrom src.data_models.base_data_model import APIBaseModel\nfrom src.data_models.indy_data_model import CreateWallet, DeleteWallet, \\\n    CreateDID\nfrom src.services.azure_storage import delete_wallet\nfrom src.services.custom_errors import WalletAlreadyExists, WalletDoesNotExists\nfrom src.services.indy import create_wallet, query_wallet, create_did\n\n# Init APIRouter\nrouter = APIRouter()\n\n\ndef _return_response_model(message_detail: Union[str, dict],\n                           message_type: str) -> dict:\n    \"\"\"Helper function to return the correct ResponseModel on success\n\n    :param message_detail: str or dict - Message that will be in msg\n    :param message_type: str - Message type defined like:\n        \"interaction_'success/error etc'_(status code if available)\"\n    :return: dict - Dictionary with the expected Response format\n        of the defined response_model\n    \"\"\"\n    return {'detail': [{'msg': message_detail, 'type': message_type}]}\n\n\ndef _return_exception_model(message_detail: str, message_type: str) -> list:\n    \"\"\"Helper function to return the correct ResponseModel on Exception\n\n    :param message_detail: str - Message that will be in msg\n
    :param message_type: str - Message type defined like:\n        \"interaction_'success/error etc'_(status code if available)\"\n    :return: list - List with the expected Response format\n        of the defined response_model\n    \"\"\"\n    return [{'msg': message_detail, 'type': message_type}]\n\n\n@router.post('/{account_id}/wallet', response_model=APIBaseModel)\nasync def create_wallet_for_account_id(account_id: str,\n                                       req_body: CreateWallet) -> dict:\n    \"\"\"Endpoint to create a Wallet for an AccountID in the Azure-Cloud-Storage\n\n    :param account_id: str - AccountID of the caller\n    :param req_body: dict - Request Body of the POST-Request with keys:\n        [name, passphrase]\n    :except WalletAlreadyExistsError: str - Wallet already exists\n        in local Storage\n    :except UnknownWalletTypeError: str - An unknown wallet type was used\n        when trying to create the wallet\n    :except WalletStorageError: str - The wallet storage ran into\n        problems during creation\n    :except LibIndyError: str - This exception handles all LibindyErrors\n    :return: dict - APIBaseModel\n    \"\"\"\n    try:\n        message = await create_wallet(account_id=account_id,\n                                      wallet_name=req_body.name,\n                                      passphrase=req_body.passphrase)\n        return _return_response_model(message_detail=message,\n                                      message_type='request_success')\n    except WalletAlreadyExists:\n        raise HTTPException(status_code=409, detail=_return_exception_model(\n            message_detail=f'AccountID already has a Wallet called: '\n                           f'{req_body.name}',\n            message_type=f'azure_error.409'))\n    except indy_error.WalletAlreadyExistsError as e:\n        raise HTTPException(status_code=409, detail=_return_exception_model(\n            message_detail=e.message,\n            message_type=f'indy_error.{e.indy_code}'))\n    except indy_error.LibindyError as e:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail=e.message,\n            message_type=f'indy_error.{e.indy_code}'))\n    except Exception:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail='Internal Server Error',\n            message_type=f'server_error_500'))\n\n\n@router.delete('/{account_id}/wallet', response_model=APIBaseModel)\ndef delete_wallet_for_account_id(account_id: str,\n                                 req_body: DeleteWallet) -> dict:\n    \"\"\"Endpoint to delete a Wallet for the AccountID of the Azure-Cloud-Storage\n\n    :param account_id: str - AccountID of the caller\n    :param req_body: dict - Request Body of the DELETE-Request with keys: [name]\n    :except WalletDoesNotExists: Exception when the Wallet can't be found in the\n        Azure Cloud Storage\n    :except Exception: Broad Exception to handle all remaining errors\n    :return: dict - APIBaseModel\n    \"\"\"\n    try:\n        message = delete_wallet(account_id=account_id,\n                                wallet_name=req_body.name)\n        return _return_response_model(message_detail=message,\n                                      message_type='request_success')\n    except WalletDoesNotExists:\n        raise HTTPException(status_code=404, detail=_return_exception_model(\n            message_detail='Wallet does not exist under this AccountID',\n            message_type=f'azure_error.404'))\n    except Exception as e:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail=str(e),\n            message_type=f'azure_error'))\n\n
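# Editorial usage sketch (hypothetical host and values, not from this repo):\n# exercising the wallet routes above with the requests library.\n#   requests.post('http://localhost:8000/acc-1/wallet', json={'name': 'main', 'passphrase': 'secret'})\n#   requests.delete('http://localhost:8000/acc-1/wallet', json={'name': 'main'})\n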
\n@router.get('/{account_id}/{wallet_name}', response_model=APIBaseModel)\nasync def get_wallet_info(account_id: str, wallet_name: str,\n                          wallet_passphrase: str):\n    \"\"\"Endpoint to get information about the stored Data in the Wallet from\n    the Azure-Cloud-Storage\n\n    :param account_id: str - AccountID of the caller\n    :param wallet_name: str - The name of the folder as it is stored in\n        the Azure-Cloud\n    :param wallet_passphrase: str - Wallet passphrase which will be used to\n        unlock (open) the wallet\n    :except WalletDoesNotExists: Exception when the Wallet can't be found in the\n        Azure Cloud Storage\n    :except LibIndyError: str - This exception handles all LibindyErrors\n    :return: dict - APIBaseModel\n    \"\"\"\n    try:\n        message = await query_wallet(account_id=account_id,\n                                     wallet_name=wallet_name,\n                                     wallet_passphrase=wallet_passphrase)\n        return _return_response_model(message_detail=message,\n                                      message_type='request_success')\n    except WalletDoesNotExists:\n        raise HTTPException(status_code=404, detail=_return_exception_model(\n            message_detail='Wallet does not exist under this AccountID',\n            message_type=f'azure_error.404'))\n    except indy_error.LibindyError as e:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail=e.message,\n            message_type=f'indy_error.{e.indy_code}'))\n    except Exception:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail='Internal Server Error',\n            message_type=f'server_error_500'))\n\n\n@router.post('/{account_id}/did', response_model=APIBaseModel)\nasync def create_did_in_wallet(account_id: str, req_body: CreateDID):\n    \"\"\"Endpoint to create a Decentralized Identifier (DID) inside the caller's Wallet\n\n    :param account_id: str - AccountID of the caller\n    :param req_body: dict - Request Body of the POST-Request with keys:\n        [name, passphrase, (seed)]\n    :except WalletDoesNotExists: Exception when the Wallet can't be found in the\n        Azure Cloud Storage\n    :except LibIndyError: str - This exception handles all LibindyErrors\n    :return: dict - APIBaseModel\n    \"\"\"\n    try:\n        message = await create_did(account_id, wallet_name=req_body.name,\n                                   wallet_passphrase=req_body.passphrase,\n                                   seed=req_body.seed)\n        return _return_response_model(message_detail=message,\n                                      message_type='request_success')\n    except WalletDoesNotExists:\n        raise HTTPException(status_code=404, detail=_return_exception_model(\n            message_detail='Wallet does not exist under this AccountID',\n            message_type=f'azure_error.404'))\n    except indy_error.LibindyError as e:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail=e.message,\n            message_type=f'indy_error.{e.indy_code}'))\n    except Exception:\n        raise HTTPException(status_code=500, detail=_return_exception_model(\n            message_detail='Internal Server Error',\n            message_type=f'server_error_500'))\n","repo_name":"JeromeK13/ipa_wallet_storage","sub_path":"src/routers/indy_router.py","file_name":"indy_router.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"73766269937","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pylab as pl\r\nfrom sklearn import svm\r\n\r\n#we create 40 separable points\r\nnp.random.seed(0)  # fix the seed so the same random values are drawn on every run\r\n# X holds the training samples: a 20x2 normally distributed array shifted to form two clusters\r\nX = np.r_[np.random.randn(20,2)-[2,2],np.random.randn(20,2)+[2,2]]\r\n# Y holds the class labels: the first 20 points are 0, the last 20 are 1\r\nY = [0] * 20 + [1] * 20\r\n\r\n#fit the model\r\nclf = svm.SVC(kernel='linear')\r\nclf.fit(X,Y)\r\n\r\n#get the separating hyperplane\r\nw = clf.coef_[0]  # w is 2-dimensional\r\na = -w[0] / w[1]  # a is the slope of the plotted line\r\nxx = np.linspace(-5,5)  # evenly spaced x values from -5 to 5\r\n# (clf.intercept_[0]) / w[1] is the intercept of the plotted line\r\nyy = a * xx - (clf.intercept_[0]) / w[1]\r\n\r\n
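# Why this works: the decision boundary satisfies w[0]*x + w[1]*y + intercept = 0,\r\n# so y = -(w[0]/w[1])*x - intercept/w[1]; the margin lines below share the slope a,\r\n# with their intercepts fixed by the support vectors they pass through.\r\n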
#plot the parallels to the separating hyperplane that pass through the\r\n#support vectors\r\nb = clf.support_vectors_[0]  # take the first support vector\r\nyy_down = a * xx + (b[1] - a * b[0])  # (b[1] - a * b[0]) is the intercept\r\nb = clf.support_vectors_[-1]  # take the last support vector\r\nyy_up = a * xx + (b[1] - a * b[0])\r\n\r\nprint('w: ',w)\r\nprint('a: ',a)\r\nprint('xx: ',xx)\r\nprint('yy: ',yy)\r\nprint('support vectors: ',clf.support_vectors_)\r\nprint('clf.coef_: ',clf.coef_)\r\n\r\n\r\npl.plot(xx,yy,'k-')\r\npl.plot(xx,yy_down,'k--')\r\npl.plot(xx,yy_up,'k--')\r\n\r\n#pl.scatter(clf.support_vectors_[:,0],clf.support_vectors_[:,1],\r\n#           s=80,facecolors='none')\r\npl.scatter(X[:,0],X[:,1],c=Y,cmap=pl.cm.Paired)  # plot the two classes of points\r\n\r\n\r\npl.axis('tight')  # fit the axis limits tightly to the plotted data\r\npl.show()","repo_name":"xiangrikuikui/machine-learning","sub_path":"SVM/svm2.py","file_name":"svm2.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"22603345190","text":"from django import template\nfrom women.models import *\n\nregister = template.Library()  # an instance of the Library class from the template module\n\n\n@register.simple_tag()\ndef get_categories(filter=None):\n    if not filter:\n        return Category.objects.all()\n    else:\n        return Category.objects.filter(pk=filter)  # returns a collection of data\n\n\n@register.inclusion_tag(\"women/list_categories.html\")\ndef show_categories(sort=None, cat_selected=0):  # sort holds the field name that the categories are ordered by\n    if not sort:\n        cats = Category.objects.all()\n    else:\n        cats = Category.objects.order_by(sort)\n    return {\"cats\": cats,\n            \"cat_selected\": cat_selected}  # returns a dict handed to the template, which renders an html fragment\n\n\n@register.inclusion_tag(\"women/menu_list.html\")\ndef show_menu():\n    menu = [\n        {'title': 'О сайте', 'url_name': 'about'},\n        {'title': 'Добавить статью', 'url_name': 'add_page'},\n        {'title': 'Обратная связь', 'url_name': 'contact'},\n        {'title': 'Войти', 'url_name': 'login'},\n    ]\n    return {'menu': menu}\n","repo_name":"MaximrRomanov/django_practice","sub_path":"coolsite/women/templatetags/women_tags.py","file_name":"women_tags.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"40313344864","text":"class ImageRecommender : \n    \n    def __init__(self, model, list_of_image, filespath) : \n        self.model = model\n        self.filespath = filespath\n        self.list_of_image = list_of_image\n        # output.shape yields Dimension objects, so eval the string form to get plain ints\n        self.image_width = eval(str(self.model.layers[0].output.shape[1]))\n        self.image_height = eval(str(self.model.layers[0].output.shape[2]))\n        # remove the last layer in order to get features instead of predictions\n        self.image_features_extractor = Model(inputs=self.model.input, \n                                              outputs=self.model.layers[-2].output)\n        self.processed_image = self.Pics2Matrix()\n        self.sim_table = self.GetSimilarity(self.processed_image)\n        \n    def ddl_images(self, image_url) :\n        try : \n            return load_img(self.filespath + image_url, \n                            target_size=(self.image_width, self.image_height))\n        except OSError : \n            # image unreadable // remove from list\n            self.list_of_image = [x for x in self.list_of_image if x != image_url]\n            #self.list_of_image.remove(image_url)\n            pass\n        \n    def Pics2Matrix(self) :\n        \"\"\"\n        # convert the PIL image to a numpy array\n        # in PIL - image is in (width, height, channel)\n        # in Numpy - image is in (height, width, channel)\n        # convert the image / images into batch format\n        # expand_dims will add an extra dimension to the data at a particular axis\n        # we want the input matrix to the network to be of the 
form (batchsize, height, width, channels)\n        # thus we add the extra dimension to the axis 0.\n        \"\"\"\n        #from keras.preprocessing.image import load_img,img_to_array\n        list_of_expanded_array = list()\n        for i in tqdm(range(len(self.list_of_image) - 1)) :\n            try :\n                tmp = img_to_array(self.ddl_images(self.list_of_image[i]))\n                expand = np.expand_dims(tmp, axis = 0)\n                list_of_expanded_array.append(expand)\n            except ValueError : \n                self.list_of_image = [x for x in self.list_of_image if x != self.list_of_image[i]]\n                #self.list_of_image.remove(self.list_of_image[i])\n        images = np.vstack(list_of_expanded_array)\n        \"\"\"\n        list_of_expanded_array = [try np.expand_dims(img_to_array(self.ddl_images(self.list_of_image[i])), axis = 0) except ValueError pass \\\n                                  for i in tqdm(range(len(self.list_of_image)))]\n        images = np.vstack(list_of_expanded_array)\n        #from keras.applications.imagenet_utils import preprocess_input()\n        # prepare the image for the model\"\n        \"\"\"\n        return preprocess_input(images)\n    \n    def GetSimilarity(self, processed_imgs) :\n        print('============ algorithm predicting features =========')\n        imgs_features = self.image_features_extractor.predict(processed_imgs)\n        print(\"Our image has %i features:\" %imgs_features.size)\n        cosSimilarities = cosine_similarity(imgs_features)\n        cos_similarities_df = pd.DataFrame(cosSimilarities, \n                                           columns=self.list_of_image[:len(self.list_of_image) -1],\n                                           index=self.list_of_image[:len(self.list_of_image) -1])\n        return cos_similarities_df\n    \n    def most_similar_to(self, given_img, nb_closest_images = 5):\n\n        print(\"-----------------------------------------------------------------------\")\n        print(\"original manga:\")\n\n        original = self.ddl_images(given_img)\n        plt.imshow(original)\n        plt.show()\n\n        print(\"-----------------------------------------------------------------------\")\n        print(\"most similar manga:\")\n\n        closest_imgs = self.sim_table[given_img].sort_values(ascending=False)[1:nb_closest_images+1].index\n        closest_imgs_scores = self.sim_table[given_img].sort_values(ascending=False)[1:nb_closest_images+1]\n\n        for i in range(0,len(closest_imgs)):\n            original = self.ddl_images(closest_imgs[i])\n            plt.imshow(original)\n            plt.show()\n            print(\"similarity score : \",closest_imgs_scores[i])\n","repo_name":"Olaniyiajayi2/NiyiNet-Neural_network","sub_path":"recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"24870256480","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Attention(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.wx = nn.Linear(512, 512)\n        self.wg = nn.Linear(512, 512)\n        self.whx = nn.Linear(512, 1)\n        self.activation = nn.Tanh()\n        self.dropout = nn.Dropout(p=0.5)\n\n    def forward(self, X, g):\n        if isinstance(g, int):\n            H = self.wx(X)\n        else:\n            g = g.unsqueeze(1)\n            H = self.wx(X) + self.wg(g)\n\n        H = self.activation(H)\n        H = self.dropout(H)\n        alpha = F.softmax(self.whx(H), dim=1)\n        x_hat = torch.sum(alpha * X, dim=1)\n        return x_hat\n\n
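# The Attention block above implements the soft attention used throughout this file:\n#   H = tanh(Wx X + Wg g);  alpha = softmax(w_hx H);  x_hat = sum_i alpha_i * X_i\n# where g is the guiding vector (the int 0 on the first pass, so only Wx X is used).\n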
\nclass AlternatingCoAttention(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.image_attention = Attention()\n        self.question_attention_1 = Attention()\n        self.question_attention_2 = Attention()\n\n    def forward(self, question_features, image_features):\n        s = self.question_attention_1(question_features, 0)\n        v = self.image_attention(image_features, s)\n        out = self.question_attention_2(question_features, v)\n        return v, out\n\n\nclass CoattentionNet(nn.Module):\n    \"\"\"\n    Predicts an answer to a question about an image using the Hierarchical Question-Image Co-Attention\n    for Visual Question Answering (Lu et al., 2017) architecture.\n    \"\"\"\n    def __init__(self, question_word_list_length, answer_list_length):\n        super().__init__()\n        ############ 3.3 TODO\n        self.word_fc = nn.Linear(question_word_list_length, 512)\n\n        self.unigram_conv = nn.Conv1d(512, 512, kernel_size=1, padding=0)\n        self.bigram_conv = nn.Conv1d(512, 512, kernel_size=2, padding=0)\n        self.trigram_conv = nn.Conv1d(512, 512, kernel_size=3, padding=1)\n\n        self.lstm = nn.LSTM(input_size=512, hidden_size=512, batch_first=True)\n\n        self.word_level = AlternatingCoAttention()\n        self.phrase_level = AlternatingCoAttention()\n        self.question_level = AlternatingCoAttention()\n\n        self.ww = nn.Linear(512, 512)\n        self.wp = nn.Linear(1024, 512)\n        self.ws = nn.Linear(1024, 1024)\n        self.wh = nn.Linear(1024, answer_list_length)\n\n        self.activation = nn.Tanh()\n        self.dropout1 = nn.Dropout(p=0.5)\n        self.dropout2 = nn.Dropout(p=0.5)\n        self.dropout3 = nn.Dropout(p=0.5)\n        self.dropout4 = nn.Dropout(p=0.5)\n        ############\n\n    def forward(self, image, question_encoding):\n        ############ 3.3 TODO\n        image_features = image.reshape((image.shape[0], -1, image.shape[2]))\n        word_features = self.word_fc(question_encoding)\n        word_features = self.activation(word_features)\n        # word_features = self.dropout1(word_features)\n\n        word_features = word_features.reshape((word_features.shape[0],\n                                               word_features.shape[2],\n                                               word_features.shape[1]))\n\n        unigram_features = self.unigram_conv(word_features)\n        unigram_features = self.activation(unigram_features)\n        bigram_features = self.bigram_conv(F.pad(word_features, (0, 1)))\n        bigram_features = self.activation(bigram_features)\n        trigram_features = self.trigram_conv(word_features)\n        trigram_features = self.activation(trigram_features)\n\n        combined_features = torch.stack((unigram_features, bigram_features, trigram_features))\n        combined_features, _ = torch.max(combined_features, dim=0)\n        combined_features = combined_features.reshape((word_features.shape[0], word_features.shape[2],\n                                                       word_features.shape[1]))\n        question_features = self.lstm(combined_features)[0]\n\n        word_features = word_features.reshape((word_features.shape[0],\n                                               word_features.shape[2],\n                                               word_features.shape[1]))\n\n        vw, qw = self.word_level(word_features, image_features)\n        vp, qp = self.phrase_level(combined_features, image_features)\n        vs, qs = self.question_level(question_features, image_features)\n\n        hw = self.ww(qw + vw)\n        hw = self.activation(hw)\n        hw = self.dropout2(hw)\n        hp = self.wp(torch.cat((qp + vp, hw), dim=1))\n        hp = self.activation(hp)\n        hp = self.dropout3(hp)\n        hs = self.ws(torch.cat((qs + vs, hp), dim=1))\n        hs = self.activation(hs)\n        hs = self.dropout4(hs)\n\n        return self.wh(hs)\n        ############\n","repo_name":"jiaqigeng/CMU-16824-Vision-Learning-and-Recognition","sub_path":"hw3/coattention_net.py","file_name":"coattention_net.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"28060340992","text":"f = open(\"input.txt\")\n\nfoundFlag = False\ntotal = 0\nseen = set()\n\nwhile foundFlag is False:\n    for number in f.readlines():\n        val = int(number[:-1])\n        total += val\n        if total not in seen:\n            seen.add(total)\n        else:\n            foundFlag = True\n            break\n\n    # go back to first line\n    if foundFlag is False:\n        f.seek(0)\n\nf.close()\n\n
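# Note on the loop above: the input is replayed via f.seek(0) until a running total\n# repeats; membership checks against a set are O(1), so each pass stays linear.\n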
print(\"Answer:\", total)\n","repo_name":"MatejBabis/AdventOfCode2k18","sub_path":"day1/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"70113850418","text":"import random\nfrom tabulate import tabulate\nfrom functools import reduce\n\n\nclass Player:\n    \"\"\"\n    Player class represents a player and their capabilities\n    \"\"\"\n    no_of_players = 0\n    all_players = []\n    no_of_rounds = 3\n    cube1 = list(range(1, 7))\n    cube2 = ['CMB', 'TYK', 'NY', 'LON', 'ATN', 'SYN']\n    cube3 = [True, False]\n\n    def __init__(self, name):\n        self.name = name\n        self.play_count = 0\n        self.result = []\n        self.final_anlysed_data = {}\n        Player.no_of_players += 1\n        Player.all_players.append(self)\n\n    def play(self):\n        \"\"\"\n        function to assign random lucky draws of the cubes\n        :return: the list of round results so far, or -1 once all rounds are played\n        \"\"\"\n        if self.play_count < Player.no_of_rounds:\n            d = {'first': random.choice(Player.cube1), 'second': random.choice(Player.cube2), 'third': random.choice(Player.cube3)}\n            self.result.append(d)\n            self.play_count += 1\n            return self.result\n        else:\n            return -1\n\n    def get_total_score(self):\n        \"\"\"\n        function to calculate the total score over all 3 rounds\n        :return: total score after 3 rounds\n        \"\"\"\n        value_list = list(map(lambda x: x['first'] if x['third'] else -1 * x['first'], self.result))\n        total = reduce(lambda x, b: x + b, value_list)\n        self.final_anlysed_data['total_score'] = total\n        return total\n\n    def get_total_with_para(self, list_dictionaries):\n        \"\"\"\n        function to total the scores of the given round results\n        :param list_dictionaries: list of round-result dictionaries\n        :return: total score for those rounds\n        \"\"\"\n        value_list = list(map(lambda x: x['first'] if x['third'] else -1 * x['first'], list_dictionaries))\n        total = reduce(lambda x, b: x + b, value_list)\n        return total\n\n    def get_final_score_for_round(self, game_round):\n        \"\"\"\n        :param game_round: a single round-result dictionary\n        :return: total score for the round\n        \"\"\"\n        if game_round['third']:\n            return game_round['first']\n        else:\n            return -1 * game_round['first']\n\n    def analyse_by_country(self):\n        \"\"\"\n        function to analyse the country details gathered over the 3 rounds\n        :return: list of categorized data\n        \"\"\"\n        categorized_list = []\n        l1 = [self.result[0]['second'], self.result[1]['second'], self.result[2]['second']]\n        l2 = list(set(l1))\n\n        for i in range(0, len(l2)):\n            filter_l = filter(lambda x: x['second'] == l2[i], self.result)\n            value = self.get_total_with_para(filter_l)\n            country_dictionary = {'second': l2[i], 'value': value}\n            self.final_anlysed_data[l2[i]] = value\n            categorized_list.append(country_dictionary)\n        return categorized_list\n\n
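    # Editorial usage sketch (hypothetical names, not from the original repo):\n    #   players = [Player('ann'), Player('bob')]\n    #   for p in players:\n    #       for _ in range(Player.no_of_rounds):\n    #           p.play()\n    #   print(Player.get_match_results(players[0]))  # name of the winner\n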
    def view_round_results(self):\n        \"\"\"\n        function to view the round details of a player\n        :return: dict of the final analysed data\n        \"\"\"\n        print('\\n--------------Round Details---------------\\n')\n        tabulate_list = []\n        self.get_total_score()\n        self.analyse_by_country()\n        # self.get_final_winner()\n        for round_no, dic in enumerate(self.result, start=1):\n            tabulate_list.append(['Round ' + str(round_no), (dic['first'], dic['second'], dic['third']), 'you have earned ' + str(self.get_final_score_for_round(dic)) + ' at ' + dic['second']])\n        print(tabulate([tabulate_list[0], tabulate_list[1], tabulate_list[2]], headers=['Round', 'Result', 'meaning of the result']))\n        keys = list(self.final_anlysed_data)\n        keys.remove('total_score')\n        print('\\n--------------Final Result after 3 Rounds of player {}---------------\\n'.format(self.name))\n        print('total score : ' + str(self.final_anlysed_data['total_score']) + '\\ncity scores :-')\n        for i in range(len(keys)):\n            print('\\r    ' + keys[i] + ' : ' + str(self.final_anlysed_data[keys[i]]))\n        return self.final_anlysed_data\n\n    @staticmethod\n    def get_match_results(self):\n        \"\"\"\n        function to evaluate the winner of the game\n        :param self: a Player instance; call as Player.get_match_results(player)\n        :return: name of the winner\n        \"\"\"\n        self.get_total_score()\n        self.analyse_by_country()\n        max_score = Player.all_players[0].get_total_score()\n        winner = ''\n        for i in Player.all_players:\n            if i.get_total_score() >= max_score:\n                max_score = i.get_total_score()\n                winner = i\n        return winner.name\n","repo_name":"GayaniNisa/rubik-game","sub_path":"rubik_game.py","file_name":"rubik_game.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}